From 3a4bdf5a6a006b795003e27a838ca10d3ce13824 Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Mon, 2 Oct 2023 17:29:33 -0400 Subject: [PATCH 01/14] Make BasicBlock::bbJumpKind private --- src/coreclr/jit/assertionprop.cpp | 6 +- src/coreclr/jit/block.cpp | 8 +- src/coreclr/jit/block.h | 20 ++- src/coreclr/jit/codegenarm.cpp | 6 +- src/coreclr/jit/codegenarm64.cpp | 6 +- src/coreclr/jit/codegenarmarch.cpp | 2 +- src/coreclr/jit/codegencommon.cpp | 6 +- src/coreclr/jit/codegenlinear.cpp | 12 +- src/coreclr/jit/codegenloongarch64.cpp | 6 +- src/coreclr/jit/codegenriscv64.cpp | 6 +- src/coreclr/jit/codegenxarch.cpp | 8 +- src/coreclr/jit/compiler.cpp | 5 +- src/coreclr/jit/compiler.hpp | 12 +- src/coreclr/jit/emitarm.cpp | 4 +- src/coreclr/jit/emitarm64.cpp | 4 +- src/coreclr/jit/emitloongarch64.cpp | 2 +- src/coreclr/jit/emitriscv64.cpp | 2 +- src/coreclr/jit/emitxarch.cpp | 4 +- src/coreclr/jit/fgbasic.cpp | 88 ++++++------ src/coreclr/jit/fgdiagnostic.cpp | 48 +++---- src/coreclr/jit/fgehopt.cpp | 57 ++++---- src/coreclr/jit/fgflow.cpp | 12 +- src/coreclr/jit/fginline.cpp | 14 +- src/coreclr/jit/fgopt.cpp | 145 ++++++++++---------- src/coreclr/jit/fgprofile.cpp | 46 +++---- src/coreclr/jit/fgprofilesynthesis.cpp | 21 +-- src/coreclr/jit/flowgraph.cpp | 38 ++--- src/coreclr/jit/gschecks.cpp | 2 +- src/coreclr/jit/ifconversion.cpp | 8 +- src/coreclr/jit/importer.cpp | 100 +++++++------- src/coreclr/jit/importercalls.cpp | 6 +- src/coreclr/jit/indirectcalltransformer.cpp | 12 +- src/coreclr/jit/jiteh.cpp | 16 +-- src/coreclr/jit/lclvars.cpp | 2 +- src/coreclr/jit/lir.cpp | 2 +- src/coreclr/jit/liveness.cpp | 8 +- src/coreclr/jit/loopcloning.cpp | 32 ++--- src/coreclr/jit/lower.cpp | 28 ++-- src/coreclr/jit/lsra.cpp | 14 +- src/coreclr/jit/morph.cpp | 60 ++++---- src/coreclr/jit/objectalloc.cpp | 2 +- src/coreclr/jit/optimizebools.cpp | 20 +-- src/coreclr/jit/optimizer.cpp | 68 ++++----- src/coreclr/jit/patchpoint.cpp | 4 +- src/coreclr/jit/redundantbranchopts.cpp | 27 ++-- src/coreclr/jit/switchrecognition.cpp | 2 +- 46 files changed, 513 insertions(+), 488 deletions(-) diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp index 6c353a6a238ef9..26f1a3a4d71ec5 100644 --- a/src/coreclr/jit/assertionprop.cpp +++ b/src/coreclr/jit/assertionprop.cpp @@ -5260,7 +5260,7 @@ class AssertionPropFlowCallback { ASSERT_TP pAssertionOut; - if (predBlock->bbJumpKind == BBJ_COND && (predBlock->bbJumpDest == block)) + if (predBlock->getBBJumpKind() == BBJ_COND && (predBlock->bbJumpDest == block)) { pAssertionOut = mJumpDestOut[predBlock->bbNum]; @@ -5460,7 +5460,7 @@ ASSERT_TP* Compiler::optComputeAssertionGen() printf(FMT_BB " valueGen = ", block->bbNum); optPrintAssertionIndices(block->bbAssertionGen); - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { printf(" => " FMT_BB " valueGen = ", block->bbJumpDest->bbNum); optPrintAssertionIndices(jumpDestGen[block->bbNum]); @@ -6020,7 +6020,7 @@ PhaseStatus Compiler::optAssertionPropMain() printf(FMT_BB ":\n", block->bbNum); optDumpAssertionIndices(" in = ", block->bbAssertionIn, "\n"); optDumpAssertionIndices(" out = ", block->bbAssertionOut, "\n"); - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { printf(" " FMT_BB " = ", block->bbJumpDest->bbNum); optDumpAssertionIndices(bbJtrueAssertionOut[block->bbNum], "\n"); diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 8b5cef28a71a82..742025a619e736 100644 --- a/src/coreclr/jit/block.cpp +++ 
b/src/coreclr/jit/block.cpp @@ -1419,7 +1419,7 @@ BasicBlock* Compiler::bbNewBasicBlock(BBjumpKinds jumpKind) /* Record the jump kind in the block */ - block->bbJumpKind = jumpKind; + block->setBBJumpKind(jumpKind DEBUG_ARG(this)); if (jumpKind == BBJ_THROW) { @@ -1499,9 +1499,9 @@ BasicBlock* Compiler::bbNewBasicBlock(BBjumpKinds jumpKind) bool BasicBlock::isBBCallAlwaysPair() const { #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) - if (this->bbJumpKind == BBJ_CALLFINALLY) + if (this->getBBJumpKind() == BBJ_CALLFINALLY) #else - if ((this->bbJumpKind == BBJ_CALLFINALLY) && !(this->bbFlags & BBF_RETLESS_CALL)) + if ((this->getBBJumpKind() == BBJ_CALLFINALLY) && !(this->bbFlags & BBF_RETLESS_CALL)) #endif { #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -1510,7 +1510,7 @@ bool BasicBlock::isBBCallAlwaysPair() const #endif // Some asserts that the next block is a BBJ_ALWAYS of the proper form. assert(this->bbNext != nullptr); - assert(this->bbNext->bbJumpKind == BBJ_ALWAYS); + assert(this->bbNext->getBBJumpKind() == BBJ_ALWAYS); assert(this->bbNext->bbFlags & BBF_KEEP_BBJ_ALWAYS); assert(this->bbNext->isEmpty()); diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 9c7953a12b9e56..9a390d35eb46ef 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -702,8 +702,26 @@ struct BasicBlock : private LIR::Range // a block corresponding to an exit from the try of a try/finally. bool isBBCallAlwaysPairTail() const; +private: BBjumpKinds bbJumpKind; // jump (if any) at the end of this block +public: + BBjumpKinds getBBJumpKind() const + { + return bbJumpKind; + } + + void setBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* comp)) + { +#ifdef DEBUG + // BBJ_NONE should only be assigned when optimizing jumps in Compiler::optOptimizeLayout + // TODO: Change assert to check if comp is in appropriate optimization phase to use BBJ_NONE + // (right now, this assertion does the null check to avoid unused variable warnings) + assert((kind != BBJ_NONE) || (comp != nullptr)); +#endif // DEBUG + bbJumpKind = kind; + } + /* The following union describes the jump target(s) of this block */ union { unsigned bbJumpOffs; // PC offset (temporary only) @@ -1556,7 +1574,7 @@ inline BBArrayIterator BBSwitchTargetList::end() const inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) { assert(block != nullptr); - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp index e8ebf46272fc45..3c8e8cdad6128b 100644 --- a/src/coreclr/jit/codegenarm.cpp +++ b/src/coreclr/jit/codegenarm.cpp @@ -124,7 +124,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) assert(block->isBBCallAlwaysPair()); assert(block->bbNext != NULL); - assert(block->bbNext->bbJumpKind == BBJ_ALWAYS); + assert(block->bbNext->getBBJumpKind() == BBJ_ALWAYS); assert(block->bbNext->bbJumpDest != NULL); assert(block->bbNext->bbJumpDest->bbFlags & BBF_FINALLY_TARGET); @@ -630,7 +630,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH); + noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -1294,7 +1294,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJTrue(GenTreeOp* jtrue) { - 
assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); GenTree* op = jtrue->gtGetOp1(); regNumber reg = genConsumeReg(op); diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index 092a031f270480..c2a0823a091798 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -3745,7 +3745,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH); + noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -4646,7 +4646,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJTrue(GenTreeOp* jtrue) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); GenTree* op = jtrue->gtGetOp1(); regNumber reg = genConsumeReg(op); @@ -4837,7 +4837,7 @@ void CodeGen::genCodeForSelect(GenTreeOp* tree) // void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp index a0a9967b24e042..6c0f23d4f488da 100644 --- a/src/coreclr/jit/codegenarmarch.cpp +++ b/src/coreclr/jit/codegenarmarch.cpp @@ -5515,7 +5515,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) { SetHasTailCalls(true); - noway_assert(block->bbJumpKind == BBJ_RETURN); + noway_assert(block->getBBJumpKind() == BBJ_RETURN); noway_assert(block->GetFirstLIRNode() != nullptr); /* figure out what jump we have */ diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 27bdb1e62a8e21..916ac7854a33a5 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -376,7 +376,7 @@ void CodeGen::genMarkLabelsForCodegen() for (BasicBlock* const block : compiler->Blocks()) { - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_ALWAYS: // This will also handle the BBJ_ALWAYS of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. 
case BBJ_COND: @@ -2256,7 +2256,7 @@ void CodeGen::genReportEH() { for (BasicBlock* const block : compiler->Blocks()) { - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->getBBJumpKind() == BBJ_CALLFINALLY) { ++clonedFinallyCount; } @@ -2582,7 +2582,7 @@ void CodeGen::genReportEH() unsigned reportedClonedFinallyCount = 0; for (BasicBlock* const block : compiler->Blocks()) { - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->getBBJumpKind() == BBJ_CALLFINALLY) { UNATIVE_OFFSET hndBeg, hndEnd; diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index d36eeb32210f9f..fdb473fe29ed7a 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -330,7 +330,7 @@ void CodeGen::genCodeForBBlist() // // Note: We need to have set compCurBB before calling emitAddLabel // - if ((block->bbPrev != nullptr) && (block->bbPrev->bbJumpKind == BBJ_COND) && + if ((block->bbPrev != nullptr) && (block->bbPrev->getBBJumpKind() == BBJ_COND) && (block->bbWeight != block->bbPrev->bbWeight)) { JITDUMP("Adding label due to BB weight difference: BBJ_COND " FMT_BB " with weight " FMT_WT @@ -619,7 +619,7 @@ void CodeGen::genCodeForBBlist() { // We only need the NOP if we're not going to generate any more code as part of the block end. - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_ALWAYS: case BBJ_THROW: @@ -662,7 +662,7 @@ void CodeGen::genCodeForBBlist() /* Do we need to generate a jump or return? */ - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_RETURN: genExitCode(block); @@ -812,10 +812,10 @@ void CodeGen::genCodeForBBlist() assert(ShouldAlignLoops()); assert(!block->isBBCallAlwaysPairTail()); #if FEATURE_EH_CALLFINALLY_THUNKS - assert(block->bbJumpKind != BBJ_CALLFINALLY); + assert(block->getBBJumpKind() != BBJ_CALLFINALLY); #endif // FEATURE_EH_CALLFINALLY_THUNKS - GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->bbJumpKind == BBJ_ALWAYS)); + GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->getBBJumpKind() == BBJ_ALWAYS)); } if ((block->bbNext != nullptr) && (block->bbNext->isLoopAlign())) @@ -2615,7 +2615,7 @@ void CodeGen::genStoreLongLclVar(GenTree* treeNode) // void CodeGen::genCodeForJcc(GenTreeCC* jcc) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); assert(jcc->OperIs(GT_JCC)); inst_JCC(jcc->gtCondition, compiler->compCurBB->bbJumpDest); diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index 6ce58fed53318a..26bbc218fc1a7a 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -1217,7 +1217,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) { SetHasTailCalls(true); - noway_assert(block->bbJumpKind == BBJ_RETURN); + noway_assert(block->getBBJumpKind() == BBJ_RETURN); noway_assert(block->GetFirstLIRNode() != nullptr); /* figure out what jump we have */ @@ -2928,7 +2928,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH); + noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -4136,7 +4136,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // A GT_JCMP node is created for an integer-comparison's conditional branch. 
void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); assert(tree->OperIs(GT_JCMP)); assert(!varTypeIsFloating(tree)); diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp index 4a64ebb374a19f..7d8f3a8233d0da 100644 --- a/src/coreclr/jit/codegenriscv64.cpp +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -886,7 +886,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) { SetHasTailCalls(true); - noway_assert(block->bbJumpKind == BBJ_RETURN); + noway_assert(block->getBBJumpKind() == BBJ_RETURN); noway_assert(block->GetFirstLIRNode() != nullptr); /* figure out what jump we have */ @@ -2574,7 +2574,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH); + noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -3780,7 +3780,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); assert(tree->OperIs(GT_JCMP)); assert(!varTypeIsFloating(tree)); diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 16ffe5a8d77117..cc959b33e344a9 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -369,7 +369,7 @@ void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block) } else { - assert(block->bbJumpKind == BBJ_EHFILTERRET); + assert(block->getBBJumpKind() == BBJ_EHFILTERRET); // The return value has already been computed. 
instGen_Return(0); @@ -1441,7 +1441,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJTrue(GenTreeOp* jtrue) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); GenTree* op = jtrue->gtGetOp1(); regNumber reg = genConsumeReg(op); @@ -4263,7 +4263,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH); + noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -10241,7 +10241,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) if (jmpEpilog) { - noway_assert(block->bbJumpKind == BBJ_RETURN); + noway_assert(block->getBBJumpKind() == BBJ_RETURN); noway_assert(block->GetFirstLIRNode()); // figure out what jump we have diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index e8c146a21707fa..65d01e701d2e61 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -5275,7 +5275,8 @@ PhaseStatus Compiler::placeLoopAlignInstructions() } // If there is an unconditional jump (which is not part of callf/always pair) - if (opts.compJitHideAlignBehindJmp && (block->bbJumpKind == BBJ_ALWAYS) && !block->isBBCallAlwaysPairTail()) + if (opts.compJitHideAlignBehindJmp && (block->getBBJumpKind() == BBJ_ALWAYS) && + !block->isBBCallAlwaysPairTail()) { // Track the lower weight blocks if (block->bbWeight < minBlockSoFar) @@ -5300,7 +5301,7 @@ PhaseStatus Compiler::placeLoopAlignInstructions() bool unmarkedLoopAlign = false; #if FEATURE_EH_CALLFINALLY_THUNKS - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->getBBJumpKind() == BBJ_CALLFINALLY) { // It must be a retless BBJ_CALLFINALLY if we get here. 
assert(!block->isBBCallAlwaysPair()); diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index a786b56edc29dc..39c5ecd33681e5 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -635,7 +635,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->bbJumpKind != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) + if ((bcall->getBBJumpKind() != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { continue; } @@ -649,7 +649,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->bbJumpKind != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) + if ((bcall->getBBJumpKind() != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { continue; } @@ -769,7 +769,7 @@ BasicBlockVisit BasicBlock::VisitRegularSuccs(Compiler* comp, TFunc func) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->bbJumpKind != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) + if ((bcall->getBBJumpKind() != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { continue; } @@ -3125,7 +3125,7 @@ inline bool Compiler::fgIsThrowHlpBlk(BasicBlock* block) return false; } - if (!(block->bbFlags & BBF_INTERNAL) || block->bbJumpKind != BBJ_THROW) + if (!(block->bbFlags & BBF_INTERNAL) || block->getBBJumpKind() != BBJ_THROW) { return false; } @@ -3224,7 +3224,7 @@ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block) fgRemoveBlockAsPred(block); // Update jump kind after the scrub. - block->bbJumpKind = BBJ_THROW; + block->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); // Any block with a throw is rare block->bbSetRunRarely(); @@ -3236,7 +3236,7 @@ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block) if (isCallAlwaysPair) { BasicBlock* leaveBlk = block->bbNext; - noway_assert(leaveBlk->bbJumpKind == BBJ_ALWAYS); + noway_assert(leaveBlk->getBBJumpKind() == BBJ_ALWAYS); // leaveBlk is now unreachable, so scrub the pred lists. 
leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; diff --git a/src/coreclr/jit/emitarm.cpp b/src/coreclr/jit/emitarm.cpp index c1dc431c937287..10a1beadf139fa 100644 --- a/src/coreclr/jit/emitarm.cpp +++ b/src/coreclr/jit/emitarm.cpp @@ -4379,7 +4379,7 @@ void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount /* = 0 #ifdef DEBUG // Mark the finally call - if (ins == INS_b && emitComp->compCurBB->bbJumpKind == BBJ_CALLFINALLY) + if (ins == INS_b && emitComp->compCurBB->getBBJumpKind() == BBJ_CALLFINALLY) { id->idDebugOnlyInfo()->idFinallyCall = true; } @@ -4523,7 +4523,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->bbJumpKind == BBJ_EHCATCHRET) + if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) { id->idDebugOnlyInfo()->idCatchRet = true; } diff --git a/src/coreclr/jit/emitarm64.cpp b/src/coreclr/jit/emitarm64.cpp index f0428d222fc6c5..82131ee325dd40 100644 --- a/src/coreclr/jit/emitarm64.cpp +++ b/src/coreclr/jit/emitarm64.cpp @@ -8495,7 +8495,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->bbJumpKind == BBJ_EHCATCHRET) + if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) { id->idDebugOnlyInfo()->idCatchRet = true; } @@ -8670,7 +8670,7 @@ void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount) #ifdef DEBUG // Mark the finally call - if (ins == INS_bl_local && emitComp->compCurBB->bbJumpKind == BBJ_CALLFINALLY) + if (ins == INS_bl_local && emitComp->compCurBB->getBBJumpKind() == BBJ_CALLFINALLY) { id->idDebugOnlyInfo()->idFinallyCall = true; } diff --git a/src/coreclr/jit/emitloongarch64.cpp b/src/coreclr/jit/emitloongarch64.cpp index 73f2dffebada8f..d6004451fcb874 100644 --- a/src/coreclr/jit/emitloongarch64.cpp +++ b/src/coreclr/jit/emitloongarch64.cpp @@ -2046,7 +2046,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->bbJumpKind == BBJ_EHCATCHRET) + if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) { id->idDebugOnlyInfo()->idCatchRet = true; } diff --git a/src/coreclr/jit/emitriscv64.cpp b/src/coreclr/jit/emitriscv64.cpp index edfe30a3026f62..bfc91a35615728 100644 --- a/src/coreclr/jit/emitriscv64.cpp +++ b/src/coreclr/jit/emitriscv64.cpp @@ -1030,7 +1030,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->bbJumpKind == BBJ_EHCATCHRET) + if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) { id->idDebugOnlyInfo()->idCatchRet = true; } diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp index 65789413500cd3..3e2afe7a830c1c 100644 --- a/src/coreclr/jit/emitxarch.cpp +++ b/src/coreclr/jit/emitxarch.cpp @@ -7614,7 +7614,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->bbJumpKind == BBJ_EHCATCHRET) + if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) { id->idDebugOnlyInfo()->idCatchRet = true; } @@ -9221,7 +9221,7 @@ void emitter::emitIns_J(instruction ins, #ifdef DEBUG // Mark the finally call - if (ins == INS_call && emitComp->compCurBB->bbJumpKind == BBJ_CALLFINALLY) + if (ins == INS_call && emitComp->compCurBB->getBBJumpKind() == BBJ_CALLFINALLY) { id->idDebugOnlyInfo()->idFinallyCall = true; } diff 
--git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 00925dcf12c2b5..3573a015de3856 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -206,7 +206,7 @@ BasicBlock* Compiler::fgNewBasicBlock(BBjumpKinds jumpKind) /* Allocate the block descriptor */ block = bbNewBasicBlock(jumpKind); - noway_assert(block->bbJumpKind == jumpKind); + noway_assert(block->getBBJumpKind() == jumpKind); /* Append the block to the end of the global basic block list */ @@ -395,7 +395,7 @@ void Compiler::fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSw { noway_assert(oldSwitchBlock != nullptr); noway_assert(newSwitchBlock != nullptr); - noway_assert(oldSwitchBlock->bbJumpKind == BBJ_SWITCH); + noway_assert(oldSwitchBlock->getBBJumpKind() == BBJ_SWITCH); assert(fgPredsComputed); // Walk the switch's jump table, updating the predecessor for each branch. @@ -457,7 +457,7 @@ void Compiler::fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* ne noway_assert(blockSwitch != nullptr); noway_assert(newTarget != nullptr); noway_assert(oldTarget != nullptr); - noway_assert(blockSwitch->bbJumpKind == BBJ_SWITCH); + noway_assert(blockSwitch->getBBJumpKind() == BBJ_SWITCH); assert(fgPredsComputed); // For the jump targets values that match oldTarget of our BBJ_SWITCH @@ -537,7 +537,7 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, Bas assert(block != nullptr); assert(fgPredsComputed); - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_CALLFINALLY: case BBJ_COND: @@ -911,7 +911,7 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, Fixed } // Determine if the call site is in a no-return block - if (isInlining && (impInlineInfo->iciBlock->bbJumpKind == BBJ_THROW)) + if (isInlining && (impInlineInfo->iciBlock->getBBJumpKind() == BBJ_THROW)) { compInlineResult->Note(InlineObservation::CALLSITE_IN_NORETURN_REGION); } @@ -2721,7 +2721,7 @@ void Compiler::fgMarkBackwardJump(BasicBlock* targetBlock, BasicBlock* sourceBlo for (BasicBlock* const block : Blocks(targetBlock, sourceBlock)) { - if (((block->bbFlags & BBF_BACKWARD_JUMP) == 0) && (block->bbJumpKind != BBJ_RETURN)) + if (((block->bbFlags & BBF_BACKWARD_JUMP) == 0) && (block->getBBJumpKind() != BBJ_RETURN)) { block->bbFlags |= BBF_BACKWARD_JUMP; compHasBackwardJump = true; @@ -2771,7 +2771,7 @@ void Compiler::fgLinkBasicBlocks() for (BasicBlock* const curBBdesc : Blocks()) { - switch (curBBdesc->bbJumpKind) + switch (curBBdesc->getBBJumpKind()) { case BBJ_COND: case BBJ_ALWAYS: @@ -3675,7 +3675,7 @@ void Compiler::fgFindBasicBlocks() // Still inside the filter block->setHndIndex(XTnum); - if (block->bbJumpKind == BBJ_EHFILTERRET) + if (block->getBBJumpKind() == BBJ_EHFILTERRET) { // Mark catch handler as successor. block->bbJumpDest = hndBegBB; @@ -3808,7 +3808,7 @@ void Compiler::fgFindBasicBlocks() // BBJ_EHFINALLYRET that were imported to BBJ_EHFAULTRET. if ((hndBegBB->bbCatchTyp == BBCT_FAULT) && block->KindIs(BBJ_EHFINALLYRET)) { - block->bbJumpKind = BBJ_EHFAULTRET; + block->setBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this)); } } @@ -4015,9 +4015,9 @@ void Compiler::fgFixEntryFlowForOSR() // Now branch from method start to the OSR entry. 
// fgEnsureFirstBBisScratch(); - assert(fgFirstBB->bbJumpKind == BBJ_NONE); + assert(fgFirstBB->getBBJumpKind() == BBJ_NONE); fgRemoveRefPred(fgFirstBB->bbNext, fgFirstBB); - fgFirstBB->bbJumpKind = BBJ_ALWAYS; + fgFirstBB->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); fgFirstBB->bbJumpDest = fgOSREntryBB; FlowEdge* const edge = fgAddRefPred(fgOSREntryBB, fgFirstBB); edge->setLikelihood(1.0); @@ -4057,7 +4057,7 @@ void Compiler::fgCheckBasicBlockControlFlow() continue; } - switch (blk->bbJumpKind) + switch (blk->getBBJumpKind()) { case BBJ_NONE: // block flows into the next one (no jump) @@ -4099,14 +4099,14 @@ void Compiler::fgCheckBasicBlockControlFlow() HBtab = ehGetDsc(blk->getHndIndex()); // Endfilter allowed only in a filter block - if (blk->bbJumpKind == BBJ_EHFILTERRET) + if (blk->getBBJumpKind() == BBJ_EHFILTERRET) { if (!HBtab->HasFilter()) { BADCODE("Unexpected endfilter"); } } - else if (blk->bbJumpKind == BBJ_EHFINALLYRET) + else if (blk->getBBJumpKind() == BBJ_EHFINALLYRET) { // endfinally allowed only in a finally block if (!HBtab->HasFinallyHandler()) { BADCODE("Unexpected endfinally"); } } - else if (blk->bbJumpKind == BBJ_EHFAULTRET) + else if (blk->getBBJumpKind() == BBJ_EHFAULTRET) { // 'endfault' (alias of IL 'endfinally') allowed only in a fault block if (!HBtab->HasFaultHandler()) { BADCODE("Unexpected endfault"); } } @@ -4560,7 +4560,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) { // We'd like to use fgNewBBafter(), but we need to update the preds list before linking in the new block. // (We need the successors of 'curr' to be correct when we do this.) - BasicBlock* newBlock = bbNewBasicBlock(curr->bbJumpKind); + BasicBlock* newBlock = bbNewBasicBlock(curr->getBBJumpKind()); // Start the new block with no refs. When we set the preds below, this will get updated correctly. newBlock->bbRefs = 0; @@ -4568,7 +4568,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) // For each successor of the original block, set the new block as their predecessor. // Note we are using the "rational" version of the successor iterator that does not hide the finallyret arcs. // Without these arcs, a block 'b' may not be a member of succs(preds(b)) - if (curr->bbJumpKind != BBJ_SWITCH) + if (curr->getBBJumpKind() != BBJ_SWITCH) { for (BasicBlock* const succ : curr->Succs(this)) { @@ -4628,7 +4628,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) curr->bbFlags &= ~(BBF_HAS_JMP | BBF_RETLESS_CALL); // Default to fallthru, and add the arc for that. - curr->bbJumpKind = BBJ_NONE; + curr->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); fgAddRefPred(newBlock, curr); return newBlock; @@ -4874,7 +4874,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) JITDUMP("Splitting edge from " FMT_BB " to " FMT_BB "; adding " FMT_BB "\n", curr->bbNum, succ->bbNum, newBlock->bbNum); - if (curr->bbJumpKind == BBJ_COND) + if (curr->getBBJumpKind() == BBJ_COND) { fgReplacePred(succ, curr, newBlock); if (curr->bbJumpDest == succ) { curr->bbJumpDest = newBlock; } fgAddRefPred(newBlock, curr); } - else if (curr->bbJumpKind == BBJ_SWITCH) + else if (curr->getBBJumpKind() == BBJ_SWITCH) { // newBlock replaces 'succ' in the switch.
fgReplaceSwitchJumpTarget(curr, newBlock, succ); @@ -4894,7 +4894,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) } else { - assert(curr->bbJumpKind == BBJ_ALWAYS); + assert(curr->getBBJumpKind() == BBJ_ALWAYS); fgReplacePred(succ, curr, newBlock); curr->bbJumpDest = newBlock; fgAddRefPred(newBlock, curr); @@ -4907,7 +4907,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) // This isn't accurate, but it is complex to compute a reasonable number so just assume that we take the // branch 50% of the time. // - if (curr->bbJumpKind != BBJ_ALWAYS) + if (curr->getBBJumpKind() != BBJ_ALWAYS) { newBlock->inheritWeightPercentage(curr, 50); } @@ -5054,7 +5054,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) } #endif // FEATURE_EH_FUNCLETS - if (bPrev->bbJumpKind == BBJ_CALLFINALLY) + if (bPrev->getBBJumpKind() == BBJ_CALLFINALLY) { // bPrev CALL becomes RETLESS as the BBJ_ALWAYS block is unreachable bPrev->bbFlags |= BBF_RETLESS_CALL; @@ -5063,7 +5063,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) NO_WAY("No retless call finally blocks; need unwind target instead"); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } - else if (bPrev->bbJumpKind == BBJ_ALWAYS && bPrev->bbJumpDest == block->bbNext && + else if (bPrev->getBBJumpKind() == BBJ_ALWAYS && bPrev->bbJumpDest == block->bbNext && !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) && (block->bbNext != fgFirstColdBlock)) { @@ -5071,7 +5071,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS), // because that would violate our invariant that BBJ_CALLFINALLY blocks are followed by // BBJ_ALWAYS blocks. 
- bPrev->bbJumpKind = BBJ_NONE; + bPrev->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } // If this is the first Cold basic block update fgFirstColdBlock @@ -5092,7 +5092,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) if (block->isBBCallAlwaysPair()) { BasicBlock* leaveBlk = block->bbNext; - noway_assert(leaveBlk->bbJumpKind == BBJ_ALWAYS); + noway_assert(leaveBlk->getBBJumpKind() == BBJ_ALWAYS); leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; leaveBlk->bbRefs = 0; @@ -5104,7 +5104,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) fgClearFinallyTargetBit(leaveBlk->bbJumpDest); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } - else if (block->bbJumpKind == BBJ_RETURN) + else if (block->getBBJumpKind() == BBJ_RETURN) { fgRemoveReturnBlock(block); } @@ -5129,7 +5129,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) #ifdef DEBUG /* Some extra checks for the empty case */ - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_NONE: break; @@ -5139,7 +5139,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) noway_assert(block->bbJumpDest != block); /* Empty GOTO can be removed iff bPrev is BBJ_NONE */ - noway_assert(bPrev && bPrev->bbJumpKind == BBJ_NONE); + noway_assert(bPrev && bPrev->getBBJumpKind() == BBJ_NONE); break; default: @@ -5154,7 +5154,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) BasicBlock* succBlock; - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->getBBJumpKind() == BBJ_ALWAYS) { succBlock = block->bbJumpDest; } @@ -5207,7 +5207,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) /* Must be a fall through to next block */ - noway_assert(block->bbJumpKind == BBJ_NONE); + noway_assert(block->getBBJumpKind() == BBJ_NONE); /* old block no longer gets the extra ref count for being the first block */ block->bbRefs--; @@ -5235,7 +5235,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) /* If predBlock is a new predecessor, then add it to succBlock's predecessor's list. 
*/ - if (predBlock->bbJumpKind != BBJ_SWITCH) + if (predBlock->getBBJumpKind() != BBJ_SWITCH) { // Even if the pred is not a switch, we could have a conditional branch // to the fallthrough, so duplicate there could be preds @@ -5246,7 +5246,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) } /* change all jumps to the removed block */ - switch (predBlock->bbJumpKind) + switch (predBlock->getBBJumpKind()) { default: noway_assert(!"Unexpected bbJumpKind in fgRemoveBlock()"); @@ -5257,10 +5257,10 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) PREFIX_ASSUME(bPrev != nullptr); /* In the case of BBJ_ALWAYS we have to change the type of its predecessor */ - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->getBBJumpKind() == BBJ_ALWAYS) { /* bPrev now becomes a BBJ_ALWAYS */ - bPrev->bbJumpKind = BBJ_ALWAYS; + bPrev->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); bPrev->bbJumpDest = succBlock; } break; @@ -5313,7 +5313,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) if (bPrev != nullptr) { - switch (bPrev->bbJumpKind) + switch (bPrev->getBBJumpKind()) { case BBJ_CALLFINALLY: // If prev is a BBJ_CALLFINALLY it better be marked as RETLESS @@ -5333,7 +5333,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) if ((bPrev == fgFirstBB) || !bPrev->isBBCallAlwaysPairTail()) { // It's safe to change the jump type - bPrev->bbJumpKind = BBJ_NONE; + bPrev->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } } break; @@ -5378,11 +5378,11 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) if (bSrc->bbFallsThrough() && (bSrc->bbNext != bDst)) { - switch (bSrc->bbJumpKind) + switch (bSrc->getBBJumpKind()) { case BBJ_NONE: - bSrc->bbJumpKind = BBJ_ALWAYS; + bSrc->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); bSrc->bbJumpDest = bDst; JITDUMP("Block " FMT_BB " ended with a BBJ_NONE, Changed to an unconditional jump to " FMT_BB "\n", bSrc->bbNum, bSrc->bbJumpDest->bbNum); @@ -5459,10 +5459,10 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) // If bSrc is an unconditional branch to the next block // then change it to a BBJ_NONE block // - if ((bSrc->bbJumpKind == BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && + if ((bSrc->getBBJumpKind() == BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (bSrc->bbJumpDest == bSrc->bbNext)) { - bSrc->bbJumpKind = BBJ_NONE; + bSrc->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); JITDUMP("Changed an unconditional jump from " FMT_BB " to the next block " FMT_BB " into a BBJ_NONE block\n", bSrc->bbNum, bSrc->bbNext->bbNum); @@ -6273,14 +6273,14 @@ bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt) } else { - if (bAlt->bbJumpKind == BBJ_ALWAYS) + if (bAlt->getBBJumpKind() == BBJ_ALWAYS) { // Our result is true if bAlt's weight is more than bCur's weight result = (bAlt->bbWeight > bCur->bbWeight); } else { - noway_assert(bAlt->bbJumpKind == BBJ_COND); + noway_assert(bAlt->getBBJumpKind() == BBJ_COND); // Our result is true if bAlt's weight is more than twice bCur's weight result = (bAlt->bbWeight > (2 * bCur->bbWeight)); } @@ -6570,7 +6570,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, { goodBlk = blk; } - else if ((goodBlk->bbJumpKind == BBJ_COND) || (blk->bbJumpKind != BBJ_COND)) + else if ((goodBlk->getBBJumpKind() == BBJ_COND) || (blk->getBBJumpKind() != BBJ_COND)) { if ((blk == nearBlk) || !reachedNear) { diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index 
afc1bbc1db73ed..edf64aeccdd378 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -101,7 +101,7 @@ void Compiler::fgDebugCheckUpdate() if (block->isEmpty() && !(block->bbFlags & BBF_DONT_REMOVE)) { - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_CALLFINALLY: case BBJ_EHFINALLYRET: @@ -143,13 +143,13 @@ void Compiler::fgDebugCheckUpdate() // Check for unnecessary jumps to the next block bool doAssertOnJumpToNextBlock = false; // unless we have a BBJ_COND or BBJ_ALWAYS we can not assert - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { // A conditional branch should never jump to the next block // as it can be folded into a BBJ_NONE; doAssertOnJumpToNextBlock = true; } - else if (block->bbJumpKind == BBJ_ALWAYS) + else if (block->getBBJumpKind() == BBJ_ALWAYS) { // Generally we will want to assert if a BBJ_ALWAYS branches to the next block doAssertOnJumpToNextBlock = true; @@ -184,7 +184,7 @@ void Compiler::fgDebugCheckUpdate() /* Make sure BBF_KEEP_BBJ_ALWAYS is set correctly */ - if ((block->bbJumpKind == BBJ_ALWAYS) && prevIsCallAlwaysPair) + if ((block->getBBJumpKind() == BBJ_ALWAYS) && prevIsCallAlwaysPair) { noway_assert(block->bbFlags & BBF_KEEP_BBJ_ALWAYS); } @@ -192,7 +192,7 @@ void Compiler::fgDebugCheckUpdate() /* For a BBJ_CALLFINALLY block we make sure that we are followed by */ /* an BBJ_ALWAYS block with BBF_INTERNAL set */ /* or that it's a BBF_RETLESS_CALL */ - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->getBBJumpKind() == BBJ_CALLFINALLY) { assert((block->bbFlags & BBF_RETLESS_CALL) || block->isBBCallAlwaysPair()); } @@ -984,7 +984,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) } } - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { fprintf(fgxFile, "\\n"); @@ -1015,11 +1015,11 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) { fprintf(fgxFile, ", shape = \"house\""); } - else if (block->bbJumpKind == BBJ_RETURN) + else if (block->getBBJumpKind() == BBJ_RETURN) { fprintf(fgxFile, ", shape = \"invhouse\""); } - else if (block->bbJumpKind == BBJ_THROW) + else if (block->getBBJumpKind() == BBJ_THROW) { fprintf(fgxFile, ", shape = \"trapezium\""); } @@ -1035,7 +1035,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) fprintf(fgxFile, "\n <block"); fprintf(fgxFile, "\n id=\"%d\"", block->bbNum); fprintf(fgxFile, "\n ordinal=\"%d\"", blockOrdinal); - fprintf(fgxFile, "\n jumpKind=\"%s\"", kindImage[block->bbJumpKind]); + fprintf(fgxFile, "\n jumpKind=\"%s\"", kindImage[block->getBBJumpKind()]); if (block->hasTryIndex()) { fprintf(fgxFile, "\n inTry=\"%s\"", "true"); } @@ -1152,7 +1152,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) fprintf(fgxFile, "\n id=\"%d\"", edgeNum); fprintf(fgxFile, "\n source=\"%d\"", bSource->bbNum); fprintf(fgxFile, "\n target=\"%d\"", bTarget->bbNum); - if (bSource->bbJumpKind == BBJ_SWITCH) + if (bSource->getBBJumpKind() == BBJ_SWITCH) { if (edge->getDupCount() >= 2) { @@ -2004,7 +2004,7 @@ void Compiler::fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth /* = 0 * } else { - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_COND: printf("-> " FMT_BB "%*s ( cond )", block->bbJumpDest->bbNum, @@ -2606,8 +2606,8 @@ bool BBPredsChecker::CheckEhTryDsc(BasicBlock* block, BasicBlock* blockPred, EHb // block that does a local call to the finally. This BBJ_ALWAYS is within // the try region protected by the finally (for x86, ARM), but that's ok.
BasicBlock* prevBlock = block->bbPrev; - if (prevBlock->bbJumpKind == BBJ_CALLFINALLY && block->bbJumpKind == BBJ_ALWAYS && - blockPred->bbJumpKind == BBJ_EHFINALLYRET) + if (prevBlock->getBBJumpKind() == BBJ_CALLFINALLY && block->getBBJumpKind() == BBJ_ALWAYS && + blockPred->getBBJumpKind() == BBJ_EHFINALLYRET) { return true; } @@ -2634,7 +2634,7 @@ bool BBPredsChecker::CheckEhHndDsc(BasicBlock* block, BasicBlock* blockPred, EHb } // Our try block can call our finally block - if ((block->bbCatchTyp == BBCT_FINALLY) && (blockPred->bbJumpKind == BBJ_CALLFINALLY) && + if ((block->bbCatchTyp == BBCT_FINALLY) && (blockPred->getBBJumpKind() == BBJ_CALLFINALLY) && comp->ehCallFinallyInCorrectRegion(blockPred, block->getHndIndex())) { return true; @@ -2660,7 +2660,7 @@ bool BBPredsChecker::CheckEhHndDsc(BasicBlock* block, BasicBlock* blockPred, EHb bool BBPredsChecker::CheckJump(BasicBlock* blockPred, BasicBlock* block) { - switch (blockPred->bbJumpKind) + switch (blockPred->getBBJumpKind()) { case BBJ_COND: assert(blockPred->bbNext == block || blockPred->bbJumpDest == block); @@ -2734,7 +2734,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (bcall->getBBJumpKind() != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) { continue; } @@ -2756,7 +2756,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) for (BasicBlock* const bcall : comp->Blocks(comp->fgFirstFuncletBB)) { - if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (bcall->getBBJumpKind() != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) { continue; } @@ -2878,12 +2878,12 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef // if (compPostImportationCleanupDone || ((block->bbFlags & BBF_IMPORTED) != 0)) { - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { assert((!allNodesLinked || (block->lastNode()->gtNext == nullptr)) && block->lastNode()->OperIsConditionalJump()); } - else if (block->bbJumpKind == BBJ_SWITCH) + else if (block->getBBJumpKind() == BBJ_SWITCH) { assert((!allNodesLinked || (block->lastNode()->gtNext == nullptr)) && (block->lastNode()->gtOper == GT_SWITCH || block->lastNode()->gtOper == GT_SWITCH_TABLE)); @@ -2987,7 +2987,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef // Don't depend on predecessors list for the check. for (BasicBlock* const succBlock : block->Succs()) { - if (succBlock->bbJumpKind == BBJ_CALLFINALLY) + if (succBlock->getBBJumpKind() == BBJ_CALLFINALLY) { BasicBlock* finallyBlock = succBlock->bbJumpDest; assert(finallyBlock->hasHndIndex()); @@ -3729,7 +3729,7 @@ void Compiler::fgDebugCheckBlockLinks() // If this is a switch, check that the tables are consistent. // Note that we don't call GetSwitchDescMap(), because it has the side-effect // of allocating it if it is not present. - if (block->bbJumpKind == BBJ_SWITCH && m_switchDescMap != nullptr) + if (block->getBBJumpKind() == BBJ_SWITCH && m_switchDescMap != nullptr) { SwitchUniqueSuccSet uniqueSuccSet; if (m_switchDescMap->Lookup(block, &uniqueSuccSet)) @@ -4792,13 +4792,13 @@ void Compiler::fgDebugCheckLoopTable() // The pre-header can only be BBJ_ALWAYS or BBJ_NONE and must enter the loop. 
BasicBlock* e = loop.lpEntry; - if (h->bbJumpKind == BBJ_ALWAYS) + if (h->getBBJumpKind() == BBJ_ALWAYS) { assert(h->bbJumpDest == e); } else { - assert(h->bbJumpKind == BBJ_NONE); + assert(h->getBBJumpKind() == BBJ_NONE); assert(h->bbNext == e); assert(loop.lpTop == e); assert(loop.lpIsTopEntry()); @@ -4907,7 +4907,7 @@ void Compiler::fgDebugCheckLoopTable() // TODO: We might want the following assert, but there are cases where we don't move all // return blocks out of the loop. // Return blocks are not allowed inside a loop; they should have been moved elsewhere. - // assert(block->bbJumpKind != BBJ_RETURN); + // assert(block->getBBJumpKind() != BBJ_RETURN); } else { diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 0d6fedf24ce3e0..f6549f3b538dfe 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -100,7 +100,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() } // If the finally's block jumps back to itself, then it is not empty. - if ((firstBlock->bbJumpKind == BBJ_ALWAYS) && firstBlock->bbJumpDest == firstBlock) + if ((firstBlock->getBBJumpKind() == BBJ_ALWAYS) && firstBlock->bbJumpDest == firstBlock) { JITDUMP("EH#%u finally has basic block that jumps to itself; skipping.\n", XTnum); XTnum++; @@ -142,7 +142,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() { BasicBlock* nextBlock = currentBlock->bbNext; - if ((currentBlock->bbJumpKind == BBJ_CALLFINALLY) && (currentBlock->bbJumpDest == firstBlock)) + if ((currentBlock->getBBJumpKind() == BBJ_CALLFINALLY) && (currentBlock->bbJumpDest == firstBlock)) { // Retarget the call finally to jump to the return // point. @@ -160,10 +160,10 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() JITDUMP("so that " FMT_BB " jumps to " FMT_BB "; then remove " FMT_BB "\n", currentBlock->bbNum, postTryFinallyBlock->bbNum, leaveBlock->bbNum); - noway_assert(leaveBlock->bbJumpKind == BBJ_ALWAYS); + noway_assert(leaveBlock->getBBJumpKind() == BBJ_ALWAYS); currentBlock->bbJumpDest = postTryFinallyBlock; - currentBlock->bbJumpKind = BBJ_ALWAYS; + currentBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // Ref count updates. fgAddRefPred(postTryFinallyBlock, currentBlock); @@ -373,7 +373,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() // Look for blocks that are always jumps to a call finally // pair that targets the finally - if (firstTryBlock->bbJumpKind != BBJ_ALWAYS) + if (firstTryBlock->getBBJumpKind() != BBJ_ALWAYS) { JITDUMP("EH#%u first try block " FMT_BB " not jump to a callfinally; skipping.\n", XTnum, firstTryBlock->bbNum); @@ -437,7 +437,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock; block = block->bbNext) { - if ((block->bbJumpKind == BBJ_CALLFINALLY) && (block->bbJumpDest == firstHandlerBlock)) + if ((block->getBBJumpKind() == BBJ_CALLFINALLY) && (block->bbJumpDest == firstHandlerBlock)) { assert(block->isBBCallAlwaysPair()); @@ -463,7 +463,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() // Time to optimize. 
// // (1) Convert the callfinally to a normal jump to the handler - callFinally->bbJumpKind = BBJ_ALWAYS; + callFinally->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // Identify the leave block and the continuation BasicBlock* const leave = callFinally->bbNext; @@ -536,13 +536,13 @@ PhaseStatus Compiler::fgRemoveEmptyTry() block->clearHndIndex(); } - if (block->bbJumpKind == BBJ_EHFINALLYRET) + if (block->getBBJumpKind() == BBJ_EHFINALLYRET) { Statement* finallyRet = block->lastStmt(); GenTree* finallyRetExpr = finallyRet->GetRootNode(); assert(finallyRetExpr->gtOper == GT_RETFILT); fgRemoveStmt(block, finallyRet); - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = continuation; fgAddRefPred(continuation, block); fgRemoveRefPred(leave, block); @@ -738,7 +738,7 @@ PhaseStatus Compiler::fgCloneFinally() for (const BasicBlock* block = firstBlock; block != nextBlock; block = block->bbNext) { - if (block->bbJumpKind == BBJ_SWITCH) + if (block->getBBJumpKind() == BBJ_SWITCH) { hasSwitch = true; break; @@ -753,7 +753,7 @@ PhaseStatus Compiler::fgCloneFinally() regionStmtCount++; } - hasFinallyRet = hasFinallyRet || (block->bbJumpKind == BBJ_EHFINALLYRET); + hasFinallyRet = hasFinallyRet || (block->getBBJumpKind() == BBJ_EHFINALLYRET); isAllRare = isAllRare && block->isRunRarely(); } @@ -821,11 +821,11 @@ PhaseStatus Compiler::fgCloneFinally() // through to a callfinally. BasicBlock* jumpDest = nullptr; - if ((block->bbJumpKind == BBJ_NONE) && (block == lastTryBlock)) + if ((block->getBBJumpKind() == BBJ_NONE) && (block == lastTryBlock)) { jumpDest = block->bbNext; } - else if (block->bbJumpKind == BBJ_ALWAYS) + else if (block->getBBJumpKind() == BBJ_ALWAYS) { jumpDest = block->bbJumpDest; } @@ -989,7 +989,7 @@ PhaseStatus Compiler::fgCloneFinally() { BasicBlock* const placeToMoveAfter = firstCallFinallyBlock->bbPrev; - if ((placeToMoveAfter->bbJumpKind == BBJ_ALWAYS) && + if ((placeToMoveAfter->getBBJumpKind() == BBJ_ALWAYS) && (placeToMoveAfter->bbJumpDest == normalCallFinallyBlock)) { JITDUMP("Moving callfinally " FMT_BB " to be first in line, before " FMT_BB "\n", @@ -1050,7 +1050,8 @@ PhaseStatus Compiler::fgCloneFinally() // Avoid asserts when `fgNewBBinRegion` verifies the handler table, by mapping any cloned finally // return blocks to BBJ_ALWAYS (which we would do below if we didn't do it here). - BBjumpKinds bbNewJumpKind = (block->bbJumpKind == BBJ_EHFINALLYRET) ? BBJ_ALWAYS : block->bbJumpKind; + BBjumpKinds bbNewJumpKind = + (block->getBBJumpKind() == BBJ_EHFINALLYRET) ? BBJ_ALWAYS : block->getBBJumpKind(); if (block == firstBlock) { @@ -1132,13 +1133,13 @@ PhaseStatus Compiler::fgCloneFinally() { BasicBlock* newBlock = blockMap[block]; - if (block->bbJumpKind == BBJ_EHFINALLYRET) + if (block->getBBJumpKind() == BBJ_EHFINALLYRET) { Statement* finallyRet = newBlock->lastStmt(); GenTree* finallyRetExpr = finallyRet->GetRootNode(); assert(finallyRetExpr->gtOper == GT_RETFILT); fgRemoveStmt(newBlock, finallyRet); - assert(newBlock->bbJumpKind == BBJ_ALWAYS); // we mapped this above already + assert(newBlock->getBBJumpKind() == BBJ_ALWAYS); // we mapped this above already newBlock->bbJumpDest = normalCallFinallyReturn; fgAddRefPred(normalCallFinallyReturn, newBlock); @@ -1181,7 +1182,7 @@ PhaseStatus Compiler::fgCloneFinally() // This call returns to the expected spot, so // retarget it to branch to the clone. 
currentBlock->bbJumpDest = firstCloneBlock; - currentBlock->bbJumpKind = BBJ_ALWAYS; + currentBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // Ref count updates. fgAddRefPred(firstCloneBlock, currentBlock); @@ -1195,7 +1196,7 @@ PhaseStatus Compiler::fgCloneFinally() // All preds should be BBJ_EHFINALLYRETs from the finally. for (BasicBlock* const leavePred : leaveBlock->PredBlocks()) { - assert(leavePred->bbJumpKind == BBJ_EHFINALLYRET); + assert(leavePred->getBBJumpKind() == BBJ_EHFINALLYRET); assert(leavePred->getHndIndex() == XTnum); } @@ -1241,9 +1242,9 @@ PhaseStatus Compiler::fgCloneFinally() BasicBlock* const hndEndIter = HBtab->ebdHndLast->bbNext; for (BasicBlock* block = hndBegIter; block != hndEndIter; block = block->bbNext) { - if (block->bbJumpKind == BBJ_EHFINALLYRET) + if (block->getBBJumpKind() == BBJ_EHFINALLYRET) { - block->bbJumpKind = BBJ_EHFAULTRET; + block->setBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this)); } } } @@ -1407,7 +1408,7 @@ void Compiler::fgDebugCheckTryFinallyExits() // logically "belong" to a child region and the exit // path validity will be checked when looking at the // try blocks in that region. - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->getBBJumpKind() == BBJ_CALLFINALLY) { continue; } @@ -1433,13 +1434,13 @@ void Compiler::fgDebugCheckTryFinallyExits() bool isCallToFinally = false; #if FEATURE_EH_CALLFINALLY_THUNKS - if (succBlock->bbJumpKind == BBJ_CALLFINALLY) + if (succBlock->getBBJumpKind() == BBJ_CALLFINALLY) { // case (a1) isCallToFinally = isFinally && (succBlock->bbJumpDest == finallyBlock); } #else - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->getBBJumpKind() == BBJ_CALLFINALLY) { // case (a2) isCallToFinally = isFinally && (block->bbJumpDest == finallyBlock); @@ -1453,7 +1454,7 @@ void Compiler::fgDebugCheckTryFinallyExits() // case (b) isJumpToClonedFinally = true; } - else if (succBlock->bbJumpKind == BBJ_ALWAYS) + else if (succBlock->getBBJumpKind() == BBJ_ALWAYS) { if (succBlock->isEmpty()) { @@ -1466,7 +1467,7 @@ void Compiler::fgDebugCheckTryFinallyExits() } } } - else if (succBlock->bbJumpKind == BBJ_NONE) + else if (succBlock->getBBJumpKind() == BBJ_NONE) { if (succBlock->isEmpty()) { @@ -1899,7 +1900,7 @@ bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, { // We expect callfinallys to be invoked by a BBJ_ALWAYS at this // stage in compilation. - if (block->bbJumpKind != BBJ_ALWAYS) + if (block->getBBJumpKind() != BBJ_ALWAYS) { // Possible paranoia assert here -- no flow successor of // this block should be a callfinally for this try. 
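[Editorial sketch, not part of the patch] The fgehopt.cpp hunks above repeatedly assert the BBJ_CALLFINALLY/BBJ_ALWAYS pairing invariant through the new getter. A hedged, standalone rendering of that invariant, folding the asserts of BasicBlock::isBBCallAlwaysPair() (block.cpp hunk above, non-ARM shape) into a hypothetical predicate whose name is invented for illustration:

// Hypothetical helper mirroring isBBCallAlwaysPair() plus its asserts:
// a BBJ_CALLFINALLY that is not retless must be followed by its paired
// BBJ_ALWAYS continuation block.
bool isCallAlwaysPair(const BasicBlock* b)
{
    return (b->getBBJumpKind() == BBJ_CALLFINALLY) &&
           ((b->bbFlags & BBF_RETLESS_CALL) == 0) &&
           (b->bbNext != nullptr) &&
           (b->bbNext->getBBJumpKind() == BBJ_ALWAYS);
}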
@@ -2195,7 +2196,7 @@ PhaseStatus Compiler::fgTailMergeThrows() BasicBlock* const predBlock = predEdge->getSourceBlock(); nextPredEdge = predEdge->getNextPredEdge(); - switch (predBlock->bbJumpKind) + switch (predBlock->getBBJumpKind()) { case BBJ_NONE: { diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp index 040cd378ac9c0f..14f42c83254c53 100644 --- a/src/coreclr/jit/fgflow.cpp +++ b/src/coreclr/jit/fgflow.cpp @@ -343,7 +343,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block) BasicBlock* bNext; - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_CALLFINALLY: if (!(block->bbFlags & BBF_RETLESS_CALL)) { @@ -354,7 +354,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block) bNext = block->bbNext; /* bNext is an unreachable BBJ_ALWAYS block */ - noway_assert(bNext->bbJumpKind == BBJ_ALWAYS); + noway_assert(bNext->getBBJumpKind() == BBJ_ALWAYS); while (bNext->countOfInEdges() > 0) { @@ -403,7 +403,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->bbFlags & BBF_REMOVED) || bcall->bbJumpKind != BBJ_CALLFINALLY || + if ((bcall->bbFlags & BBF_REMOVED) || bcall->getBBJumpKind() != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) { continue; } @@ -470,7 +470,7 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock* for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (bcall->getBBJumpKind() != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) { continue; } @@ -491,7 +491,7 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock* Compiler::SwitchUniqueSuccSet Compiler::GetDescriptorForSwitch(BasicBlock* switchBlk) { - assert(switchBlk->bbJumpKind == BBJ_SWITCH); + assert(switchBlk->getBBJumpKind() == BBJ_SWITCH); BlockToSwitchDescMap* switchMap = GetSwitchDescMap(); SwitchUniqueSuccSet res; if (switchMap->Lookup(switchBlk, &res)) { @@ -546,7 +546,7 @@ void Compiler::SwitchUniqueSuccSet::UpdateTarget(CompAllocator alloc, BasicBlock* from, BasicBlock* to) { - assert(switchBlk->bbJumpKind == BBJ_SWITCH); // Precondition. + assert(switchBlk->getBBJumpKind() == BBJ_SWITCH); // Precondition. // Is "from" still in the switch table (because it had more than one entry before?) bool fromStillPresent = false; diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index f29293c6b8c96c..fd880a2d00348c 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -675,12 +675,12 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitor<SubstitutePlaceholdersAndDevirtualizeWalker> if (!condTree->IsIntegralConst(0)) { - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_compiler)); m_compiler->fgRemoveRefPred(block->bbNext, block); } else { - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(m_compiler)); m_compiler->fgRemoveRefPred(block->bbJumpDest, block); } } @@ -1444,7 +1444,7 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) // DDB 91389: Don't throw away the (only) inlinee block // when its return type is not BBJ_RETURN. // In other words, we need its BBJ_ to perform the right thing. - if (InlineeCompiler->fgFirstBB->bbJumpKind == BBJ_RETURN) + if (InlineeCompiler->fgFirstBB->getBBJumpKind() == BBJ_RETURN) { // Inlinee contains just one BB. So just insert its statement list to topBlock.
if (InlineeCompiler->fgFirstBB->bbStmtList != nullptr) @@ -1523,20 +1523,20 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) block->bbFlags |= BBF_INTERNAL; } - if (block->bbJumpKind == BBJ_RETURN) + if (block->getBBJumpKind() == BBJ_RETURN) { noway_assert((block->bbFlags & BBF_HAS_JMP) == 0); if (block->bbNext) { JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n", block->bbNum, bottomBlock->bbNum); - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = bottomBlock; } else { JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_NONE\n", block->bbNum); - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } fgAddRefPred(bottomBlock, block); @@ -1945,7 +1945,7 @@ Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) unsigned lclCnt = InlineeMethodInfo->locals.numArgs; bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; + bool bbIsReturn = block->getBBJumpKind() == BBJ_RETURN; // If the callee contains zero-init locals, we need to explicitly initialize them if we are // in a loop or if the caller doesn't have compInitMem set. Otherwise we can rely on the diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 7d4c0f9b11ac46..9814f8b9e6b0d5 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -292,7 +292,7 @@ void Compiler::fgComputeReturnBlocks() { // If this is a BBJ_RETURN block, add it to our list of all BBJ_RETURN blocks. This list is only // used to find return blocks. - if (block->bbJumpKind == BBJ_RETURN) + if (block->getBBJumpKind() == BBJ_RETURN) { fgReturnBlocks = new (this, CMK_Reachability) BasicBlockList(block, fgReturnBlocks); } @@ -362,7 +362,7 @@ void Compiler::fgComputeEnterBlocksSet() // For ARM code, prevent creating retless calls by adding the BBJ_ALWAYS to the "fgAlwaysBlks" list. for (BasicBlock* const block : Blocks()) { - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->getBBJumpKind() == BBJ_CALLFINALLY) { assert(block->isBBCallAlwaysPair()); @@ -466,7 +466,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) block->bbFlags &= ~(BBF_REMOVED | BBF_INTERNAL); block->bbFlags |= BBF_IMPORTED; - block->bbJumpKind = BBJ_THROW; + block->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); block->bbSetRunRarely(); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -474,7 +474,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) // the target node (of BBJ_ALWAYS) since BBJ_CALLFINALLY node is getting converted to a BBJ_THROW. if (bIsBBCallAlwaysPair) { - noway_assert(block->bbNext->bbJumpKind == BBJ_ALWAYS); + noway_assert(block->bbNext->getBBJumpKind() == BBJ_ALWAYS); fgClearFinallyTargetBit(block->bbNext->bbJumpDest); } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -638,7 +638,7 @@ bool Compiler::fgRemoveDeadBlocks() // For ARM code, prevent creating retless calls by adding the BBJ_ALWAYS to the "fgAlwaysBlks" list. for (BasicBlock* const block : Blocks()) { - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->getBBJumpKind() == BBJ_CALLFINALLY) { assert(block->isBBCallAlwaysPair()); @@ -1650,7 +1650,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() // plausible flow target. Simplest is to just mark it as a throw. 
if (bbIsHandlerBeg(newTryEntry->bbNext)) { - newTryEntry->bbJumpKind = BBJ_THROW; + newTryEntry->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); } else { @@ -1787,7 +1787,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() GenTree* const jumpIfEntryStateZero = gtNewOperNode(GT_JTRUE, TYP_VOID, compareEntryStateToZero); fgNewStmtAtBeg(fromBlock, jumpIfEntryStateZero); - fromBlock->bbJumpKind = BBJ_COND; + fromBlock->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); fromBlock->bbJumpDest = toBlock; fgAddRefPred(toBlock, fromBlock); newBlock->inheritWeight(fromBlock); @@ -1827,7 +1827,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() // it can be reached directly from "outside". // assert(fgFirstBB->bbJumpDest == osrEntry); - assert(fgFirstBB->bbJumpKind == BBJ_ALWAYS); + assert(fgFirstBB->getBBJumpKind() == BBJ_ALWAYS); if (entryJumpTarget != osrEntry) { @@ -1918,7 +1918,7 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext) noway_assert(block->bbNext == bNext); - if (block->bbJumpKind != BBJ_NONE) + if (block->getBBJumpKind() != BBJ_NONE) { return false; } @@ -2002,7 +2002,7 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext) // (if they are valid). for (BasicBlock* const predBlock : bNext->PredBlocks()) { - if (predBlock->bbJumpKind == BBJ_SWITCH) + if (predBlock->getBBJumpKind() == BBJ_SWITCH) { return false; } @@ -2027,7 +2027,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) { noway_assert(block != nullptr); noway_assert((block->bbFlags & BBF_REMOVED) == 0); - noway_assert(block->bbJumpKind == BBJ_NONE); + noway_assert(block->getBBJumpKind() == BBJ_NONE); noway_assert(bNext == block->bbNext); noway_assert(bNext != nullptr); @@ -2234,7 +2234,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) // or if both block and bNext have non-zero weights // then we will use the max weight for the block. // - if (bNext->bbJumpKind == BBJ_THROW) + if (bNext->getBBJumpKind() == BBJ_THROW) { block->bbSetRunRarely(); } @@ -2268,7 +2268,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) /* set the right links */ - block->bbJumpKind = bNext->bbJumpKind; + block->setBBJumpKind(bNext->getBBJumpKind() DEBUG_ARG(this)); VarSetOps::AssignAllowUninitRhs(this, block->bbLiveOut, bNext->bbLiveOut); // Update the beginning and ending IL offsets (bbCodeOffs and bbCodeOffsEnd). 
@@ -2328,7 +2328,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) /* Set the jump targets */ - switch (bNext->bbJumpKind) + switch (bNext->getBBJumpKind()) { case BBJ_CALLFINALLY: // Propagate RETLESS property @@ -2345,7 +2345,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) fgReplacePred(bNext->bbJumpDest, bNext, block); /* Update the predecessor list for 'bNext->bbNext' if it is different than 'bNext->bbJumpDest' */ - if (bNext->bbJumpKind == BBJ_COND && bNext->bbJumpDest != bNext->bbNext) + if (bNext->getBBJumpKind() == BBJ_COND && bNext->bbJumpDest != bNext->bbNext) { fgReplacePred(bNext->bbNext, bNext, block); } @@ -2375,7 +2375,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (bcall->getBBJumpKind() != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) { continue; } @@ -2627,14 +2627,14 @@ void Compiler::fgUnreachableBlock(BasicBlock* block) // void Compiler::fgRemoveConditionalJump(BasicBlock* block) { - noway_assert(block->bbJumpKind == BBJ_COND && block->bbJumpDest == block->bbNext); + noway_assert(block->getBBJumpKind() == BBJ_COND && block->bbJumpDest == block->bbNext); assert(compRationalIRForm == block->IsLIR()); FlowEdge* flow = fgGetPredForBlock(block->bbNext, block); noway_assert(flow->getDupCount() == 2); // Change the BBJ_COND to BBJ_NONE, and adjust the refCount and dupCount. - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); --block->bbNext->bbRefs; flow->decrementDupCount(); @@ -2735,7 +2735,7 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc bool optimizeJump = true; assert(bDest->isEmpty()); - assert(bDest->bbJumpKind == BBJ_ALWAYS); + assert(bDest->getBBJumpKind() == BBJ_ALWAYS); // We do not optimize jumps between two different try regions. 
// However jumping to a block that is not in any try region is OK @@ -2886,7 +2886,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) bool madeChanges = false; BasicBlock* bPrev = block->bbPrev; - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_COND: case BBJ_SWITCH: @@ -2930,7 +2930,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) } /* Empty GOTO can be removed iff bPrev is BBJ_NONE */ - if (bPrev->bbJumpKind != BBJ_NONE) + if (bPrev->getBBJumpKind() != BBJ_NONE) { break; } @@ -2957,7 +2957,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) { /* If this block follows a BBJ_CALLFINALLY do not remove it * (because we don't know who may jump to it) */ - if (bPrev->bbJumpKind == BBJ_CALLFINALLY) + if (bPrev->getBBJumpKind() == BBJ_CALLFINALLY) { break; } @@ -2980,7 +2980,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) { BasicBlock* succBlock; - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->getBBJumpKind() == BBJ_ALWAYS) { succBlock = block->bbJumpDest; } @@ -2997,7 +2997,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) bool okToMerge = true; // assume it's ok for (BasicBlock* const predBlock : block->PredBlocks()) { - if (predBlock->bbJumpKind == BBJ_EHCATCHRET) + if (predBlock->getBBJumpKind() == BBJ_EHCATCHRET) { assert(predBlock->bbJumpDest == block); okToMerge = false; // we can't get rid of the empty block @@ -3119,7 +3119,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) // bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) { - assert(block->bbJumpKind == BBJ_SWITCH); + assert(block->getBBJumpKind() == BBJ_SWITCH); unsigned jmpCnt = block->bbJumpSwt->bbsCount; BasicBlock** jmpTab = block->bbJumpSwt->bbsDstTab; @@ -3134,7 +3134,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) bNewDest = bDest; // Do we have a JUMP to an empty unconditional JUMP block? - if (bDest->isEmpty() && (bDest->bbJumpKind == BBJ_ALWAYS) && + if (bDest->isEmpty() && (bDest->getBBJumpKind() == BBJ_ALWAYS) && (bDest != bDest->bbJumpDest)) // special case for self jumps { bool optimizeJump = true; @@ -3312,7 +3312,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) // Change the switch jump into a BBJ_ALWAYS block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); if (jmpCnt > 1) { for (unsigned i = 1; i < jmpCnt; ++i) @@ -3377,7 +3377,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) } block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; - block->bbJumpKind = BBJ_COND; + block->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); JITDUMP("After:\n"); DISPNODE(switchTree); @@ -3502,7 +3502,7 @@ bool Compiler::fgBlockIsGoodTailDuplicationCandidate(BasicBlock* target, unsigne // // This is by no means the only kind of tail that it is beneficial to duplicate, // just the only one we recognize for now. 
- if (target->bbJumpKind != BBJ_COND) + if (target->getBBJumpKind() != BBJ_COND) { return false; } @@ -3741,7 +3741,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // if (opts.IsOSR()) { - assert(target->bbJumpKind == BBJ_COND); + assert(target->getBBJumpKind() == BBJ_COND); if ((target->bbNext->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) { @@ -3788,7 +3788,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // Fix up block's flow // - block->bbJumpKind = BBJ_COND; + block->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); block->bbJumpDest = target->bbJumpDest; fgAddRefPred(block->bbJumpDest, block); fgRemoveRefPred(target, block); @@ -3829,7 +3829,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi assert(block->bbNext == bNext); assert(block->bbPrev == bPrev); - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->getBBJumpKind() == BBJ_ALWAYS) { // We can't remove it if it is a branch from hot => cold if (!fgInDifferentRegions(block, bNext)) @@ -3841,7 +3841,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi if (!block->isBBCallAlwaysPairTail()) { /* the unconditional jump is to the next BB */ - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); #ifdef DEBUG if (verbose) { @@ -3859,7 +3859,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi else { /* remove the conditional statement at the end of block */ - noway_assert(block->bbJumpKind == BBJ_COND); + noway_assert(block->getBBJumpKind() == BBJ_COND); noway_assert(block->isValid()); #ifdef DEBUG @@ -3967,7 +3967,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi /* Conditional is gone - simply fall into the next block */ - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); /* Update bbRefs and bbNum - Conditional predecessors to the same * block are counted twice so we have to remove one of them */ @@ -4002,7 +4002,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) return false; } - if (bJump->bbJumpKind != BBJ_ALWAYS) + if (bJump->getBBJumpKind() != BBJ_ALWAYS) { return false; } @@ -4021,7 +4021,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) BasicBlock* bDest = bJump->bbJumpDest; - if (bDest->bbJumpKind != BBJ_COND) + if (bDest->getBBJumpKind() != BBJ_COND) { return false; } @@ -4232,7 +4232,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) // We need to update the following flags of the bJump block if they were set in the bDest block bJump->bbFlags |= bDest->bbFlags & BBF_COPY_PROPAGATE; - bJump->bbJumpKind = BBJ_COND; + bJump->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); bJump->bbJumpDest = bDest->bbNext; /* Update bbRefs and bbPreds */ @@ -4324,7 +4324,7 @@ bool Compiler::fgOptimizeSwitchJumps() // assert(!block->IsLIR()); - if (block->bbJumpKind != BBJ_SWITCH) + if (block->getBBJumpKind() != BBJ_SWITCH) { continue; } @@ -4393,7 +4393,7 @@ bool Compiler::fgOptimizeSwitchJumps() // Wire up the new control flow. 
// - block->bbJumpKind = BBJ_COND; + block->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); block->bbJumpDest = dominantTarget; FlowEdge* const blockToTargetEdge = fgAddRefPred(dominantTarget, block); FlowEdge* const blockToNewBlockEdge = newBlock->bbPreds; @@ -4516,7 +4516,7 @@ bool Compiler::fgExpandRarelyRunBlocks() noway_assert(tmpbb->isBBCallAlwaysPair()); bPrevPrev = tmpbb; #else - if (tmpbb->bbJumpKind == BBJ_CALLFINALLY) + if (tmpbb->getBBJumpKind() == BBJ_CALLFINALLY) { bPrevPrev = tmpbb; } @@ -4610,7 +4610,7 @@ bool Compiler::fgExpandRarelyRunBlocks() const char* reason = nullptr; - switch (bPrev->bbJumpKind) + switch (bPrev->getBBJumpKind()) { case BBJ_ALWAYS: @@ -4742,7 +4742,7 @@ bool Compiler::fgExpandRarelyRunBlocks() } /* COMPACT blocks if possible */ - if (bPrev->bbJumpKind == BBJ_NONE) + if (bPrev->getBBJumpKind() == BBJ_NONE) { if (fgCanCompactBlocks(bPrev, block)) { @@ -4934,7 +4934,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // if (forwardBranch) { - if (bPrev->bbJumpKind == BBJ_ALWAYS) + if (bPrev->getBBJumpKind() == BBJ_ALWAYS) { // We can pull up the blocks that the unconditional jump branches to // if the weight of bDest is greater or equal to the weight of block @@ -5017,9 +5017,9 @@ bool Compiler::fgReorderBlocks(bool useProfile) } } } - else // (bPrev->bbJumpKind == BBJ_COND) + else // (bPrev->getBBJumpKind() == BBJ_COND) { - noway_assert(bPrev->bbJumpKind == BBJ_COND); + noway_assert(bPrev->getBBJumpKind() == BBJ_COND); // // We will reverse branch if the taken-jump to bDest ratio (i.e. 'takenRatio') // is more than 51% @@ -5211,7 +5211,8 @@ bool Compiler::fgReorderBlocks(bool useProfile) /* (bPrev is known to be a normal block at this point) */ if (!isRare) { - if ((bDest == block->bbNext) && (block->bbJumpKind == BBJ_RETURN) && (bPrev->bbJumpKind == BBJ_ALWAYS)) + if ((bDest == block->bbNext) && (block->getBBJumpKind() == BBJ_RETURN) && + (bPrev->getBBJumpKind() == BBJ_ALWAYS)) { // This is a common case with expressions like "return Expr1 && Expr2" -- move the return // to establish fall-through. 
@@ -5245,7 +5246,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) const bool optimizedBranch = fgOptimizeBranch(bPrev); if (optimizedBranch) { - noway_assert(bPrev->bbJumpKind == BBJ_COND); + noway_assert(bPrev->getBBJumpKind() == BBJ_COND); optimizedBranches = true; } continue; @@ -5422,7 +5423,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // if (bEnd2->isBBCallAlwaysPair()) { - noway_assert(bNext->bbJumpKind == BBJ_ALWAYS); + noway_assert(bNext->getBBJumpKind() == BBJ_ALWAYS); // Move bEnd2 and bNext forward bEnd2 = bNext; bNext = bNext->bbNext; @@ -5501,12 +5502,12 @@ bool Compiler::fgReorderBlocks(bool useProfile) { if (bDest != nullptr) { - if (bPrev->bbJumpKind == BBJ_COND) + if (bPrev->getBBJumpKind() == BBJ_COND) { printf("Decided to reverse conditional branch at block " FMT_BB " branch to " FMT_BB " ", bPrev->bbNum, bDest->bbNum); } - else if (bPrev->bbJumpKind == BBJ_ALWAYS) + else if (bPrev->getBBJumpKind() == BBJ_ALWAYS) { printf("Decided to straighten unconditional branch at block " FMT_BB " branch to " FMT_BB " ", bPrev->bbNum, bDest->bbNum); @@ -5576,7 +5577,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) noway_assert(bEnd != nullptr); // bEnd can't be a BBJ_CALLFINALLY unless it is a RETLESS call - noway_assert((bEnd->bbJumpKind != BBJ_CALLFINALLY) || (bEnd->bbFlags & BBF_RETLESS_CALL)); + noway_assert((bEnd->getBBJumpKind() != BBJ_CALLFINALLY) || (bEnd->bbFlags & BBF_RETLESS_CALL)); // bStartPrev must be set to the block that precedes bStart noway_assert(bStartPrev->bbNext == bStart); @@ -5715,7 +5716,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) BasicBlock* nearBlk = nullptr; BasicBlock* jumpBlk = nullptr; - if ((bEnd->bbJumpKind == BBJ_ALWAYS) && (!isRare || bEnd->bbJumpDest->isRunRarely()) && + if ((bEnd->getBBJumpKind() == BBJ_ALWAYS) && (!isRare || bEnd->bbJumpDest->isRunRarely()) && fgIsForwardBranch(bEnd, bPrev)) { // Set nearBlk to be the block in [startBlk..endBlk] @@ -5843,7 +5844,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) printf("block " FMT_BB, bStart->bbNum); } - if (bPrev->bbJumpKind == BBJ_COND) + if (bPrev->getBBJumpKind() == BBJ_COND) { printf(" by reversing conditional jump at " FMT_BB "\n", bPrev->bbNum); } @@ -5854,7 +5855,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) } #endif // DEBUG - if (bPrev->bbJumpKind == BBJ_COND) + if (bPrev->getBBJumpKind() == BBJ_COND) { /* Reverse the bPrev jump condition */ Statement* const condTestStmt = bPrev->lastStmt(); @@ -6102,7 +6103,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) bNext = block->bbNext; bDest = nullptr; - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->getBBJumpKind() == BBJ_ALWAYS) { bDest = block->bbJumpDest; if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, bDest)) @@ -6114,7 +6115,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) } } - if (block->bbJumpKind == BBJ_NONE) + if (block->getBBJumpKind() == BBJ_NONE) { bDest = nullptr; if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, block->bbNext)) @@ -6146,7 +6147,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) if (bDest != nullptr) { // Do we have a JUMP to an empty unconditional JUMP block? 
- if (bDest->isEmpty() && (bDest->bbJumpKind == BBJ_ALWAYS) && + if (bDest->isEmpty() && (bDest->getBBJumpKind() == BBJ_ALWAYS) && (bDest != bDest->bbJumpDest)) // special case for self jumps { if (fgOptimizeBranchToEmptyUnconditional(block, bDest)) @@ -6165,12 +6166,12 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // (b) block jump target is elsewhere but join free, and // bNext's jump target has a join. // - if ((block->bbJumpKind == BBJ_COND) && // block is a BBJ_COND block - (bNext != nullptr) && // block is not the last block - (bNext->bbRefs == 1) && // No other block jumps to bNext - (bNext->bbJumpKind == BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block - bNext->isEmpty() && // and it is an empty block - (bNext != bNext->bbJumpDest) && // special case for self jumps + if ((block->getBBJumpKind() == BBJ_COND) && // block is a BBJ_COND block + (bNext != nullptr) && // block is not the last block + (bNext->bbRefs == 1) && // No other block jumps to bNext + (bNext->getBBJumpKind() == BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block + bNext->isEmpty() && // and it is an empty block + (bNext != bNext->bbJumpDest) && // special case for self jumps (bDest != fgFirstColdBlock) && (!fgInDifferentRegions(block, bDest))) // do not cross hot/cold sections { @@ -6383,7 +6384,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // // Update the switch jump table such that it follows jumps to jumps: // - if (block->bbJumpKind == BBJ_SWITCH) + if (block->getBBJumpKind() == BBJ_SWITCH) { if (fgOptimizeSwitchBranches(block)) { @@ -6418,11 +6419,11 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Don't remove the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. - if (block->countOfInEdges() == 0 && bPrev->bbJumpKind == BBJ_CALLFINALLY) + if (block->countOfInEdges() == 0 && bPrev->getBBJumpKind() == BBJ_CALLFINALLY) { assert(bPrev->isBBCallAlwaysPair()); noway_assert(!(bPrev->bbFlags & BBF_RETLESS_CALL)); - noway_assert(block->bbJumpKind == BBJ_ALWAYS); + noway_assert(block->getBBJumpKind() == BBJ_ALWAYS); bPrev = block; continue; } @@ -6454,7 +6455,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) } else if (block->countOfInEdges() == 1) { - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_COND: case BBJ_ALWAYS: @@ -6551,7 +6552,7 @@ unsigned Compiler::fgGetCodeEstimate(BasicBlock* block) { unsigned costSz = 0; // estimate of block's code size cost - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_NONE: costSz = 0; @@ -6899,7 +6900,7 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) } bool const isNoSplit = stmt == predBlock->firstStmt(); - bool const isFallThrough = (predBlock->bbJumpKind == BBJ_NONE); + bool const isFallThrough = (predBlock->getBBJumpKind() == BBJ_NONE); // Is this block possibly better than what we have? // @@ -6976,7 +6977,7 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) // Fix up the flow. // - predBlock->bbJumpKind = BBJ_ALWAYS; + predBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); predBlock->bbJumpDest = crossJumpTarget; fgRemoveRefPred(block, predBlock); @@ -7067,7 +7068,7 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early) // ternaries in C#). // The logic below could be generalized to BBJ_SWITCH, but this currently // has almost no CQ benefit but does have a TP impact. 
- if ((block->bbJumpKind != BBJ_COND) || (block->bbNext == block->bbJumpDest)) + if ((block->getBBJumpKind() != BBJ_COND) || (block->bbNext == block->bbJumpDest)) { return false; } diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index 16d0b0e307010b..317dd4a25bca2f 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -473,7 +473,7 @@ void BlockCountInstrumentor::RelocateProbes() } JITDUMP("Return " FMT_BB " is successor of possible tail call\n", block->bbNum); - assert(block->bbJumpKind == BBJ_RETURN); + assert(block->getBBJumpKind() == BBJ_RETURN); // Scan for critical preds, and add relocated probes to non-critical preds. // @@ -499,12 +499,12 @@ void BlockCountInstrumentor::RelocateProbes() { // Ensure this pred is not a fall through. // - if (pred->bbJumpKind == BBJ_NONE) + if (pred->getBBJumpKind() == BBJ_NONE) { - pred->bbJumpKind = BBJ_ALWAYS; + pred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); pred->bbJumpDest = block; } - assert(pred->bbJumpKind == BBJ_ALWAYS); + assert(pred->getBBJumpKind() == BBJ_ALWAYS); } } @@ -945,7 +945,7 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor) visitor->VisitBlock(block); nBlocks++; - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_CALLFINALLY: { @@ -1028,7 +1028,7 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor) JITDUMP("No jump dest for " FMT_BB ", suspect bad code\n", block->bbNum); visitor->Badcode(); } - else if (block->bbJumpKind != BBJ_LEAVE) + else if (block->getBBJumpKind() != BBJ_LEAVE) { JITDUMP("EH RET in " FMT_BB " most-nested in try, suspect bad code\n", block->bbNum); visitor->Badcode(); @@ -1552,9 +1552,9 @@ void EfficientEdgeCountInstrumentor::SplitCriticalEdges() // Importer folding may have changed the block jump kind // to BBJ_NONE. If so, warp it back to BBJ_ALWAYS. // - if (block->bbJumpKind == BBJ_NONE) + if (block->getBBJumpKind() == BBJ_NONE) { - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); block->bbJumpDest = target; } @@ -1657,7 +1657,7 @@ void EfficientEdgeCountInstrumentor::RelocateProbes() } JITDUMP("Return " FMT_BB " is successor of possible tail call\n", block->bbNum); - assert(block->bbJumpKind == BBJ_RETURN); + assert(block->getBBJumpKind() == BBJ_RETURN); // This block should have just one probe, which we no longer need. // @@ -1695,12 +1695,12 @@ void EfficientEdgeCountInstrumentor::RelocateProbes() // Ensure this pred is not a fall through. // - if (pred->bbJumpKind == BBJ_NONE) + if (pred->getBBJumpKind() == BBJ_NONE) { - pred->bbJumpKind = BBJ_ALWAYS; + pred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); pred->bbJumpDest = block; } - assert(pred->bbJumpKind == BBJ_ALWAYS); + assert(pred->getBBJumpKind() == BBJ_ALWAYS); } } @@ -3166,7 +3166,7 @@ void EfficientEdgeCountReconstructor::Prepare() m_unknownBlocks++; #ifdef DEBUG - if (block->bbJumpKind == BBJ_RETURN) + if (block->getBBJumpKind() == BBJ_RETURN) { nReturns++; } @@ -3233,7 +3233,7 @@ void EfficientEdgeCountReconstructor::Prepare() CLRRandom* const random = m_comp->impInlineRoot()->m_inlineStrategy->GetRandom(JitConfig.JitRandomEdgeCounts()); - const bool isReturn = sourceBlock->bbJumpKind == BBJ_RETURN; + const bool isReturn = sourceBlock->getBBJumpKind() == BBJ_RETURN; // We simulate the distribution of counts seen in StdOptimizationData.Mibc. 
// @@ -3922,7 +3922,7 @@ void EfficientEdgeCountReconstructor::PropagateEdges(BasicBlock* block, BlockInf // void EfficientEdgeCountReconstructor::MarkInterestingBlocks(BasicBlock* block, BlockInfo* info) { - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_SWITCH: MarkInterestingSwitches(block, info); @@ -3949,7 +3949,7 @@ void EfficientEdgeCountReconstructor::MarkInterestingBlocks(BasicBlock* block, B // void EfficientEdgeCountReconstructor::MarkInterestingSwitches(BasicBlock* block, BlockInfo* info) { - assert(block->bbJumpKind == BBJ_SWITCH); + assert(block->getBBJumpKind() == BBJ_SWITCH); // Thresholds for detecting a dominant switch case. // @@ -4429,11 +4429,11 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) bSrc = bDst->bbPreds->getSourceBlock(); // Does this block flow into only one other block - if (bSrc->bbJumpKind == BBJ_NONE) + if (bSrc->getBBJumpKind() == BBJ_NONE) { bOnlyNext = bSrc->bbNext; } - else if (bSrc->bbJumpKind == BBJ_ALWAYS) + else if (bSrc->getBBJumpKind() == BBJ_ALWAYS) { bOnlyNext = bSrc->bbJumpDest; } @@ -4450,11 +4450,11 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) } // Does this block flow into only one other block - if (bDst->bbJumpKind == BBJ_NONE) + if (bDst->getBBJumpKind() == BBJ_NONE) { bOnlyNext = bDst->bbNext; } - else if (bDst->bbJumpKind == BBJ_ALWAYS) + else if (bDst->getBBJumpKind() == BBJ_ALWAYS) { bOnlyNext = bDst->bbJumpDest; } @@ -4485,7 +4485,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) // To minimize asmdiffs for now, modify weights only if splitting. if (fgFirstColdBlock != nullptr) { - if (bSrc->bbJumpKind == BBJ_CALLFINALLY) + if (bSrc->getBBJumpKind() == BBJ_CALLFINALLY) { newWeight = bSrc->bbWeight; } @@ -4687,7 +4687,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() } slop = BasicBlock::GetSlopFraction(bSrc, bDst) + 1; - switch (bSrc->bbJumpKind) + switch (bSrc->getBBJumpKind()) { case BBJ_ALWAYS: case BBJ_EHCATCHRET: @@ -4756,7 +4756,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() bSrc = edge->getSourceBlock(); slop = BasicBlock::GetSlopFraction(bSrc, bDst) + 1; - if (bSrc->bbJumpKind == BBJ_COND) + if (bSrc->getBBJumpKind() == BBJ_COND) { weight_t diff; FlowEdge* otherEdge; diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index 286510cf71d606..722f5f8cadfdd1 100644 --- a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -132,7 +132,7 @@ void ProfileSynthesis::AssignLikelihoods() for (BasicBlock* const block : m_comp->Blocks()) { - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: @@ -332,8 +332,8 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block) // THROW heuristic // - bool const isJumpThrow = (jump->bbJumpKind == BBJ_THROW); - bool const isNextThrow = (next->bbJumpKind == BBJ_THROW); + bool const isJumpThrow = (jump->getBBJumpKind() == BBJ_THROW); + bool const isNextThrow = (next->getBBJumpKind() == BBJ_THROW); if (isJumpThrow != isNextThrow) { @@ -402,8 +402,8 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block) // RETURN heuristic // - bool const isJumpReturn = (jump->bbJumpKind == BBJ_RETURN); - bool const isNextReturn = (next->bbJumpKind == BBJ_RETURN); + bool const isJumpReturn = (jump->getBBJumpKind() == BBJ_RETURN); + bool const isNextReturn = (next->getBBJumpKind() == BBJ_RETURN); if (isJumpReturn != isNextReturn) { @@ -499,7 +499,7 @@ void 
ProfileSynthesis::RepairLikelihoods() for (BasicBlock* const block : m_comp->Blocks()) { - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: @@ -551,7 +551,7 @@ void ProfileSynthesis::RepairLikelihoods() } JITDUMP("\n"); - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { AssignLikelihoodCond(block); } @@ -591,7 +591,7 @@ void ProfileSynthesis::BlendLikelihoods() { weight_t sum = SumOutgoingLikelihoods(block, &likelihoods); - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: @@ -627,7 +627,7 @@ void ProfileSynthesis::BlendLikelihoods() bool const consistent = Compiler::fgProfileWeightsEqual(sum, 1.0, epsilon); bool const zero = Compiler::fgProfileWeightsEqual(block->bbWeight, 0.0, epsilon); - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { AssignLikelihoodCond(block); } @@ -1214,7 +1214,8 @@ void ProfileSynthesis::ComputeCyclicProbabilities(SimpleLoop* loop) // // Currently we don't know which edges do this. // - if ((exitBlock->bbJumpKind == BBJ_COND) && (exitBlockWeight > (missingExitWeight + currentExitWeight))) + if ((exitBlock->getBBJumpKind() == BBJ_COND) && + (exitBlockWeight > (missingExitWeight + currentExitWeight))) { JITDUMP("Will adjust likelihood of the exit edge from loop exit block " FMT_BB " to reflect capping; current likelihood is " FMT_WT "\n", diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 3b157483cd75f2..2d5c2b3fd68a36 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -120,7 +120,7 @@ PhaseStatus Compiler::fgInsertGCPolls() JITDUMP("Selecting CALL poll in block " FMT_BB " because it is the single return block\n", block->bbNum); pollType = GCPOLL_CALL; } - else if (BBJ_SWITCH == block->bbJumpKind) + else if (BBJ_SWITCH == block->getBBJumpKind()) { // We don't want to deal with all the outgoing edges of a switch block. // @@ -254,15 +254,15 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) BasicBlock* topFallThrough = nullptr; unsigned char lpIndexFallThrough = BasicBlock::NOT_IN_LOOP; - if (top->bbJumpKind == BBJ_COND) + if (top->getBBJumpKind() == BBJ_COND) { topFallThrough = top->bbNext; lpIndexFallThrough = topFallThrough->bbNatLoopNum; } BasicBlock* poll = fgNewBBafter(BBJ_NONE, top, true); - bottom = fgNewBBafter(top->bbJumpKind, poll, true); - BBjumpKinds oldJumpKind = top->bbJumpKind; + bottom = fgNewBBafter(top->getBBJumpKind(), poll, true); + BBjumpKinds oldJumpKind = top->getBBJumpKind(); unsigned char lpIndex = top->bbNatLoopNum; // Update block flags @@ -372,7 +372,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) #endif top->bbJumpDest = bottom; - top->bbJumpKind = BBJ_COND; + top->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); // Bottom has Top and Poll as its predecessors. Poll has just Top as a predecessor. 
fgAddRefPred(bottom, poll); @@ -1287,7 +1287,7 @@ void Compiler::fgLoopCallMark() for (BasicBlock* const block : Blocks()) { - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_COND: case BBJ_CALLFINALLY: @@ -1728,7 +1728,7 @@ void Compiler::fgAddSyncMethodEnterExit() // non-exceptional cases for (BasicBlock* const block : Blocks()) { - if (block->bbJumpKind == BBJ_RETURN) + if (block->getBBJumpKind() == BBJ_RETURN) { fgCreateMonitorTree(lvaMonAcquired, info.compThisArg, block, false /*exit*/); } @@ -1772,7 +1772,7 @@ GenTree* Compiler::fgCreateMonitorTree(unsigned lvaMonAcquired, unsigned lvaThis } #endif - if (block->bbJumpKind == BBJ_RETURN && block->lastStmt()->GetRootNode()->gtOper == GT_RETURN) + if (block->getBBJumpKind() == BBJ_RETURN && block->lastStmt()->GetRootNode()->gtOper == GT_RETURN) { GenTreeUnOp* retNode = block->lastStmt()->GetRootNode()->AsUnOp(); GenTree* retExpr = retNode->gtOp1; @@ -1821,7 +1821,7 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) assert(genReturnBB != nullptr); assert(genReturnBB != block); assert(fgReturnCount <= 1); // We have a single return for synchronized methods - assert(block->bbJumpKind == BBJ_RETURN); + assert(block->getBBJumpKind() == BBJ_RETURN); assert((block->bbFlags & BBF_HAS_JMP) == 0); assert(block->hasTryIndex()); assert(!block->hasHndIndex()); @@ -1837,7 +1837,7 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) assert(ehDsc->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX); // Convert the BBJ_RETURN to BBJ_ALWAYS, jumping to genReturnBB. - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = genReturnBB; fgAddRefPred(genReturnBB, block); @@ -1949,7 +1949,7 @@ bool Compiler::fgMoreThanOneReturnBlock() for (BasicBlock* const block : Blocks()) { - if (block->bbJumpKind == BBJ_RETURN) + if (block->getBBJumpKind() == BBJ_RETURN) { retCnt++; if (retCnt > 1) @@ -2309,7 +2309,7 @@ class MergedReturns // Change BBJ_RETURN to BBJ_ALWAYS targeting const return block. assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); - returnBlock->bbJumpKind = BBJ_ALWAYS; + returnBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); returnBlock->bbJumpDest = constReturnBlock; comp->fgAddRefPred(constReturnBlock, returnBlock); @@ -2596,7 +2596,7 @@ PhaseStatus Compiler::fgAddInternal() for (BasicBlock* block = fgFirstBB; block != lastBlockBeforeGenReturns->bbNext; block = block->bbNext) { - if ((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) + if ((block->getBBJumpKind() == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { merger.Record(block); } @@ -3125,7 +3125,7 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) // It's a jump from outside the handler; add it to the newHead preds list and remove // it from the block preds list. 
- switch (predBlock->bbJumpKind) + switch (predBlock->getBBJumpKind()) { case BBJ_CALLFINALLY: noway_assert(predBlock->bbJumpDest == block); @@ -3451,7 +3451,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // so the code size for block needs be large // enough to make it worth our while // - if ((lblk == nullptr) || (lblk->bbJumpKind != BBJ_COND) || (fgGetCodeEstimate(block) >= 8)) + if ((lblk == nullptr) || (lblk->getBBJumpKind() != BBJ_COND) || (fgGetCodeEstimate(block) >= 8)) { // This block is now a candidate for first cold block // Also remember the predecessor to this block @@ -3503,7 +3503,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // if (prevToFirstColdBlock->bbFallsThrough()) { - switch (prevToFirstColdBlock->bbJumpKind) + switch (prevToFirstColdBlock->getBBJumpKind()) { default: noway_assert(!"Unhandled jumpkind in fgDetermineFirstColdBlock()"); @@ -3523,7 +3523,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // This is a slightly more complicated case, because we will // probably need to insert a block to jump to the cold section. // - if (firstColdBlock->isEmpty() && (firstColdBlock->bbJumpKind == BBJ_ALWAYS)) + if (firstColdBlock->isEmpty() && (firstColdBlock->getBBJumpKind() == BBJ_ALWAYS)) { // We can just use this block as the transitionBlock firstColdBlock = firstColdBlock->bbNext; @@ -3548,7 +3548,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // convert it to BBJ_ALWAYS to force an explicit jump. prevToFirstColdBlock->bbJumpDest = firstColdBlock; - prevToFirstColdBlock->bbJumpKind = BBJ_ALWAYS; + prevToFirstColdBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); break; } } @@ -3981,7 +3981,7 @@ PhaseStatus Compiler::fgSetBlockOrder() (((src)->bbNum < (dst)->bbNum) || (((src)->bbFlags | (dst)->bbFlags) & BBF_GC_SAFE_POINT)) bool partiallyInterruptible = true; - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_COND: case BBJ_ALWAYS: diff --git a/src/coreclr/jit/gschecks.cpp b/src/coreclr/jit/gschecks.cpp index e0937b8975fbb1..404d86e3abc0ec 100644 --- a/src/coreclr/jit/gschecks.cpp +++ b/src/coreclr/jit/gschecks.cpp @@ -529,7 +529,7 @@ void Compiler::gsParamsToShadows() // We would have to insert assignments in all such blocks, just before GT_JMP stmnt. for (BasicBlock* const block : Blocks()) { - if (block->bbJumpKind != BBJ_RETURN) + if (block->getBBJumpKind() != BBJ_RETURN) { continue; } diff --git a/src/coreclr/jit/ifconversion.cpp b/src/coreclr/jit/ifconversion.cpp index f51417453225a2..da0683be95ab4b 100644 --- a/src/coreclr/jit/ifconversion.cpp +++ b/src/coreclr/jit/ifconversion.cpp @@ -83,7 +83,7 @@ class OptIfConversionDsc bool OptIfConversionDsc::IfConvertCheckInnerBlockFlow(BasicBlock* block) { // Block should have a single successor or be a return. - if (!(block->GetUniqueSucc() != nullptr || (m_doElseConversion && (block->bbJumpKind == BBJ_RETURN)))) + if (!(block->GetUniqueSucc() != nullptr || (m_doElseConversion && (block->getBBJumpKind() == BBJ_RETURN)))) { return false; } @@ -137,7 +137,7 @@ bool OptIfConversionDsc::IfConvertCheckThenFlow() { // All the Then blocks up to m_finalBlock are in a valid flow. m_flowFound = true; - if (thenBlock->bbJumpKind == BBJ_RETURN) + if (thenBlock->getBBJumpKind() == BBJ_RETURN) { assert(m_finalBlock == nullptr); m_mainOper = GT_RETURN; @@ -553,7 +553,7 @@ void OptIfConversionDsc::IfConvertDump() bool OptIfConversionDsc::optIfConvert() { // Does the block end by branching via a JTRUE after a compare? 
- if (m_startBlock->bbJumpKind != BBJ_COND || m_startBlock->NumSucc() != 2) + if (m_startBlock->getBBJumpKind() != BBJ_COND || m_startBlock->NumSucc() != 2) { return false; } @@ -743,7 +743,7 @@ bool OptIfConversionDsc::optIfConvert() // Update the flow from the original block. m_comp->fgRemoveAllRefPreds(m_startBlock->bbNext, m_startBlock); - m_startBlock->bbJumpKind = BBJ_ALWAYS; + m_startBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); #ifdef DEBUG if (m_comp->verbose) diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index b0c2a37f7eaab5..704536165d5845 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -2455,7 +2455,7 @@ GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom) void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)) { - block->bbJumpKind = BBJ_THROW; + block->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); block->bbFlags |= BBF_FAILED_VERIFICATION; block->bbFlags &= ~BBF_IMPORTED; @@ -4101,7 +4101,7 @@ bool Compiler::impIsImplicitTailCallCandidate( // the block containing call is marked as BBJ_RETURN // We allow shared ret tail call optimization on recursive calls even under // !FEATURE_TAILCALL_OPT_SHARED_RETURN. - if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN)) + if (!isRecursive && (compCurBB->getBBJumpKind() != BBJ_RETURN)) return false; #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN @@ -4250,7 +4250,7 @@ void Compiler::impImportLeave(BasicBlock* block) impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; - assert(block->bbJumpKind == BBJ_LEAVE); + assert(block->getBBJumpKind() == BBJ_LEAVE); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary BasicBlock* step = DUMMY_INIT(NULL); @@ -4321,8 +4321,8 @@ void Compiler::impImportLeave(BasicBlock* block) if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); - callBlock = block; - callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY + callBlock = block; + callBlock->setBBJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY if (endCatches) { @@ -4344,7 +4344,7 @@ void Compiler::impImportLeave(BasicBlock* block) /* Calling the finally block */ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step); - assert(step->bbJumpKind == BBJ_ALWAYS); + assert(step->getBBJumpKind() == BBJ_ALWAYS); if (step->bbJumpDest != nullptr) { fgRemoveRefPred(step->bbJumpDest, step); @@ -4419,7 +4419,7 @@ void Compiler::impImportLeave(BasicBlock* block) if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); - block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS if (endCatches) { @@ -4523,7 +4523,7 @@ void Compiler::impImportLeave(BasicBlock* block) impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; - assert(block->bbJumpKind == BBJ_LEAVE); + assert(block->getBBJumpKind() == BBJ_LEAVE); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary BasicBlock* step = nullptr; @@ -4572,9 +4572,9 @@ void Compiler::impImportLeave(BasicBlock* block) if (step == nullptr) { - step = block; - step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET - stepType = ST_Catch; + step = block; + step->setBBJumpKind(BBJ_EHCATCHRET 
DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_EHCATCHRET + stepType = ST_Catch; #ifdef DEBUG if (verbose) @@ -4606,7 +4606,7 @@ void Compiler::impImportLeave(BasicBlock* block) #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { - assert(step->bbJumpKind == BBJ_ALWAYS); + assert(step->getBBJumpKind() == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } @@ -4651,7 +4651,7 @@ void Compiler::impImportLeave(BasicBlock* block) // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the // next block, and flow optimizations will remove it. - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); fgRemoveRefPred(block->bbJumpDest, block); block->bbJumpDest = callBlock; fgAddRefPred(callBlock, block); @@ -4672,8 +4672,8 @@ void Compiler::impImportLeave(BasicBlock* block) #else // !FEATURE_EH_CALLFINALLY_THUNKS - callBlock = block; - callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY + callBlock = block; + callBlock->setBBJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY #ifdef DEBUG if (verbose) @@ -4708,7 +4708,7 @@ void Compiler::impImportLeave(BasicBlock* block) assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); #if FEATURE_EH_CALLFINALLY_THUNKS - if (step->bbJumpKind == BBJ_EHCATCHRET) + if (step->getBBJumpKind() == BBJ_EHCATCHRET) { // Need to create another step block in the 'try' region that will actually branch to the // call-to-finally thunk. @@ -4758,7 +4758,7 @@ void Compiler::impImportLeave(BasicBlock* block) #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { - assert(step->bbJumpKind == BBJ_ALWAYS); + assert(step->getBBJumpKind() == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } @@ -4850,12 +4850,12 @@ void Compiler::impImportLeave(BasicBlock* block) if (stepType == ST_FinallyReturn) { - assert(step->bbJumpKind == BBJ_ALWAYS); + assert(step->getBBJumpKind() == BBJ_ALWAYS); } else { assert(stepType == ST_Catch); - assert(step->bbJumpKind == BBJ_EHCATCHRET); + assert(step->getBBJumpKind() == BBJ_EHCATCHRET); } /* Create a new exit block in the try region for the existing step block to jump to in this scope */ @@ -4908,7 +4908,7 @@ void Compiler::impImportLeave(BasicBlock* block) if (step == nullptr) { - block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS #ifdef DEBUG if (verbose) @@ -4931,7 +4931,7 @@ void Compiler::impImportLeave(BasicBlock* block) #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { - assert(step->bbJumpKind == BBJ_ALWAYS); + assert(step->getBBJumpKind() == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } @@ -4992,9 +4992,9 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1 // will be treated as pair and handled correctly. 
- if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->getBBJumpKind() == BBJ_CALLFINALLY) { - BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind); + BasicBlock* dupBlock = bbNewBasicBlock(block->getBBJumpKind()); dupBlock->bbFlags = block->bbFlags; dupBlock->bbJumpDest = block->bbJumpDest; fgAddRefPred(dupBlock->bbJumpDest, dupBlock); @@ -5024,7 +5024,7 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) } #endif // FEATURE_EH_FUNCLETS - block->bbJumpKind = BBJ_LEAVE; + block->setBBJumpKind(BBJ_LEAVE DEBUG_ARG(this)); fgInitBBLookup(); fgRemoveRefPred(block->bbJumpDest, block); @@ -6002,7 +6002,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Change block to BBJ_THROW so we won't trigger importation of successors. // - block->bbJumpKind = BBJ_THROW; + block->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); // If this method has a explicit generic context, the only uses of it may be in // the IL for this block. So assume it's used. @@ -6715,7 +6715,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) /* Mark current bb as end of filter */ assert(compCurBB->bbFlags & BBF_DONT_REMOVE); - assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET); + assert(compCurBB->getBBJumpKind() == BBJ_EHFILTERRET); /* Mark catch handler as successor */ @@ -7256,7 +7256,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } JITDUMP(" %04X", jmpAddr); - if (block->bbJumpKind != BBJ_LEAVE) + if (block->getBBJumpKind() != BBJ_LEAVE) { impResetLeaveBlock(block, jmpAddr); } @@ -7302,16 +7302,16 @@ void Compiler::impImportBlockCode(BasicBlock* block) { // We may have already modified `block`'s jump kind, if this is a re-importation. // - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", block->bbNum, block->bbNext->bbNum); fgRemoveRefPred(block->bbJumpDest, block); - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } else { - assert(block->bbJumpKind == BBJ_NONE); + assert(block->getBBJumpKind() == BBJ_NONE); } if (op1->gtFlags & GTF_GLOB_EFFECT) @@ -7363,11 +7363,12 @@ void Compiler::impImportBlockCode(BasicBlock* block) assert(!opts.compDbgCode); BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE); - assertImp((block->bbJumpKind == BBJ_COND) // normal case - || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the - // block for the second time + assertImp((block->getBBJumpKind() == BBJ_COND) // normal case + || + (block->getBBJumpKind() == foldedJumpKind)); // this can happen if we are reimporting the + // block for the second time - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { if (foldedJumpKind == BBJ_NONE) { @@ -7380,7 +7381,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) block->bbJumpDest->bbNum); fgRemoveRefPred(block->bbNext, block); } - block->bbJumpKind = foldedJumpKind; + block->setBBJumpKind(foldedJumpKind DEBUG_ARG(this)); } break; @@ -7548,16 +7549,16 @@ void Compiler::impImportBlockCode(BasicBlock* block) { // We may have already modified `block`'s jump kind, if this is a re-importation. 
// - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", block->bbNum, block->bbNext->bbNum); fgRemoveRefPred(block->bbJumpDest, block); - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } else { - assert(block->bbJumpKind == BBJ_NONE); + assert(block->getBBJumpKind() == BBJ_NONE); } if (op1->gtFlags & GTF_GLOB_EFFECT) @@ -7633,13 +7634,13 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (curJump != block->bbNext) { // transform the basic block into a BBJ_ALWAYS - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = curJump; } else { // transform the basic block into a BBJ_NONE - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } foundVal = true; } @@ -7657,8 +7658,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) { printf("\nSwitch folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, - block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); - if (block->bbJumpKind == BBJ_ALWAYS) + block->getBBJumpKind() == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); + if (block->getBBJumpKind() == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } @@ -8531,9 +8532,10 @@ void Compiler::impImportBlockCode(BasicBlock* block) lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */); } - bool bbInALoop = impBlockIsInALoop(block); - bool bbIsReturn = (block->bbJumpKind == BBJ_RETURN) && - (!compIsForInlining() || (impInlineInfo->iciBlock->bbJumpKind == BBJ_RETURN)); + bool bbInALoop = impBlockIsInALoop(block); + bool bbIsReturn = + (block->getBBJumpKind() == BBJ_RETURN) && + (!compIsForInlining() || (impInlineInfo->iciBlock->getBBJumpKind() == BBJ_RETURN)); LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { @@ -11279,7 +11281,7 @@ void Compiler::impImportBlock(BasicBlock* block) unsigned multRef = impCanReimport ? 
unsigned(~0) : 0; - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_COND: @@ -12117,11 +12119,11 @@ void Compiler::impImport() JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum); entryBlock->bbFlags |= BBF_IMPORTED; - if (entryBlock->bbJumpKind == BBJ_NONE) + if (entryBlock->getBBJumpKind() == BBJ_NONE) { entryBlock = entryBlock->bbNext; } - else if (opts.IsOSR() && (entryBlock->bbJumpKind == BBJ_ALWAYS)) + else if (opts.IsOSR() && (entryBlock->getBBJumpKind() == BBJ_ALWAYS)) { entryBlock = entryBlock->bbJumpDest; } @@ -12239,7 +12241,7 @@ void Compiler::impFixPredLists() continue; } - if (finallyBlock->bbJumpKind != BBJ_EHFINALLYRET) + if (finallyBlock->getBBJumpKind() != BBJ_EHFINALLYRET) { continue; } diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index d340354d34ef1f..fbe0978f2514b9 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -1095,7 +1095,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // assert(compCurBB is not a catch, finally or filter block); // assert(compCurBB is not a try block protected by a finally block); - assert(!isExplicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN); + assert(!isExplicitTailCall || compCurBB->getBBJumpKind() == BBJ_RETURN); // Ask VM for permission to tailcall if (canTailCall) @@ -1271,10 +1271,10 @@ var_types Compiler::impImportCall(OPCODE opcode, // BBJ_RETURN successor. Mark that successor so we can handle it specially during profile // instrumentation. // - if (compCurBB->bbJumpKind != BBJ_RETURN) + if (compCurBB->getBBJumpKind() != BBJ_RETURN) { BasicBlock* const successor = compCurBB->GetUniqueSucc(); - assert(successor->bbJumpKind == BBJ_RETURN); + assert(successor->getBBJumpKind() == BBJ_RETURN); successor->bbFlags |= BBF_TAILCALL_SUCCESSOR; optMethodFlags |= OMF_HAS_TAILCALL_SUCCESSOR; } diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index 37f0d626cbbc30..15cee342aa603b 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -572,8 +572,8 @@ class IndirectCallTransformer { // There's no need for a new block here. We can just append to currBlock. // - checkBlock = currBlock; - checkBlock->bbJumpKind = BBJ_COND; + checkBlock = currBlock; + checkBlock->setBBJumpKind(BBJ_COND DEBUG_ARG(compiler)); } else { @@ -652,7 +652,7 @@ class IndirectCallTransformer if (isLastCheck && ((origCall->gtCallMoreFlags & GTF_CALL_M_GUARDED_DEVIRT_EXACT) != 0)) { checkBlock->bbJumpDest = nullptr; - checkBlock->bbJumpKind = BBJ_NONE; + checkBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(compiler)); return; } @@ -1073,7 +1073,7 @@ class IndirectCallTransformer // BasicBlock* const coldBlock = checkBlock->bbPrev; - if (coldBlock->bbJumpKind != BBJ_NONE) + if (coldBlock->getBBJumpKind() != BBJ_NONE) { JITDUMP("Unexpected flow from cold path " FMT_BB "\n", coldBlock->bbNum); return; @@ -1081,7 +1081,7 @@ class IndirectCallTransformer BasicBlock* const hotBlock = coldBlock->bbPrev; - if ((hotBlock->bbJumpKind != BBJ_ALWAYS) || (hotBlock->bbJumpDest != checkBlock)) + if ((hotBlock->getBBJumpKind() != BBJ_ALWAYS) || (hotBlock->bbJumpDest != checkBlock)) { JITDUMP("Unexpected flow from hot path " FMT_BB "\n", hotBlock->bbNum); return; @@ -1126,7 +1126,7 @@ class IndirectCallTransformer // not fall through to the check block. 
// compiler->fgRemoveRefPred(checkBlock, coldBlock); - coldBlock->bbJumpKind = BBJ_ALWAYS; + coldBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(compiler)); coldBlock->bbJumpDest = elseBlock; compiler->fgAddRefPred(elseBlock, coldBlock); } diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index a257ebc173502e..888058d133b62d 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -960,7 +960,7 @@ void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** be bool Compiler::ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex) { - assert(blockCallFinally->bbJumpKind == BBJ_CALLFINALLY); + assert(blockCallFinally->getBBJumpKind() == BBJ_CALLFINALLY); assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX); assert(finallyIndex < compHndBBtabCount); assert(ehGetDsc(finallyIndex)->HasFinallyHandler()); @@ -2276,7 +2276,7 @@ bool Compiler::fgNormalizeEHCase2() // Change pred branches. // - if (predBlock->bbJumpKind != BBJ_NONE) + if (predBlock->getBBJumpKind() != BBJ_NONE) { fgReplaceJumpTarget(predBlock, newTryStart, insertBeforeBlk); } @@ -3506,7 +3506,7 @@ void Compiler::fgVerifyHandlerTab() } // Check for legal block types - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_EHFINALLYRET: { @@ -4056,12 +4056,12 @@ void Compiler::fgClearFinallyTargetBit(BasicBlock* block) for (BasicBlock* const predBlock : block->PredBlocks()) { - if (predBlock->bbJumpKind == BBJ_ALWAYS && predBlock->bbJumpDest == block) + if (predBlock->getBBJumpKind() == BBJ_ALWAYS && predBlock->bbJumpDest == block) { BasicBlock* pPrev = predBlock->bbPrev; if (pPrev != nullptr) { - if (pPrev->bbJumpKind == BBJ_CALLFINALLY) + if (pPrev->getBBJumpKind() == BBJ_CALLFINALLY) { // We found a BBJ_CALLFINALLY / BBJ_ALWAYS that still points to this finally target return; @@ -4113,7 +4113,7 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) ((xtab->ebdHndBeg->bbNext == block) && (xtab->ebdHndBeg->bbFlags & BBF_INTERNAL))); // After we've already inserted a header block, and we're // trying to decide how to split up the predecessor edges. - if (predBlock->bbJumpKind == BBJ_CALLFINALLY) + if (predBlock->getBBJumpKind() == BBJ_CALLFINALLY) { assert(predBlock->bbJumpDest == block); @@ -4184,7 +4184,7 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) // The block is a handler. Check if the pred block is from its filter. We only need to // check the end filter flag, as there is only a single filter for any handler, and we // already know predBlock is a predecessor of block. 
- if (predBlock->bbJumpKind == BBJ_EHFILTERRET) + if (predBlock->getBBJumpKind() == BBJ_EHFILTERRET) { assert(!xtab->InHndRegionBBRange(predBlock)); return false; @@ -4413,7 +4413,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) { BasicBlock* bFilterLast = HBtab->BBFilterLast(); assert(bFilterLast != nullptr); - assert(bFilterLast->bbJumpKind == BBJ_EHFILTERRET); + assert(bFilterLast->getBBJumpKind() == BBJ_EHFILTERRET); assert(bFilterLast->bbJumpDest == block); #ifdef DEBUG if (verbose) diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 57b4f164fd444c..820545508968ed 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -4098,7 +4098,7 @@ void Compiler::lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, if (!varDsc->lvDisqualifySingleDefRegCandidate) // If this var is already disqualified, we can skip this { bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; + bool bbIsReturn = block->getBBJumpKind() == BBJ_RETURN; // TODO: Zero-inits in LSRA are created with below condition. But if filter out based on that condition // we filter a lot of interesting variables that would benefit otherwise with EH var enregistration. // bool needsExplicitZeroInit = !varDsc->lvIsParam && (info.compInitMem || diff --git a/src/coreclr/jit/lir.cpp b/src/coreclr/jit/lir.cpp index 4389b6d6c4d8e3..7edb0515ae3239 100644 --- a/src/coreclr/jit/lir.cpp +++ b/src/coreclr/jit/lir.cpp @@ -1770,7 +1770,7 @@ void LIR::InsertBeforeTerminator(BasicBlock* block, LIR::Range&& range) assert(insertionPoint != nullptr); #if DEBUG - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_COND: assert(insertionPoint->OperIsConditionalJump()); diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index 62f0e1784a1d37..9c9aafe0686b65 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -378,7 +378,7 @@ void Compiler::fgPerBlockLocalVarLiveness() block->bbMemoryLiveIn = fullMemoryKindSet; block->bbMemoryLiveOut = fullMemoryKindSet; - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_EHFINALLYRET: case BBJ_EHFAULTRET: @@ -491,7 +491,7 @@ void Compiler::fgPerBlockLocalVarLiveness() // Mark the FrameListRoot as used, if applicable. - if (block->bbJumpKind == BBJ_RETURN && compMethodRequiresPInvokeFrame()) + if (block->getBBJumpKind() == BBJ_RETURN && compMethodRequiresPInvokeFrame()) { assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM)); if (!opts.ShouldUsePInvokeHelpers()) @@ -886,7 +886,7 @@ void Compiler::fgExtendDbgLifetimes() { VarSetOps::ClearD(this, initVars); - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_NONE: PREFIX_ASSUME(block->bbNext != nullptr); @@ -2451,7 +2451,7 @@ void Compiler::fgInterBlockLocalVarLiveness() { // Get the set of live variables on exit from an exception region. VarSetOps::UnionD(this, exceptVars, block->bbLiveOut); - if (block->bbJumpKind == BBJ_EHFINALLYRET) + if (block->getBBJumpKind() == BBJ_EHFINALLYRET) { // Live on exit from finally. 
// We track these separately because, in addition to having EH live-out semantics, diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index c17c4cdd29527e..c6e6dc91c3d88e 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -1766,7 +1766,7 @@ bool Compiler::optIsLoopClonable(unsigned loopInd) unsigned loopRetCount = 0; for (BasicBlock* const blk : loop.LoopBlocks()) { - if (blk->bbJumpKind == BBJ_RETURN) + if (blk->getBBJumpKind() == BBJ_RETURN) { loopRetCount++; } @@ -1855,7 +1855,7 @@ bool Compiler::optIsLoopClonable(unsigned loopInd) BasicBlock* top = loop.lpTop; BasicBlock* bottom = loop.lpBottom; - if (bottom->bbJumpKind != BBJ_COND) + if (bottom->getBBJumpKind() != BBJ_COND) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Couldn't find termination test.\n", loopInd); return false; @@ -1945,7 +1945,7 @@ BasicBlock* Compiler::optInsertLoopChoiceConditions(LoopCloneContext* context, JITDUMP("Inserting loop " FMT_LP " loop choice conditions\n", loopNum); assert(context->HasBlockConditions(loopNum)); assert(slowHead != nullptr); - assert(insertAfter->bbJumpKind == BBJ_NONE); + assert(insertAfter->getBBJumpKind() == BBJ_NONE); if (context->HasBlockConditions(loopNum)) { @@ -2043,11 +2043,11 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) h2->bbNatLoopNum = ambientLoop; h2->bbFlags |= BBF_LOOP_PREHEADER; - if (h->bbJumpKind != BBJ_NONE) + if (h->getBBJumpKind() != BBJ_NONE) { - assert(h->bbJumpKind == BBJ_ALWAYS); + assert(h->getBBJumpKind() == BBJ_ALWAYS); assert(h->bbJumpDest == loop.lpEntry); - h2->bbJumpKind = BBJ_ALWAYS; + h2->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); h2->bbJumpDest = loop.lpEntry; } @@ -2062,16 +2062,16 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // Make 'h' fall through to 'h2' (if it didn't already). // Don't add the h->h2 edge because we're going to insert the cloning conditions between 'h' and 'h2', and // optInsertLoopChoiceConditions() will add the edge. - h->bbJumpKind = BBJ_NONE; + h->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); h->bbJumpDest = nullptr; // Make X2 after B, if necessary. (Not necessary if B is a BBJ_ALWAYS.) // "newPred" will be the predecessor of the blocks of the cloned loop. BasicBlock* b = loop.lpBottom; BasicBlock* newPred = b; - if (b->bbJumpKind != BBJ_ALWAYS) + if (b->getBBJumpKind() != BBJ_ALWAYS) { - assert(b->bbJumpKind == BBJ_COND); + assert(b->getBBJumpKind() == BBJ_COND); BasicBlock* x = b->bbNext; if (x != nullptr) @@ -2116,7 +2116,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone)); for (BasicBlock* const blk : loop.LoopBlocks()) { - BasicBlock* newBlk = fgNewBBafter(blk->bbJumpKind, newPred, /*extendRegion*/ true); + BasicBlock* newBlk = fgNewBBafter(blk->getBBJumpKind(), newPred, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " (copy of " FMT_BB ") after " FMT_BB "\n", newBlk->bbNum, blk->bbNum, newPred->bbNum); // Call CloneBlockState to make a copy of the block's statements (and attributes), and assert that it @@ -2175,7 +2175,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) bool b = blockMap->Lookup(blk, &newblk); assert(b && newblk != nullptr); - assert(blk->bbJumpKind == newblk->bbJumpKind); + assert(blk->getBBJumpKind() == newblk->getBBJumpKind()); // First copy the jump destination(s) from "blk". 
optCopyBlkDest(blk, newblk); @@ -2184,7 +2184,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) optRedirectBlock(newblk, blockMap); // Add predecessor edges for the new successors, as well as the fall-through paths. - switch (newblk->bbJumpKind) + switch (newblk->getBBJumpKind()) { case BBJ_NONE: fgAddRefPred(newblk->bbNext, newblk); @@ -2243,7 +2243,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // We should always have block conditions. assert(context->HasBlockConditions(loopInd)); - assert(h->bbJumpKind == BBJ_NONE); + assert(h->getBBJumpKind() == BBJ_NONE); assert(h->bbNext == h2); // If any condition is false, go to slowHead (which branches or falls through to e2). @@ -2254,8 +2254,8 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) if (slowHead->bbNext != e2) { // We can't just fall through to the slow path entry, so make it an unconditional branch. - assert(slowHead->bbJumpKind == BBJ_NONE); // This is how we created it above. - slowHead->bbJumpKind = BBJ_ALWAYS; + assert(slowHead->getBBJumpKind() == BBJ_NONE); // This is how we created it above. + slowHead->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); slowHead->bbJumpDest = e2; } @@ -2266,7 +2266,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // Add the fall-through path pred (either to T/E for fall-through from conditions to fast path, // or H2 if branch to E of fast path). - assert(condLast->bbJumpKind == BBJ_COND); + assert(condLast->getBBJumpKind() == BBJ_COND); JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", condLast->bbNum, condLast->bbNext->bbNum); fgAddRefPred(condLast->bbNext, condLast); diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 313354d1078260..b985a5a8b1229b 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -801,12 +801,12 @@ GenTree* Lowering::LowerSwitch(GenTree* node) noway_assert(comp->opts.OptimizationDisabled()); if (originalSwitchBB->bbNext == jumpTab[0]) { - originalSwitchBB->bbJumpKind = BBJ_NONE; + originalSwitchBB->setBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); originalSwitchBB->bbJumpDest = nullptr; } else { - originalSwitchBB->bbJumpKind = BBJ_ALWAYS; + originalSwitchBB->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); originalSwitchBB->bbJumpDest = jumpTab[0]; } // Remove extra predecessor links if there was more than one case. @@ -891,16 +891,16 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // afterDefaultCondBlock is now the switch, and all the switch targets have it as a predecessor. // originalSwitchBB is now a BBJ_NONE, and there is a predecessor edge in afterDefaultCondBlock // representing the fall-through flow from originalSwitchBB. - assert(originalSwitchBB->bbJumpKind == BBJ_NONE); + assert(originalSwitchBB->getBBJumpKind() == BBJ_NONE); assert(originalSwitchBB->bbNext == afterDefaultCondBlock); - assert(afterDefaultCondBlock->bbJumpKind == BBJ_SWITCH); + assert(afterDefaultCondBlock->getBBJumpKind() == BBJ_SWITCH); assert(afterDefaultCondBlock->bbJumpSwt->bbsHasDefault); assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet. // The GT_SWITCH code is still in originalSwitchBB (it will be removed later). // Turn originalSwitchBB into a BBJ_COND. 
- originalSwitchBB->bbJumpKind = BBJ_COND; + originalSwitchBB->setBBJumpKind(BBJ_COND DEBUG_ARG(comp)); originalSwitchBB->bbJumpDest = jumpTab[jumpCnt - 1]; // Fix the pred for the default case: the default block target still has originalSwitchBB @@ -957,12 +957,12 @@ GenTree* Lowering::LowerSwitch(GenTree* node) } if (afterDefaultCondBlock->bbNext == uniqueSucc) { - afterDefaultCondBlock->bbJumpKind = BBJ_NONE; + afterDefaultCondBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); afterDefaultCondBlock->bbJumpDest = nullptr; } else { - afterDefaultCondBlock->bbJumpKind = BBJ_ALWAYS; + afterDefaultCondBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); afterDefaultCondBlock->bbJumpDest = uniqueSucc; } } @@ -1036,13 +1036,13 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // case: there is no need to compare against the case index, since it's // guaranteed to be taken (since the default case was handled first, above). - currentBlock->bbJumpKind = BBJ_ALWAYS; + currentBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); } else { // Otherwise, it's a conditional branch. Set the branch kind, then add the // condition statement. - currentBlock->bbJumpKind = BBJ_COND; + currentBlock->setBBJumpKind(BBJ_COND DEBUG_ARG(comp)); // Now, build the conditional statement for the current case that is // being evaluated: @@ -1074,8 +1074,8 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // so fgRemoveBlock() doesn't complain. JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum); assert(currentBlock == afterDefaultCondBlock); - assert(currentBlock->bbJumpKind == BBJ_SWITCH); - currentBlock->bbJumpKind = BBJ_NONE; + assert(currentBlock->getBBJumpKind() == BBJ_SWITCH); + currentBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); currentBlock->bbFlags &= ~BBF_DONT_REMOVE; comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block. } @@ -1159,7 +1159,7 @@ bool Lowering::TryLowerSwitchToBitTest( { assert(jumpCount >= 2); assert(targetCount >= 2); - assert(bbSwitch->bbJumpKind == BBJ_SWITCH); + assert(bbSwitch->getBBJumpKind() == BBJ_SWITCH); assert(switchValue->OperIs(GT_LCL_VAR)); // @@ -1247,7 +1247,7 @@ bool Lowering::TryLowerSwitchToBitTest( // GenCondition bbSwitchCondition; - bbSwitch->bbJumpKind = BBJ_COND; + bbSwitch->setBBJumpKind(BBJ_COND DEBUG_ARG(comp)); comp->fgRemoveAllRefPreds(bbCase1, bbSwitch); comp->fgRemoveAllRefPreds(bbCase0, bbSwitch); @@ -5296,7 +5296,7 @@ void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* JITDUMP("======= Inserting PInvoke method epilog\n"); // Method doing PInvoke calls has exactly one return block unless it has "jmp" or tail calls. - assert(((returnBB == comp->genReturnBB) && (returnBB->bbJumpKind == BBJ_RETURN)) || + assert(((returnBB == comp->genReturnBB) && (returnBB->getBBJumpKind() == BBJ_RETURN)) || returnBB->endsWithTailCallOrJmp(comp)); LIR::Range& returnBlockRange = LIR::AsRange(returnBB); diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index ec19a65c134645..1b7aebaea1997b 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -964,7 +964,7 @@ void LinearScan::setBlockSequence() blockInfo[block->bbNum].hasCriticalInEdge = true; hasCriticalEdges = true; } - else if (predBlock->bbJumpKind == BBJ_SWITCH) + else if (predBlock->getBBJumpKind() == BBJ_SWITCH) { assert(!"Switch with single successor"); } @@ -993,7 +993,7 @@ void LinearScan::setBlockSequence() // according to the desired order. We will handle the EH successors below. 
const unsigned numSuccs = block->NumSucc(compiler); bool checkForCriticalOutEdge = (numSuccs > 1); - if (!checkForCriticalOutEdge && block->bbJumpKind == BBJ_SWITCH) + if (!checkForCriticalOutEdge && block->getBBJumpKind() == BBJ_SWITCH) { assert(!"Switch with single successor"); } @@ -1549,7 +1549,7 @@ void LinearScan::identifyCandidatesExceptionDataflow() if (block->hasEHBoundaryOut()) { VarSetOps::UnionD(compiler, exceptVars, block->bbLiveOut); - if (block->bbJumpKind == BBJ_EHFINALLYRET) + if (block->getBBJumpKind() == BBJ_EHFINALLYRET) { // Live on exit from finally. // We track these separately because, in addition to having EH live-out semantics, @@ -2513,7 +2513,7 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, // IG08: // ... // ... - if (block->bbJumpKind == BBJ_THROW) + if (block->getBBJumpKind() == BBJ_THROW) { JITDUMP(" - throw block; "); return nullptr; @@ -2544,7 +2544,7 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, assert(!predBlock->hasEHBoundaryOut()); if (isBlockVisited(predBlock)) { - if (predBlock->bbJumpKind == BBJ_COND) + if (predBlock->getBBJumpKind() == BBJ_COND) { // Special handling to improve matching on backedges. BasicBlock* otherBlock = (block == predBlock->bbNext) ? predBlock->bbJumpDest : predBlock->bbNext; @@ -8177,7 +8177,7 @@ void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block) // Note: Only switches and JCMP/JTEST (for Arm4) have input regs (and so can be fed by copies), so those // are the only block-ending branches that need special handling. regMaskTP consumedRegs = RBM_NONE; - if (block->bbJumpKind == BBJ_SWITCH) + if (block->getBBJumpKind() == BBJ_SWITCH) { // At this point, Lowering has transformed any non-switch-table blocks into // cascading ifs. @@ -8216,7 +8216,7 @@ void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block) // Note: GT_COPY has special handling in codegen and its generation is merged with the // node that consumes its result. So both, the input and output regs of GT_COPY must be // excluded from the set available for resolution. - else if (block->bbJumpKind == BBJ_COND) + else if (block->getBBJumpKind() == BBJ_COND) { GenTree* lastNode = LIR::AsRange(block).LastNode(); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 8cc25ba6b68bc1..0342221537d57d 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -6126,7 +6126,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // No unique successor. compCurBB should be a return. // - assert(compCurBB->bbJumpKind == BBJ_RETURN); + assert(compCurBB->getBBJumpKind() == BBJ_RETURN); } else { @@ -6190,7 +6190,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) // Many tailcalls will have call and ret in the same block, and thus be // BBJ_RETURN, but if the call falls through to a ret, and we are doing a // tailcall, change it here. - compCurBB->bbJumpKind = BBJ_RETURN; + compCurBB->setBBJumpKind(BBJ_RETURN DEBUG_ARG(this)); } GenTree* stmtExpr = fgMorphStmt->GetRootNode(); @@ -6329,7 +6329,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) // Fast tail call: in case of fast tail calls, we need a jmp epilog and // hence mark it as BBJ_RETURN with BBF_JMP flag set. 
- noway_assert(compCurBB->bbJumpKind == BBJ_RETURN); + noway_assert(compCurBB->getBBJumpKind() == BBJ_RETURN); if (canFastTailCall) { compCurBB->bbFlags |= BBF_HAS_JMP; @@ -6338,7 +6338,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // We call CORINFO_HELP_TAILCALL which does not return, so we will // not need epilogue. - compCurBB->bbJumpKind = BBJ_THROW; + compCurBB->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); } if (isRootReplaced) @@ -7490,7 +7490,7 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa } // Finish hooking things up. - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); fgAddRefPred(block->bbJumpDest, block); block->bbFlags &= ~BBF_HAS_JMP; } @@ -8032,7 +8032,7 @@ GenTree* Compiler::fgMorphConst(GenTree* tree) // of CORINFO_HELP_STRCNS and go to cache first giving reasonable perf. bool useLazyStrCns = false; - if (compCurBB->bbJumpKind == BBJ_THROW) + if (compCurBB->getBBJumpKind() == BBJ_THROW) { useLazyStrCns = true; } @@ -13120,7 +13120,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) return result; } - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); @@ -13183,9 +13183,9 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if (cond->AsIntCon()->gtIconVal != 0) { /* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */ - block->bbJumpKind = BBJ_ALWAYS; - bTaken = block->bbJumpDest; - bNotTaken = block->bbNext; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + bTaken = block->bbJumpDest; + bNotTaken = block->bbNext; } else { @@ -13199,9 +13199,9 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) } /* JTRUE 0 - transform the basic block into a BBJ_NONE */ - block->bbJumpKind = BBJ_NONE; - bTaken = block->bbNext; - bNotTaken = block->bbJumpDest; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + bTaken = block->bbNext; + bNotTaken = block->bbJumpDest; } if (fgHaveValidEdgeWeights) @@ -13254,7 +13254,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) FlowEdge* edge; // Now fix the weights of the edges out of 'bUpdated' - switch (bUpdated->bbJumpKind) + switch (bUpdated->getBBJumpKind()) { case BBJ_NONE: edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated); @@ -13294,8 +13294,8 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, - block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); - if (block->bbJumpKind == BBJ_ALWAYS) + block->getBBJumpKind() == BBJ_ALWAYS ? 
"BBJ_ALWAYS" : "BBJ_NONE"); + if (block->getBBJumpKind() == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } @@ -13356,7 +13356,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) } } } - else if (block->bbJumpKind == BBJ_SWITCH) + else if (block->getBBJumpKind() == BBJ_SWITCH) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); @@ -13429,13 +13429,13 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if (curJump != block->bbNext) { // transform the basic block into a BBJ_ALWAYS - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = curJump; } else { // transform the basic block into a BBJ_NONE - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } foundVal = true; } @@ -13453,8 +13453,8 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, - block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); - if (block->bbJumpKind == BBJ_ALWAYS) + block->getBBJumpKind() == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); + if (block->getBBJumpKind() == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } @@ -13727,10 +13727,10 @@ void Compiler::fgMorphStmts(BasicBlock* block) // - a tail call dispatched via runtime help (IL stubs), in which // case there will not be any tailcall and the block will be ending // with BBJ_RETURN (as normal control flow) - noway_assert((call->IsFastTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN) && + noway_assert((call->IsFastTailCall() && (compCurBB->getBBJumpKind() == BBJ_RETURN) && ((compCurBB->bbFlags & BBF_HAS_JMP)) != 0) || - (call->IsTailCallViaJitHelper() && (compCurBB->bbJumpKind == BBJ_THROW)) || - (!call->IsTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN))); + (call->IsTailCallViaJitHelper() && (compCurBB->getBBJumpKind() == BBJ_THROW)) || + (!call->IsTailCall() && (compCurBB->getBBJumpKind() == BBJ_RETURN))); } #ifdef DEBUG @@ -13806,7 +13806,7 @@ void Compiler::fgMorphStmts(BasicBlock* block) if (fgRemoveRestOfBlock) { - if ((block->bbJumpKind == BBJ_COND) || (block->bbJumpKind == BBJ_SWITCH)) + if ((block->getBBJumpKind() == BBJ_COND) || (block->getBBJumpKind() == BBJ_SWITCH)) { Statement* first = block->firstStmt(); noway_assert(first); @@ -13814,8 +13814,8 @@ void Compiler::fgMorphStmts(BasicBlock* block) noway_assert(lastStmt && lastStmt->GetNextStmt() == nullptr); GenTree* last = lastStmt->GetRootNode(); - if (((block->bbJumpKind == BBJ_COND) && (last->gtOper == GT_JTRUE)) || - ((block->bbJumpKind == BBJ_SWITCH) && (last->gtOper == GT_SWITCH))) + if (((block->getBBJumpKind() == BBJ_COND) && (last->gtOper == GT_JTRUE)) || + ((block->getBBJumpKind() == BBJ_SWITCH) && (last->gtOper == GT_SWITCH))) { GenTree* op1 = last->AsOp()->gtOp1; @@ -13923,7 +13923,7 @@ void Compiler::fgMorphBlocks() fgMorphStmts(block); // Do we need to merge the result of this block into a single return block? 
- if ((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) + if ((block->getBBJumpKind() == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { if ((genReturnBB != nullptr) && (genReturnBB != block)) { @@ -13979,7 +13979,7 @@ void Compiler::fgMorphBlocks() // void Compiler::fgMergeBlockReturn(BasicBlock* block) { - assert((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)); + assert((block->getBBJumpKind() == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)); assert((genReturnBB != nullptr) && (genReturnBB != block)); // TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN. @@ -14004,7 +14004,7 @@ void Compiler::fgMergeBlockReturn(BasicBlock* block) else #endif // !TARGET_X86 { - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = genReturnBB; fgAddRefPred(genReturnBB, block); fgReturnCount--; diff --git a/src/coreclr/jit/objectalloc.cpp b/src/coreclr/jit/objectalloc.cpp index e589bb9f92d858..473fe3c1c0cad4 100644 --- a/src/coreclr/jit/objectalloc.cpp +++ b/src/coreclr/jit/objectalloc.cpp @@ -510,7 +510,7 @@ unsigned int ObjectAllocator::MorphAllocObjNodeIntoStackAlloc(GenTreeAllocObj* a // Initialize the object memory if necessary. bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; + bool bbIsReturn = block->getBBJumpKind() == BBJ_RETURN; LclVarDsc* const lclDsc = comp->lvaGetDesc(lclNum); if (comp->fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index 2efbf40b6d5357..68191baedd2e53 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -587,7 +587,7 @@ bool OptBoolsDsc::optOptimizeCompareChainCondBlock() // Update the flow. m_comp->fgRemoveRefPred(m_b1->bbJumpDest, m_b1); - m_b1->bbJumpKind = BBJ_NONE; + m_b1->setBBJumpKind(BBJ_NONE DEBUG_ARG(m_comp)); // Fixup flags. m_b2->bbFlags |= (m_b1->bbFlags & BBF_COPY_PROPAGATE); @@ -877,18 +877,18 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() if (optReturnBlock) { m_b1->bbJumpDest = nullptr; - m_b1->bbJumpKind = BBJ_RETURN; + m_b1->setBBJumpKind(BBJ_RETURN DEBUG_ARG(m_comp)); #ifdef DEBUG m_b1->bbJumpSwt = m_b2->bbJumpSwt; #endif - assert(m_b2->bbJumpKind == BBJ_RETURN); + assert(m_b2->getBBJumpKind() == BBJ_RETURN); assert(m_b1->bbNext == m_b2); assert(m_b3 != nullptr); } else { - assert(m_b1->bbJumpKind == BBJ_COND); - assert(m_b2->bbJumpKind == BBJ_COND); + assert(m_b1->getBBJumpKind() == BBJ_COND); + assert(m_b2->getBBJumpKind() == BBJ_COND); assert(m_b1->bbJumpDest == m_b2->bbJumpDest); assert(m_b1->bbNext == m_b2); assert(m_b2->bbNext != nullptr); @@ -1180,7 +1180,7 @@ void OptBoolsDsc::optOptimizeBoolsGcStress() return; } - assert(m_b1->bbJumpKind == BBJ_COND); + assert(m_b1->getBBJumpKind() == BBJ_COND); Statement* const stmt = m_b1->lastStmt(); GenTree* const cond = stmt->GetRootNode(); @@ -1469,7 +1469,7 @@ PhaseStatus Compiler::optOptimizeBools() // We're only interested in conditional jumps here - if (b1->bbJumpKind != BBJ_COND) + if (b1->getBBJumpKind() != BBJ_COND) { continue; } @@ -1492,7 +1492,7 @@ PhaseStatus Compiler::optOptimizeBools() // The next block needs to be a condition or return block. 
- if (b2->bbJumpKind == BBJ_COND) + if (b2->getBBJumpKind() == BBJ_COND) { if ((b1->bbJumpDest != b2->bbJumpDest) && (b1->bbJumpDest != b2->bbNext)) { @@ -1517,7 +1517,7 @@ PhaseStatus Compiler::optOptimizeBools() } #endif } - else if (b2->bbJumpKind == BBJ_RETURN) + else if (b2->getBBJumpKind() == BBJ_RETURN) { // Set b3 to b1 jump destination BasicBlock* b3 = b1->bbJumpDest; @@ -1531,7 +1531,7 @@ PhaseStatus Compiler::optOptimizeBools() // b3 must be RETURN type - if (b3->bbJumpKind != BBJ_RETURN) + if (b3->getBBJumpKind() != BBJ_RETURN) { continue; } diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index a583db4b3562c4..59d50c68501975 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -741,7 +741,7 @@ bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenT bool initBlockOk = (predBlock == initBlock); if (!initBlockOk) { - if ((predBlock->bbJumpKind == BBJ_NONE) && (predBlock->bbNext == optLoopTable[loopInd].lpEntry) && + if ((predBlock->getBBJumpKind() == BBJ_NONE) && (predBlock->bbNext == optLoopTable[loopInd].lpEntry) && (predBlock->countOfInEdges() == 1) && (predBlock->firstStmt() == nullptr) && (predBlock->bbPrev != nullptr) && predBlock->bbPrev->bbFallsThrough()) { @@ -1150,8 +1150,8 @@ bool Compiler::optExtractInitTestIncr( // If we are rebuilding the loop table, we would already have the pre-header block introduced // the first time, which might be empty if no hoisting has yet occurred. In this case, look a // little harder for the possible loop initialization statement. - if ((initBlock->bbJumpKind == BBJ_NONE) && (initBlock->bbNext == top) && (initBlock->countOfInEdges() == 1) && - (initBlock->bbPrev != nullptr) && initBlock->bbPrev->bbFallsThrough()) + if ((initBlock->getBBJumpKind() == BBJ_NONE) && (initBlock->bbNext == top) && + (initBlock->countOfInEdges() == 1) && (initBlock->bbPrev != nullptr) && initBlock->bbPrev->bbFallsThrough()) { initBlock = initBlock->bbPrev; phdrStmt = initBlock->firstStmt(); @@ -1305,7 +1305,7 @@ bool Compiler::optRecordLoop( // 5. Finding a constant initializer is optional; if the initializer is not found, or is not constant, // it is still considered a for-like loop. // - if (bottom->bbJumpKind == BBJ_COND) + if (bottom->getBBJumpKind() == BBJ_COND) { GenTree* init; GenTree* test; @@ -1385,7 +1385,7 @@ void Compiler::optCheckPreds() } } noway_assert(bb); - switch (bb->bbJumpKind) + switch (bb->getBBJumpKind()) { case BBJ_COND: if (bb->bbJumpDest == block) @@ -1801,7 +1801,7 @@ class LoopSearch // BasicBlock* FindEntry(BasicBlock* head, BasicBlock* top, BasicBlock* bottom) { - if (head->bbJumpKind == BBJ_ALWAYS) + if (head->getBBJumpKind() == BBJ_ALWAYS) { if (head->bbJumpDest->bbNum <= bottom->bbNum && head->bbJumpDest->bbNum >= top->bbNum) { @@ -2294,7 +2294,7 @@ class LoopSearch { // Need to reconnect the flow from `block` to `oldNext`. - if ((block->bbJumpKind == BBJ_COND) && (block->bbJumpDest == newNext)) + if ((block->getBBJumpKind() == BBJ_COND) && (block->bbJumpDest == newNext)) { // Reverse the jump condition GenTree* test = block->lastNode(); @@ -2321,7 +2321,7 @@ class LoopSearch noway_assert((newBlock == nullptr) || loopBlocks.CanRepresent(newBlock->bbNum)); } } - else if ((block->bbJumpKind == BBJ_ALWAYS) && (block->bbJumpDest == newNext)) + else if ((block->getBBJumpKind() == BBJ_ALWAYS) && (block->bbJumpDest == newNext)) { // We've made `block`'s jump target its bbNext, so remove the jump. 
if (!comp->fgOptimizeBranchToNext(block, newNext, block->bbPrev)) @@ -2398,7 +2398,7 @@ class LoopSearch { BasicBlock* exitPoint; - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_COND: case BBJ_CALLFINALLY: @@ -2416,7 +2416,7 @@ class LoopSearch // On non-funclet platforms (x86), the catch exit is a BBJ_ALWAYS, but we don't want that to // be considered a loop exit block, as catch handlers don't have predecessor lists and don't // show up as might be expected in the dominator tree. - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->getBBJumpKind() == BBJ_ALWAYS) { if (!BasicBlock::sameHndRegion(block, exitPoint)) { @@ -2738,7 +2738,7 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R BasicBlock* newJumpDest = nullptr; - switch (blk->bbJumpKind) + switch (blk->getBBJumpKind()) { case BBJ_NONE: case BBJ_THROW: @@ -2818,10 +2818,10 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R // TODO-Cleanup: This should be a static member of the BasicBlock class. void Compiler::optCopyBlkDest(BasicBlock* from, BasicBlock* to) { - assert(from->bbJumpKind == to->bbJumpKind); // Precondition. + assert(from->getBBJumpKind() == to->getBBJumpKind()); // Precondition. // copy the jump destination(s) from "from" to "to". - switch (to->bbJumpKind) + switch (to->getBBJumpKind()) { case BBJ_ALWAYS: case BBJ_LEAVE: @@ -2936,7 +2936,7 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd) // entry block. If the `head` branches to `top` because it is the BBJ_ALWAYS of a // BBJ_CALLFINALLY/BBJ_ALWAYS pair, we canonicalize by introducing a new fall-through // head block. See FindEntry() for the logic that allows this. - if ((h->bbJumpKind == BBJ_ALWAYS) && (h->bbJumpDest == t) && (h->bbFlags & BBF_KEEP_BBJ_ALWAYS)) + if ((h->getBBJumpKind() == BBJ_ALWAYS) && (h->bbJumpDest == t) && (h->bbFlags & BBF_KEEP_BBJ_ALWAYS)) { // Insert new head @@ -3030,7 +3030,7 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd) // not keeping pred lists in good shape. 
// BasicBlock* const t = optLoopTable[loopInd].lpTop; - assert(siblingB->bbJumpKind == BBJ_COND); + assert(siblingB->getBBJumpKind() == BBJ_COND); assert(siblingB->bbNext == t); JITDUMP(FMT_LP " head " FMT_BB " is also " FMT_LP " bottom\n", loopInd, h->bbNum, sibling); @@ -3207,8 +3207,8 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati // assert(h->bbNext == t); assert(h->bbFallsThrough()); - assert((h->bbJumpKind == BBJ_NONE) || (h->bbJumpKind == BBJ_COND)); - if (h->bbJumpKind == BBJ_COND) + assert((h->getBBJumpKind() == BBJ_NONE) || (h->getBBJumpKind() == BBJ_COND)); + if (h->getBBJumpKind() == BBJ_COND) { BasicBlock* const hj = h->bbJumpDest; assert((hj->bbNum < t->bbNum) || (hj->bbNum > b->bbNum)); @@ -3360,7 +3360,7 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati childLoop = optLoopTable[childLoop].lpSibling) { if ((optLoopTable[childLoop].lpEntry == origE) && (optLoopTable[childLoop].lpHead == h) && - (newT->bbJumpKind == BBJ_NONE) && (newT->bbNext == origE)) + (newT->getBBJumpKind() == BBJ_NONE) && (newT->bbNext == origE)) { optUpdateLoopHead(childLoop, h, newT); @@ -4280,7 +4280,7 @@ PhaseStatus Compiler::optUnrollLoops() goto DONE_LOOP; } - if (block->bbJumpKind == BBJ_RETURN) + if (block->getBBJumpKind() == BBJ_RETURN) { ++loopRetCount; } @@ -4361,7 +4361,7 @@ PhaseStatus Compiler::optUnrollLoops() for (BasicBlock* block = loop.lpTop; block != loop.lpBottom->bbNext; block = block->bbNext) { BasicBlock* newBlock = insertAfter = - fgNewBBafter(block->bbJumpKind, insertAfter, /*extendRegion*/ true); + fgNewBBafter(block->getBBJumpKind(), insertAfter, /*extendRegion*/ true); blockMap.Set(block, newBlock, BlockToBlockMap::Overwrite); if (!BasicBlock::CloneBlockState(this, newBlock, block, lvar, lval)) @@ -4415,7 +4415,7 @@ PhaseStatus Compiler::optUnrollLoops() { testCopyStmt->SetRootNode(sideEffList); } - newBlock->bbJumpKind = BBJ_NONE; + newBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } } @@ -4486,8 +4486,8 @@ PhaseStatus Compiler::optUnrollLoops() fgRemoveAllRefPreds(succ, block); } + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); block->bbStmtList = nullptr; - block->bbJumpKind = BBJ_NONE; block->bbJumpDest = nullptr; block->bbNatLoopNum = newLoopNum; @@ -4524,21 +4524,21 @@ PhaseStatus Compiler::optUnrollLoops() // // If the initBlock is a BBJ_COND drop the condition (and make initBlock a BBJ_NONE block). // - if (initBlock->bbJumpKind == BBJ_COND) + if (initBlock->getBBJumpKind() == BBJ_COND) { assert(dupCond); Statement* initBlockBranchStmt = initBlock->lastStmt(); noway_assert(initBlockBranchStmt->GetRootNode()->OperIs(GT_JTRUE)); fgRemoveStmt(initBlock, initBlockBranchStmt); fgRemoveRefPred(initBlock->bbJumpDest, initBlock); - initBlock->bbJumpKind = BBJ_NONE; + initBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } else { /* the loop must execute */ assert(!dupCond); assert(totalIter > 0); - noway_assert(initBlock->bbJumpKind == BBJ_NONE); + noway_assert(initBlock->getBBJumpKind() == BBJ_NONE); } // The loop will be removed, so no need to fix up the pre-header. @@ -4548,7 +4548,7 @@ PhaseStatus Compiler::optUnrollLoops() // For unrolled loops, all the unrolling preconditions require the pre-header block to fall // through into TOP. - assert(head->bbJumpKind == BBJ_NONE); + assert(head->getBBJumpKind() == BBJ_NONE); } // If we actually unrolled, tail is now reached @@ -4840,7 +4840,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) // Does the BB end with an unconditional jump? 
- if (block->bbJumpKind != BBJ_ALWAYS || (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) + if (block->getBBJumpKind() != BBJ_ALWAYS || (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) { // It can't be one of the ones we use for our exception magic return false; @@ -4850,7 +4850,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) BasicBlock* const bTest = block->bbJumpDest; // Does the bTest consist of 'jtrue(cond) block' ? - if (bTest->bbJumpKind != BBJ_COND) + if (bTest->getBBJumpKind() != BBJ_COND) { return false; } @@ -5077,7 +5077,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) bool foundCondTree = false; // Create a new block after `block` to put the copied condition code. - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); block->bbJumpDest = nullptr; BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true); @@ -5434,7 +5434,7 @@ void Compiler::optMarkLoopHeads() { if (blockNum <= predBlock->bbNum) { - if (predBlock->bbJumpKind == BBJ_CALLFINALLY) + if (predBlock->getBBJumpKind() == BBJ_CALLFINALLY) { // Loops never have BBJ_CALLFINALLY as the source of their "back edge". continue; @@ -5539,7 +5539,7 @@ void Compiler::optFindAndScaleGeneralLoopBlocks() } // We only consider back-edges that are BBJ_COND or BBJ_ALWAYS for loops. - if ((bottom->bbJumpKind != BBJ_COND) && (bottom->bbJumpKind != BBJ_ALWAYS)) + if ((bottom->getBBJumpKind() != BBJ_COND) && (bottom->getBBJumpKind() != BBJ_ALWAYS)) { continue; } @@ -8198,7 +8198,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum) // The preheader block is part of the containing loop (if any). preHead->bbNatLoopNum = loop.lpParent; - if (fgIsUsingProfileWeights() && (head->bbJumpKind == BBJ_COND)) + if (fgIsUsingProfileWeights() && (head->getBBJumpKind() == BBJ_COND)) { if ((head->bbWeight == BB_ZERO_WEIGHT) || (entry->bbWeight == BB_ZERO_WEIGHT)) { @@ -8306,7 +8306,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum) continue; } - switch (predBlock->bbJumpKind) + switch (predBlock->getBBJumpKind()) { case BBJ_NONE: // This 'entry' predecessor that isn't dominated by 'entry' must be outside the loop, @@ -9181,7 +9181,7 @@ void Compiler::optRemoveRedundantZeroInits() if (tree->Data()->IsIntegralConst(0)) { bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; + bool bbIsReturn = block->getBBJumpKind() == BBJ_RETURN; if (!bbInALoop || bbIsReturn) { diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp index a2d6cb56335379..2423a6d9da47aa 100644 --- a/src/coreclr/jit/patchpoint.cpp +++ b/src/coreclr/jit/patchpoint.cpp @@ -145,7 +145,7 @@ class PatchpointTransformer BasicBlock* helperBlock = CreateAndInsertBasicBlock(BBJ_NONE, block); // Update flow and flags - block->bbJumpKind = BBJ_COND; + block->setBBJumpKind(BBJ_COND DEBUG_ARG(compiler)); block->bbJumpDest = remainderBlock; block->bbFlags |= BBF_INTERNAL; @@ -233,7 +233,7 @@ class PatchpointTransformer } // Update flow - block->bbJumpKind = BBJ_THROW; + block->setBBJumpKind(BBJ_THROW DEBUG_ARG(compiler)); block->bbJumpDest = nullptr; // Add helper call diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp index 32369d303d206e..cdf76a4e5a6b6b 100644 --- a/src/coreclr/jit/redundantbranchopts.cpp +++ b/src/coreclr/jit/redundantbranchopts.cpp @@ -44,7 +44,7 @@ PhaseStatus Compiler::optRedundantBranches() // We currently can optimize some BBJ_CONDs. 
// - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { bool madeChangesThisBlock = m_compiler->optRedundantRelop(block); @@ -57,7 +57,7 @@ PhaseStatus Compiler::optRedundantBranches() // a BBJ_COND, retry; perhaps one of the later optimizations // we can do has enabled one of the earlier optimizations. // - if (madeChangesThisBlock && (block->bbJumpKind == BBJ_COND)) + if (madeChangesThisBlock && (block->getBBJumpKind() == BBJ_COND)) { JITDUMP("Will retry RBO in " FMT_BB " after partial optimization\n", block->bbNum); madeChangesThisBlock |= m_compiler->optRedundantBranch(block); @@ -508,7 +508,7 @@ bool Compiler::optRedundantBranch(BasicBlock* const block) // Check the current dominator // - if (domBlock->bbJumpKind == BBJ_COND) + if (domBlock->getBBJumpKind() == BBJ_COND) { Statement* const domJumpStmt = domBlock->lastStmt(); GenTree* const domJumpTree = domJumpStmt->GetRootNode(); @@ -971,8 +971,8 @@ bool Compiler::optJumpThreadCheck(BasicBlock* const block, BasicBlock* const dom // bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop) { - assert(block->bbJumpKind == BBJ_COND); - assert(domBlock->bbJumpKind == BBJ_COND); + assert(block->getBBJumpKind() == BBJ_COND); + assert(domBlock->getBBJumpKind() == BBJ_COND); // If the dominating block is not the immediate dominator // we might need to duplicate a lot of code to thread @@ -990,7 +990,7 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl BasicBlock* idomBlock = block->bbIDom; while ((idomBlock != nullptr) && (idomBlock != domBlock)) { - if (idomBlock->bbJumpKind == BBJ_COND) + if (idomBlock->getBBJumpKind() == BBJ_COND) { JITDUMP(" -- " FMT_BB " not closest branching dom, so no threading\n", idomBlock->bbNum); return false; @@ -1082,7 +1082,7 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl // Treat switch preds as ambiguous for now. // - if (predBlock->bbJumpKind == BBJ_SWITCH) + if (predBlock->getBBJumpKind() == BBJ_SWITCH) { JITDUMP(FMT_BB " is a switch pred\n", predBlock->bbNum); BlockSetOps::AddElemD(this, jti.m_ambiguousPreds, predBlock->bbNum); @@ -1450,8 +1450,9 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // const bool fallThroughIsTruePred = BlockSetOps::IsMember(this, jti.m_truePreds, jti.m_fallThroughPred->bbNum); - if ((jti.m_fallThroughPred->bbJumpKind == BBJ_NONE) && ((fallThroughIsTruePred && (jti.m_numFalsePreds == 0)) || - (!fallThroughIsTruePred && (jti.m_numTruePreds == 0)))) + if ((jti.m_fallThroughPred->getBBJumpKind() == BBJ_NONE) && + ((fallThroughIsTruePred && (jti.m_numFalsePreds == 0)) || + (!fallThroughIsTruePred && (jti.m_numTruePreds == 0)))) { JITDUMP(FMT_BB " has ambiguous preds and a (%s) fall through pred and no (%s) preds.\n" "Converting fall through pred " FMT_BB " to BBJ_ALWAYS\n", @@ -1460,7 +1461,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // Possibly defer this until after early out below. 
// - jti.m_fallThroughPred->bbJumpKind = BBJ_ALWAYS; + jti.m_fallThroughPred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); jti.m_fallThroughPred->bbJumpDest = jti.m_block; modifiedFlow = true; } @@ -1532,7 +1533,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) fgRemoveStmt(jti.m_block, lastStmt); JITDUMP(" repurposing " FMT_BB " to always jump to " FMT_BB "\n", jti.m_block->bbNum, jti.m_trueTarget->bbNum); fgRemoveRefPred(jti.m_falseTarget, jti.m_block); - jti.m_block->bbJumpKind = BBJ_ALWAYS; + jti.m_block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); } else if (falsePredsWillReuseBlock) { @@ -1541,7 +1542,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) JITDUMP(" repurposing " FMT_BB " to always fall through to " FMT_BB "\n", jti.m_block->bbNum, jti.m_falseTarget->bbNum); fgRemoveRefPred(jti.m_trueTarget, jti.m_block); - jti.m_block->bbJumpKind = BBJ_NONE; + jti.m_block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } // Now reroute the flow from the predecessors. @@ -1623,7 +1624,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // surviving ssa input, and update all the value numbers...) // BasicBlock* const ambBlock = jti.m_ambiguousVNBlock; - if ((ambBlock != nullptr) && (jti.m_block->bbJumpKind == BBJ_COND) && + if ((ambBlock != nullptr) && (jti.m_block->getBBJumpKind() == BBJ_COND) && (jti.m_block->GetUniquePred(this) == ambBlock)) { JITDUMP(FMT_BB " has just one remaining predcessor " FMT_BB "\n", jti.m_block->bbNum, ambBlock->bbNum); diff --git a/src/coreclr/jit/switchrecognition.cpp b/src/coreclr/jit/switchrecognition.cpp index 5052e6ff57411b..90bfa43142e75a 100644 --- a/src/coreclr/jit/switchrecognition.cpp +++ b/src/coreclr/jit/switchrecognition.cpp @@ -319,7 +319,7 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* assert(isTest); // Convert firstBlock to a switch block - firstBlock->bbJumpKind = BBJ_SWITCH; + firstBlock->setBBJumpKind(BBJ_SWITCH DEBUG_ARG(this)); firstBlock->bbJumpDest = nullptr; firstBlock->bbCodeOffsEnd = lastBlock->bbCodeOffsEnd; firstBlock->lastStmt()->GetRootNode()->ChangeOper(GT_SWITCH); From 23787976a10045d7fc3f751a7962c800d82588ee Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Mon, 2 Oct 2023 19:15:21 -0400 Subject: [PATCH 02/14] Convert bbJumpKind comparisons to KindIs() --- src/coreclr/jit/assertionprop.cpp | 6 +- src/coreclr/jit/block.cpp | 6 +- src/coreclr/jit/codegenarm.cpp | 6 +- src/coreclr/jit/codegenarm64.cpp | 6 +- src/coreclr/jit/codegenarmarch.cpp | 2 +- src/coreclr/jit/codegencommon.cpp | 4 +- src/coreclr/jit/codegenlinear.cpp | 8 +- src/coreclr/jit/codegenloongarch64.cpp | 6 +- src/coreclr/jit/codegenriscv64.cpp | 6 +- src/coreclr/jit/codegenxarch.cpp | 8 +- src/coreclr/jit/compiler.cpp | 5 +- src/coreclr/jit/compiler.hpp | 10 +- src/coreclr/jit/emitarm.cpp | 4 +- src/coreclr/jit/emitarm64.cpp | 4 +- src/coreclr/jit/emitloongarch64.cpp | 2 +- src/coreclr/jit/emitriscv64.cpp | 2 +- src/coreclr/jit/emitxarch.cpp | 4 +- src/coreclr/jit/fgbasic.cpp | 56 +-- src/coreclr/jit/fgdiagnostic.cpp | 39 +- src/coreclr/jit/fgehopt.cpp | 46 +-- src/coreclr/jit/fgflow.cpp | 10 +- src/coreclr/jit/fginline.cpp | 6 +- src/coreclr/jit/fgopt.cpp | 428 ++++++++++---------- src/coreclr/jit/fgprofile.cpp | 34 +- src/coreclr/jit/fgprofilesynthesis.cpp | 15 +- src/coreclr/jit/flowgraph.cpp | 16 +- src/coreclr/jit/gschecks.cpp | 2 +- src/coreclr/jit/ifconversion.cpp | 6 +- src/coreclr/jit/importer.cpp | 60 ++- src/coreclr/jit/importercalls.cpp | 6 +- src/coreclr/jit/indirectcalltransformer.cpp | 4 +- 
src/coreclr/jit/jiteh.cpp | 14 +- src/coreclr/jit/lclvars.cpp | 2 +- src/coreclr/jit/liveness.cpp | 4 +- src/coreclr/jit/loopcloning.cpp | 22 +- src/coreclr/jit/lower.cpp | 10 +- src/coreclr/jit/lsra.cpp | 14 +- src/coreclr/jit/morph.cpp | 36 +- src/coreclr/jit/objectalloc.cpp | 2 +- src/coreclr/jit/optimizebools.cpp | 16 +- src/coreclr/jit/optimizer.cpp | 48 +-- src/coreclr/jit/redundantbranchopts.cpp | 22 +- 42 files changed, 498 insertions(+), 509 deletions(-) diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp index 26f1a3a4d71ec5..044d1dc0679a19 100644 --- a/src/coreclr/jit/assertionprop.cpp +++ b/src/coreclr/jit/assertionprop.cpp @@ -5260,7 +5260,7 @@ class AssertionPropFlowCallback { ASSERT_TP pAssertionOut; - if (predBlock->getBBJumpKind() == BBJ_COND && (predBlock->bbJumpDest == block)) + if (predBlock->KindIs(BBJ_COND) && (predBlock->bbJumpDest == block)) { pAssertionOut = mJumpDestOut[predBlock->bbNum]; @@ -5460,7 +5460,7 @@ ASSERT_TP* Compiler::optComputeAssertionGen() printf(FMT_BB " valueGen = ", block->bbNum); optPrintAssertionIndices(block->bbAssertionGen); - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { printf(" => " FMT_BB " valueGen = ", block->bbJumpDest->bbNum); optPrintAssertionIndices(jumpDestGen[block->bbNum]); @@ -6020,7 +6020,7 @@ PhaseStatus Compiler::optAssertionPropMain() printf(FMT_BB ":\n", block->bbNum); optDumpAssertionIndices(" in = ", block->bbAssertionIn, "\n"); optDumpAssertionIndices(" out = ", block->bbAssertionOut, "\n"); - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { printf(" " FMT_BB " = ", block->bbJumpDest->bbNum); optDumpAssertionIndices(bbJtrueAssertionOut[block->bbNum], "\n"); diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 742025a619e736..a5798928b59591 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -1499,9 +1499,9 @@ BasicBlock* Compiler::bbNewBasicBlock(BBjumpKinds jumpKind) bool BasicBlock::isBBCallAlwaysPair() const { #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) - if (this->getBBJumpKind() == BBJ_CALLFINALLY) + if (this->KindIs(BBJ_CALLFINALLY)) #else - if ((this->getBBJumpKind() == BBJ_CALLFINALLY) && !(this->bbFlags & BBF_RETLESS_CALL)) + if (this->KindIs(BBJ_CALLFINALLY) && !(this->bbFlags & BBF_RETLESS_CALL)) #endif { #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -1510,7 +1510,7 @@ bool BasicBlock::isBBCallAlwaysPair() const #endif // Some asserts that the next block is a BBJ_ALWAYS of the proper form. 
assert(this->bbNext != nullptr); - assert(this->bbNext->getBBJumpKind() == BBJ_ALWAYS); + assert(this->bbNext->KindIs(BBJ_ALWAYS)); assert(this->bbNext->bbFlags & BBF_KEEP_BBJ_ALWAYS); assert(this->bbNext->isEmpty()); diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp index 3c8e8cdad6128b..54c4b7e20dcd55 100644 --- a/src/coreclr/jit/codegenarm.cpp +++ b/src/coreclr/jit/codegenarm.cpp @@ -124,7 +124,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) assert(block->isBBCallAlwaysPair()); assert(block->bbNext != NULL); - assert(block->bbNext->getBBJumpKind() == BBJ_ALWAYS); + assert(block->bbNext->KindIs(BBJ_ALWAYS)); assert(block->bbNext->bbJumpDest != NULL); assert(block->bbNext->bbJumpDest->bbFlags & BBF_FINALLY_TARGET); @@ -630,7 +630,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); + noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -1294,7 +1294,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJTrue(GenTreeOp* jtrue) { - assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); GenTree* op = jtrue->gtGetOp1(); regNumber reg = genConsumeReg(op); diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index c2a0823a091798..daf56c5a8654be 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -3745,7 +3745,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); + noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -4646,7 +4646,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJTrue(GenTreeOp* jtrue) { - assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); GenTree* op = jtrue->gtGetOp1(); regNumber reg = genConsumeReg(op); @@ -4837,7 +4837,7 @@ void CodeGen::genCodeForSelect(GenTreeOp* tree) // void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) { - assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp index 6c0f23d4f488da..9a3698627fac53 100644 --- a/src/coreclr/jit/codegenarmarch.cpp +++ b/src/coreclr/jit/codegenarmarch.cpp @@ -5515,7 +5515,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) { SetHasTailCalls(true); - noway_assert(block->getBBJumpKind() == BBJ_RETURN); + noway_assert(block->KindIs(BBJ_RETURN)); noway_assert(block->GetFirstLIRNode() != nullptr); /* figure out what jump we have */ diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 916ac7854a33a5..6a1e1cecbc0e73 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -2256,7 +2256,7 @@ void CodeGen::genReportEH() { for (BasicBlock* const block : compiler->Blocks()) { - if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { 
++clonedFinallyCount; } @@ -2582,7 +2582,7 @@ void CodeGen::genReportEH() unsigned reportedClonedFinallyCount = 0; for (BasicBlock* const block : compiler->Blocks()) { - if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { UNATIVE_OFFSET hndBeg, hndEnd; diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index fdb473fe29ed7a..f9d5d1c7cfc040 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -330,7 +330,7 @@ void CodeGen::genCodeForBBlist() // // Note: We need to have set compCurBB before calling emitAddLabel // - if ((block->bbPrev != nullptr) && (block->bbPrev->getBBJumpKind() == BBJ_COND) && + if ((block->bbPrev != nullptr) && block->bbPrev->KindIs(BBJ_COND) && (block->bbWeight != block->bbPrev->bbWeight)) { JITDUMP("Adding label due to BB weight difference: BBJ_COND " FMT_BB " with weight " FMT_WT @@ -812,10 +812,10 @@ void CodeGen::genCodeForBBlist() assert(ShouldAlignLoops()); assert(!block->isBBCallAlwaysPairTail()); #if FEATURE_EH_CALLFINALLY_THUNKS - assert(block->getBBJumpKind() != BBJ_CALLFINALLY); + assert(!block->KindIs(BBJ_CALLFINALLY)); #endif // FEATURE_EH_CALLFINALLY_THUNKS - GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->getBBJumpKind() == BBJ_ALWAYS)); + GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->KindIs(BBJ_ALWAYS))); } if ((block->bbNext != nullptr) && (block->bbNext->isLoopAlign())) @@ -2615,7 +2615,7 @@ void CodeGen::genStoreLongLclVar(GenTree* treeNode) // void CodeGen::genCodeForJcc(GenTreeCC* jcc) { - assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); assert(jcc->OperIs(GT_JCC)); inst_JCC(jcc->gtCondition, compiler->compCurBB->bbJumpDest); diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index 26bbc218fc1a7a..6c2d7b26b0eabb 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -1217,7 +1217,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) { SetHasTailCalls(true); - noway_assert(block->getBBJumpKind() == BBJ_RETURN); + noway_assert(block->KindIs(BBJ_RETURN)); noway_assert(block->GetFirstLIRNode() != nullptr); /* figure out what jump we have */ @@ -2928,7 +2928,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); + noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -4136,7 +4136,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // A GT_JCMP node is created for an integer-comparison's conditional branch. 
void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) { - assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); assert(tree->OperIs(GT_JCMP)); assert(!varTypeIsFloating(tree)); diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp index 7d8f3a8233d0da..d411cc292f4bd5 100644 --- a/src/coreclr/jit/codegenriscv64.cpp +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -886,7 +886,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) { SetHasTailCalls(true); - noway_assert(block->getBBJumpKind() == BBJ_RETURN); + noway_assert(block->KindIs(BBJ_RETURN)); noway_assert(block->GetFirstLIRNode() != nullptr); /* figure out what jump we have */ @@ -2574,7 +2574,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); + noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -3780,7 +3780,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) { - assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); assert(tree->OperIs(GT_JCMP)); assert(!varTypeIsFloating(tree)); diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index cc959b33e344a9..3ac1a84d9307ca 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -369,7 +369,7 @@ void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block) } else { - assert(block->getBBJumpKind() == BBJ_EHFILTERRET); + assert(block->KindIs(BBJ_EHFILTERRET)); // The return value has already been computed. 
instGen_Return(0); @@ -1441,7 +1441,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJTrue(GenTreeOp* jtrue) { - assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); GenTree* op = jtrue->gtGetOp1(); regNumber reg = genConsumeReg(op); @@ -4263,7 +4263,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); + noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -10241,7 +10241,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) if (jmpEpilog) { - noway_assert(block->getBBJumpKind() == BBJ_RETURN); + noway_assert(block->KindIs(BBJ_RETURN)); noway_assert(block->GetFirstLIRNode()); // figure out what jump we have diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index 65d01e701d2e61..c3f63b48e4ab4c 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -5275,8 +5275,7 @@ PhaseStatus Compiler::placeLoopAlignInstructions() } // If there is an unconditional jump (which is not part of callf/always pair) - if (opts.compJitHideAlignBehindJmp && (block->getBBJumpKind() == BBJ_ALWAYS) && - !block->isBBCallAlwaysPairTail()) + if (opts.compJitHideAlignBehindJmp && block->KindIs(BBJ_ALWAYS) && !block->isBBCallAlwaysPairTail()) { // Track the lower weight blocks if (block->bbWeight < minBlockSoFar) @@ -5301,7 +5300,7 @@ PhaseStatus Compiler::placeLoopAlignInstructions() bool unmarkedLoopAlign = false; #if FEATURE_EH_CALLFINALLY_THUNKS - if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { // It must be a retless BBJ_CALLFINALLY if we get here. 
assert(!block->isBBCallAlwaysPair()); diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index 39c5ecd33681e5..8ac6d7bdf47b7e 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -635,7 +635,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->getBBJumpKind() != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) + if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { continue; } @@ -649,7 +649,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->getBBJumpKind() != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) + if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { continue; } @@ -769,7 +769,7 @@ BasicBlockVisit BasicBlock::VisitRegularSuccs(Compiler* comp, TFunc func) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->getBBJumpKind() != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) + if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { continue; } @@ -3125,7 +3125,7 @@ inline bool Compiler::fgIsThrowHlpBlk(BasicBlock* block) return false; } - if (!(block->bbFlags & BBF_INTERNAL) || block->getBBJumpKind() != BBJ_THROW) + if (!(block->bbFlags & BBF_INTERNAL) || !block->KindIs(BBJ_THROW)) { return false; } @@ -3236,7 +3236,7 @@ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block) if (isCallAlwaysPair) { BasicBlock* leaveBlk = block->bbNext; - noway_assert(leaveBlk->getBBJumpKind() == BBJ_ALWAYS); + noway_assert(leaveBlk->KindIs(BBJ_ALWAYS)); // leaveBlk is now unreachable, so scrub the pred lists. 
leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; diff --git a/src/coreclr/jit/emitarm.cpp b/src/coreclr/jit/emitarm.cpp index 10a1beadf139fa..33ae40ee208ef5 100644 --- a/src/coreclr/jit/emitarm.cpp +++ b/src/coreclr/jit/emitarm.cpp @@ -4379,7 +4379,7 @@ void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount /* = 0 #ifdef DEBUG // Mark the finally call - if (ins == INS_b && emitComp->compCurBB->getBBJumpKind() == BBJ_CALLFINALLY) + if (ins == INS_b && emitComp->compCurBB->KindIs(BBJ_CALLFINALLY)) { id->idDebugOnlyInfo()->idFinallyCall = true; } @@ -4523,7 +4523,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) + if (emitComp->compCurBB->KindIs(BBJ_EHCATCHRET)) { id->idDebugOnlyInfo()->idCatchRet = true; } diff --git a/src/coreclr/jit/emitarm64.cpp b/src/coreclr/jit/emitarm64.cpp index 82131ee325dd40..5e0b4f2e78a959 100644 --- a/src/coreclr/jit/emitarm64.cpp +++ b/src/coreclr/jit/emitarm64.cpp @@ -8495,7 +8495,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) + if (emitComp->compCurBB->KindIs(BBJ_EHCATCHRET)) { id->idDebugOnlyInfo()->idCatchRet = true; } @@ -8670,7 +8670,7 @@ void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount) #ifdef DEBUG // Mark the finally call - if (ins == INS_bl_local && emitComp->compCurBB->getBBJumpKind() == BBJ_CALLFINALLY) + if (ins == INS_bl_local && emitComp->compCurBB->KindIs(BBJ_CALLFINALLY)) { id->idDebugOnlyInfo()->idFinallyCall = true; } diff --git a/src/coreclr/jit/emitloongarch64.cpp b/src/coreclr/jit/emitloongarch64.cpp index d6004451fcb874..40c4937fe3b6f1 100644 --- a/src/coreclr/jit/emitloongarch64.cpp +++ b/src/coreclr/jit/emitloongarch64.cpp @@ -2046,7 +2046,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) + if (emitComp->compCurBB->KindIs(BBJ_EHCATCHRET)) { id->idDebugOnlyInfo()->idCatchRet = true; } diff --git a/src/coreclr/jit/emitriscv64.cpp b/src/coreclr/jit/emitriscv64.cpp index bfc91a35615728..fd99e65dc73739 100644 --- a/src/coreclr/jit/emitriscv64.cpp +++ b/src/coreclr/jit/emitriscv64.cpp @@ -1030,7 +1030,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) + if (emitComp->compCurBB->KindIs(BBJ_EHCATCHRET)) { id->idDebugOnlyInfo()->idCatchRet = true; } diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp index 3e2afe7a830c1c..d03bb82ea9cc5a 100644 --- a/src/coreclr/jit/emitxarch.cpp +++ b/src/coreclr/jit/emitxarch.cpp @@ -7614,7 +7614,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) + if (emitComp->compCurBB->KindIs(BBJ_EHCATCHRET)) { id->idDebugOnlyInfo()->idCatchRet = true; } @@ -9221,7 +9221,7 @@ void emitter::emitIns_J(instruction ins, #ifdef DEBUG // Mark the finally call - if (ins == INS_call && emitComp->compCurBB->getBBJumpKind() == BBJ_CALLFINALLY) + if (ins == INS_call && emitComp->compCurBB->KindIs(BBJ_CALLFINALLY)) { id->idDebugOnlyInfo()->idFinallyCall = true; } diff --git a/src/coreclr/jit/fgbasic.cpp 
b/src/coreclr/jit/fgbasic.cpp index 3573a015de3856..254372e770c3e1 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -206,7 +206,7 @@ BasicBlock* Compiler::fgNewBasicBlock(BBjumpKinds jumpKind) /* Allocate the block descriptor */ block = bbNewBasicBlock(jumpKind); - noway_assert(block->getBBJumpKind() == jumpKind); + noway_assert(block->KindIs(jumpKind)); /* Append the block to the end of the global basic block list */ @@ -395,7 +395,7 @@ void Compiler::fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSw { noway_assert(oldSwitchBlock != nullptr); noway_assert(newSwitchBlock != nullptr); - noway_assert(oldSwitchBlock->getBBJumpKind() == BBJ_SWITCH); + noway_assert(oldSwitchBlock->KindIs(BBJ_SWITCH)); assert(fgPredsComputed); // Walk the switch's jump table, updating the predecessor for each branch. @@ -457,7 +457,7 @@ void Compiler::fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* ne noway_assert(blockSwitch != nullptr); noway_assert(newTarget != nullptr); noway_assert(oldTarget != nullptr); - noway_assert(blockSwitch->getBBJumpKind() == BBJ_SWITCH); + noway_assert(blockSwitch->KindIs(BBJ_SWITCH)); assert(fgPredsComputed); // For the jump targets values that match oldTarget of our BBJ_SWITCH @@ -911,7 +911,7 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, Fixed } // Determine if the call site is in a no-return block - if (isInlining && (impInlineInfo->iciBlock->getBBJumpKind() == BBJ_THROW)) + if (isInlining && impInlineInfo->iciBlock->KindIs(BBJ_THROW)) { compInlineResult->Note(InlineObservation::CALLSITE_IN_NORETURN_REGION); } @@ -2721,7 +2721,7 @@ void Compiler::fgMarkBackwardJump(BasicBlock* targetBlock, BasicBlock* sourceBlo for (BasicBlock* const block : Blocks(targetBlock, sourceBlock)) { - if (((block->bbFlags & BBF_BACKWARD_JUMP) == 0) && (block->getBBJumpKind() != BBJ_RETURN)) + if (((block->bbFlags & BBF_BACKWARD_JUMP) == 0) && !block->KindIs(BBJ_RETURN)) { block->bbFlags |= BBF_BACKWARD_JUMP; compHasBackwardJump = true; @@ -3675,7 +3675,7 @@ void Compiler::fgFindBasicBlocks() // Still inside the filter block->setHndIndex(XTnum); - if (block->getBBJumpKind() == BBJ_EHFILTERRET) + if (block->KindIs(BBJ_EHFILTERRET)) { // Mark catch handler as successor. block->bbJumpDest = hndBegBB; @@ -4015,7 +4015,7 @@ void Compiler::fgFixEntryFlowForOSR() // Now branch from method start to the OSR entry. 
// fgEnsureFirstBBisScratch(); - assert(fgFirstBB->getBBJumpKind() == BBJ_NONE); + assert(fgFirstBB->KindIs(BBJ_NONE)); fgRemoveRefPred(fgFirstBB->bbNext, fgFirstBB); fgFirstBB->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); fgFirstBB->bbJumpDest = fgOSREntryBB; @@ -4099,14 +4099,14 @@ void Compiler::fgCheckBasicBlockControlFlow() HBtab = ehGetDsc(blk->getHndIndex()); // Endfilter allowed only in a filter block - if (blk->getBBJumpKind() == BBJ_EHFILTERRET) + if (blk->KindIs(BBJ_EHFILTERRET)) { if (!HBtab->HasFilter()) { BADCODE("Unexpected endfilter"); } } - else if (blk->getBBJumpKind() == BBJ_EHFINALLYRET) + else if (blk->KindIs(BBJ_EHFINALLYRET)) { // endfinally allowed only in a finally block if (!HBtab->HasFinallyHandler()) @@ -4114,7 +4114,7 @@ void Compiler::fgCheckBasicBlockControlFlow() BADCODE("Unexpected endfinally"); } } - else if (blk->getBBJumpKind() == BBJ_EHFAULTRET) + else if (blk->KindIs(BBJ_EHFAULTRET)) { // 'endfault' (alias of IL 'endfinally') allowed only in a fault block if (!HBtab->HasFaultHandler()) @@ -4568,7 +4568,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) // For each successor of the original block, set the new block as their predecessor. // Note we are using the "rational" version of the successor iterator that does not hide the finallyret arcs. // Without these arcs, a block 'b' may not be a member of succs(preds(b)) - if (curr->getBBJumpKind() != BBJ_SWITCH) + if (!curr->KindIs(BBJ_SWITCH)) { for (BasicBlock* const succ : curr->Succs(this)) { @@ -4874,7 +4874,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) JITDUMP("Splitting edge from " FMT_BB " to " FMT_BB "; adding " FMT_BB "\n", curr->bbNum, succ->bbNum, newBlock->bbNum); - if (curr->getBBJumpKind() == BBJ_COND) + if (curr->KindIs(BBJ_COND)) { fgReplacePred(succ, curr, newBlock); if (curr->bbJumpDest == succ) { @@ -4884,7 +4884,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) } fgAddRefPred(newBlock, curr); } - else if (curr->getBBJumpKind() == BBJ_SWITCH) + else if (curr->KindIs(BBJ_SWITCH)) { // newBlock replaces 'succ' in the switch. fgReplaceSwitchJumpTarget(curr, newBlock, succ); @@ -4894,7 +4894,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) } else { - assert(curr->getBBJumpKind() == BBJ_ALWAYS); + assert(curr->KindIs(BBJ_ALWAYS)); fgReplacePred(succ, curr, newBlock); curr->bbJumpDest = newBlock; fgAddRefPred(newBlock, curr); @@ -4907,7 +4907,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) // This isn't accurate, but it is complex to compute a reasonable number so just assume that we take the // branch 50% of the time.
// - if (curr->getBBJumpKind() != BBJ_ALWAYS) + if (!curr->KindIs(BBJ_ALWAYS)) { newBlock->inheritWeightPercentage(curr, 50); } @@ -5054,7 +5054,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) } #endif // FEATURE_EH_FUNCLETS - if (bPrev->getBBJumpKind() == BBJ_CALLFINALLY) + if (bPrev->KindIs(BBJ_CALLFINALLY)) { // bPrev CALL becomes RETLESS as the BBJ_ALWAYS block is unreachable bPrev->bbFlags |= BBF_RETLESS_CALL; @@ -5063,7 +5063,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) NO_WAY("No retless call finally blocks; need unwind target instead"); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } - else if (bPrev->getBBJumpKind() == BBJ_ALWAYS && bPrev->bbJumpDest == block->bbNext && + else if (bPrev->KindIs(BBJ_ALWAYS) && bPrev->bbJumpDest == block->bbNext && !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) && (block->bbNext != fgFirstColdBlock)) { @@ -5092,7 +5092,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) if (block->isBBCallAlwaysPair()) { BasicBlock* leaveBlk = block->bbNext; - noway_assert(leaveBlk->getBBJumpKind() == BBJ_ALWAYS); + noway_assert(leaveBlk->KindIs(BBJ_ALWAYS)); leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; leaveBlk->bbRefs = 0; @@ -5104,7 +5104,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) fgClearFinallyTargetBit(leaveBlk->bbJumpDest); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } - else if (block->getBBJumpKind() == BBJ_RETURN) + else if (block->KindIs(BBJ_RETURN)) { fgRemoveReturnBlock(block); } @@ -5139,7 +5139,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) noway_assert(block->bbJumpDest != block); /* Empty GOTO can be removed iff bPrev is BBJ_NONE */ - noway_assert(bPrev && bPrev->getBBJumpKind() == BBJ_NONE); + noway_assert(bPrev && bPrev->KindIs(BBJ_NONE)); break; default: @@ -5154,7 +5154,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) BasicBlock* succBlock; - if (block->getBBJumpKind() == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { succBlock = block->bbJumpDest; } @@ -5207,7 +5207,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) /* Must be a fall through to next block */ - noway_assert(block->getBBJumpKind() == BBJ_NONE); + noway_assert(block->KindIs(BBJ_NONE)); /* old block no longer gets the extra ref count for being the first block */ block->bbRefs--; @@ -5235,7 +5235,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) /* If predBlock is a new predecessor, then add it to succBlock's predecessor's list. 
*/ - if (predBlock->getBBJumpKind() != BBJ_SWITCH) + if (!predBlock->KindIs(BBJ_SWITCH)) { // Even if the pred is not a switch, we could have a conditional branch // to the fallthrough, so duplicate there could be preds @@ -5257,7 +5257,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) PREFIX_ASSUME(bPrev != nullptr); /* In the case of BBJ_ALWAYS we have to change the type of its predecessor */ - if (block->getBBJumpKind() == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { /* bPrev now becomes a BBJ_ALWAYS */ bPrev->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); @@ -5459,7 +5459,7 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) // If bSrc is an unconditional branch to the next block // then change it to a BBJ_NONE block // - if ((bSrc->getBBJumpKind() == BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && + if (bSrc->KindIs(BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (bSrc->bbJumpDest == bSrc->bbNext)) { bSrc->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); @@ -6273,14 +6273,14 @@ bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt) } else { - if (bAlt->getBBJumpKind() == BBJ_ALWAYS) + if (bAlt->KindIs(BBJ_ALWAYS)) { // Our result is true if bAlt's weight is more than bCur's weight result = (bAlt->bbWeight > bCur->bbWeight); } else { - noway_assert(bAlt->getBBJumpKind() == BBJ_COND); + noway_assert(bAlt->KindIs(BBJ_COND)); // Our result is true if bAlt's weight is more than twice bCur's weight result = (bAlt->bbWeight > (2 * bCur->bbWeight)); } @@ -6570,7 +6570,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, { goodBlk = blk; } - else if ((goodBlk->getBBJumpKind() == BBJ_COND) || (blk->getBBJumpKind() != BBJ_COND)) + else if (goodBlk->KindIs(BBJ_COND) || !blk->KindIs(BBJ_COND)) { if ((blk == nearBlk) || !reachedNear) { diff --git a/src/coreclr/jit/fgdiagnostic.cpp index edf64aeccdd378..b8b868214ae9c8 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -143,13 +143,13 @@ void Compiler::fgDebugCheckUpdate() // Check for an unnecessary jumps to the next block bool doAssertOnJumpToNextBlock = false; // unless we have a BBJ_COND or BBJ_ALWAYS we can not assert - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { // A conditional branch should never jump to the next block // as it can be folded into a BBJ_NONE; doAssertOnJumpToNextBlock = true; } - else if (block->getBBJumpKind() == BBJ_ALWAYS) + else if (block->KindIs(BBJ_ALWAYS)) { // Generally we will want to assert if a BBJ_ALWAYS branches to the next block doAssertOnJumpToNextBlock = true; @@ -184,7 +184,7 @@ void Compiler::fgDebugCheckUpdate() /* Make sure BBF_KEEP_BBJ_ALWAYS is set correctly */ - if ((block->getBBJumpKind() == BBJ_ALWAYS) && prevIsCallAlwaysPair) + if (block->KindIs(BBJ_ALWAYS) && prevIsCallAlwaysPair) { noway_assert(block->bbFlags & BBF_KEEP_BBJ_ALWAYS); } @@ -192,7 +192,7 @@ void Compiler::fgDebugCheckUpdate() /* For a BBJ_CALLFINALLY block we make sure that we are followed by */ /* an BBJ_ALWAYS block with BBF_INTERNAL set */ /* or that it's a BBF_RETLESS_CALL */ - if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { assert((block->bbFlags & BBF_RETLESS_CALL) || block->isBBCallAlwaysPair()); } @@ -984,7 +984,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) } } - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { fprintf(fgxFile, "\\n");
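[Editor's note, not part of the patch: the bulk rewrite in these hunks replaces `getBBJumpKind() == X` and `getBBJumpKind() != X` comparisons with `KindIs`. For reviewers who want the accessor's shape in front of them, a minimal sketch follows. It is reconstructed from memory of block.h and is not introduced by this diff; the variadic overload is what the `KindIs(BBJ_NONE, BBJ_COND)` call sites later in the patch rely on.]

    bool KindIs(BBjumpKinds kind) const
    {
        // Reads the now-private bbJumpKind field from inside BasicBlock,
        // so call sites no longer need the getter for equality tests.
        return bbJumpKind == kind;
    }

    template <typename... T>
    bool KindIs(BBjumpKinds kind, T... rest) const
    {
        // True if the block's jump kind matches any of the listed kinds,
        // e.g. block->KindIs(BBJ_NONE, BBJ_COND).
        return KindIs(kind) || KindIs(rest...);
    }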
@@ -1015,11 +1015,11 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) { fprintf(fgxFile, ", shape = \"house\""); } - else if (block->getBBJumpKind() == BBJ_RETURN) + else if (block->KindIs(BBJ_RETURN)) { fprintf(fgxFile, ", shape = \"invhouse\""); } - else if (block->getBBJumpKind() == BBJ_THROW) + else if (block->KindIs(BBJ_THROW)) { fprintf(fgxFile, ", shape = \"trapezium\""); } @@ -1152,7 +1152,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) fprintf(fgxFile, "\n id=\"%d\"", edgeNum); fprintf(fgxFile, "\n source=\"%d\"", bSource->bbNum); fprintf(fgxFile, "\n target=\"%d\"", bTarget->bbNum); - if (bSource->getBBJumpKind() == BBJ_SWITCH) + if (bSource->KindIs(BBJ_SWITCH)) { if (edge->getDupCount() >= 2) { @@ -2606,8 +2606,7 @@ bool BBPredsChecker::CheckEhTryDsc(BasicBlock* block, BasicBlock* blockPred, EHb // block that does a local call to the finally. This BBJ_ALWAYS is within // the try region protected by the finally (for x86, ARM), but that's ok. BasicBlock* prevBlock = block->bbPrev; - if (prevBlock->getBBJumpKind() == BBJ_CALLFINALLY && block->getBBJumpKind() == BBJ_ALWAYS && - blockPred->getBBJumpKind() == BBJ_EHFINALLYRET) + if (prevBlock->KindIs(BBJ_CALLFINALLY) && block->KindIs(BBJ_ALWAYS) && blockPred->KindIs(BBJ_EHFINALLYRET)) { return true; } @@ -2634,7 +2633,7 @@ bool BBPredsChecker::CheckEhHndDsc(BasicBlock* block, BasicBlock* blockPred, EHb } // Our try block can call our finally block - if ((block->bbCatchTyp == BBCT_FINALLY) && (blockPred->getBBJumpKind() == BBJ_CALLFINALLY) && + if ((block->bbCatchTyp == BBCT_FINALLY) && blockPred->KindIs(BBJ_CALLFINALLY) && comp->ehCallFinallyInCorrectRegion(blockPred, block->getHndIndex())) { return true; @@ -2734,7 +2733,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if (bcall->getBBJumpKind() != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { continue; } @@ -2756,7 +2755,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) for (BasicBlock* const bcall : comp->Blocks(comp->fgFirstFuncletBB)) { - if (bcall->getBBJumpKind() != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { continue; } @@ -2878,12 +2877,12 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef // if (compPostImportationCleanupDone || ((block->bbFlags & BBF_IMPORTED) != 0)) { - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { assert((!allNodesLinked || (block->lastNode()->gtNext == nullptr)) && block->lastNode()->OperIsConditionalJump()); } - else if (block->getBBJumpKind() == BBJ_SWITCH) + else if (block->KindIs(BBJ_SWITCH)) { assert((!allNodesLinked || (block->lastNode()->gtNext == nullptr)) && (block->lastNode()->gtOper == GT_SWITCH || block->lastNode()->gtOper == GT_SWITCH_TABLE)); @@ -2987,7 +2986,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef // Don't depend on predecessors list for the check. for (BasicBlock* const succBlock : block->Succs()) { - if (succBlock->getBBJumpKind() == BBJ_CALLFINALLY) + if (succBlock->KindIs(BBJ_CALLFINALLY)) { BasicBlock* finallyBlock = succBlock->bbJumpDest; assert(finallyBlock->hasHndIndex()); @@ -3729,7 +3728,7 @@ void Compiler::fgDebugCheckBlockLinks() // If this is a switch, check that the tables are consistent. 
// Note that we don't call GetSwitchDescMap(), because it has the side-effect // of allocating it if it is not present. - if (block->getBBJumpKind() == BBJ_SWITCH && m_switchDescMap != nullptr) + if (block->KindIs(BBJ_SWITCH) && m_switchDescMap != nullptr) { SwitchUniqueSuccSet uniqueSuccSet; if (m_switchDescMap->Lookup(block, &uniqueSuccSet)) @@ -4792,13 +4791,13 @@ void Compiler::fgDebugCheckLoopTable() // The pre-header can only be BBJ_ALWAYS or BBJ_NONE and must enter the loop. BasicBlock* e = loop.lpEntry; - if (h->getBBJumpKind() == BBJ_ALWAYS) + if (h->KindIs(BBJ_ALWAYS)) { assert(h->bbJumpDest == e); } else { - assert(h->getBBJumpKind() == BBJ_NONE); + assert(h->KindIs(BBJ_NONE)); assert(h->bbNext == e); assert(loop.lpTop == e); assert(loop.lpIsTopEntry()); @@ -4907,7 +4906,7 @@ void Compiler::fgDebugCheckLoopTable() // TODO: We might want the following assert, but there are cases where we don't move all // return blocks out of the loop. // Return blocks are not allowed inside a loop; they should have been moved elsewhere. - // assert(block->getBBJumpKind() != BBJ_RETURN); + // assert(!block->KindIs(BBJ_RETURN)); } else { diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index f6549f3b538dfe..782a92c92b6452 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -100,7 +100,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() } // If the finally's block jumps back to itself, then it is not empty. - if ((firstBlock->getBBJumpKind() == BBJ_ALWAYS) && firstBlock->bbJumpDest == firstBlock) + if (firstBlock->KindIs(BBJ_ALWAYS) && firstBlock->bbJumpDest == firstBlock) { JITDUMP("EH#%u finally has basic block that jumps to itself; skipping.\n", XTnum); XTnum++; @@ -142,7 +142,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() { BasicBlock* nextBlock = currentBlock->bbNext; - if ((currentBlock->getBBJumpKind() == BBJ_CALLFINALLY) && (currentBlock->bbJumpDest == firstBlock)) + if (currentBlock->KindIs(BBJ_CALLFINALLY) && (currentBlock->bbJumpDest == firstBlock)) { // Retarget the call finally to jump to the return // point. 
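[Editor's note, not part of the patch: the fgRemoveEmptyFinally hunks on either side of this point retarget the caller of an empty finally. The names (currentBlock, leaveBlock, firstBlock, postTryFinallyBlock) come from the surrounding hunks; the before/after diagram is an illustrative sketch of the rewrite, not code added by this diff.]

    // Before: a BBJ_CALLFINALLY/BBJ_ALWAYS pair invokes the empty finally:
    //   currentBlock : BBJ_CALLFINALLY -> firstBlock (first block of the finally)
    //   leaveBlock   : BBJ_ALWAYS      -> postTryFinallyBlock (code after the try)
    //
    // After: currentBlock branches straight to the continuation, leaveBlock
    // becomes unreachable, and the empty finally region can be removed:
    currentBlock->bbJumpDest = postTryFinallyBlock;
    currentBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // 'this' is the Compiler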
@@ -160,7 +160,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() JITDUMP("so that " FMT_BB " jumps to " FMT_BB "; then remove " FMT_BB "\n", currentBlock->bbNum, postTryFinallyBlock->bbNum, leaveBlock->bbNum); - noway_assert(leaveBlock->getBBJumpKind() == BBJ_ALWAYS); + noway_assert(leaveBlock->KindIs(BBJ_ALWAYS)); currentBlock->bbJumpDest = postTryFinallyBlock; currentBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); @@ -373,7 +373,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() // Look for blocks that are always jumps to a call finally // pair that targets the finally - if (firstTryBlock->getBBJumpKind() != BBJ_ALWAYS) + if (!firstTryBlock->KindIs(BBJ_ALWAYS)) { JITDUMP("EH#%u first try block " FMT_BB " not jump to a callfinally; skipping.\n", XTnum, firstTryBlock->bbNum); @@ -437,7 +437,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock; block = block->bbNext) { - if ((block->getBBJumpKind() == BBJ_CALLFINALLY) && (block->bbJumpDest == firstHandlerBlock)) + if (block->KindIs(BBJ_CALLFINALLY) && (block->bbJumpDest == firstHandlerBlock)) { assert(block->isBBCallAlwaysPair()); @@ -536,7 +536,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() block->clearHndIndex(); } - if (block->getBBJumpKind() == BBJ_EHFINALLYRET) + if (block->KindIs(BBJ_EHFINALLYRET)) { Statement* finallyRet = block->lastStmt(); GenTree* finallyRetExpr = finallyRet->GetRootNode(); @@ -738,7 +738,7 @@ PhaseStatus Compiler::fgCloneFinally() for (const BasicBlock* block = firstBlock; block != nextBlock; block = block->bbNext) { - if (block->getBBJumpKind() == BBJ_SWITCH) + if (block->KindIs(BBJ_SWITCH)) { hasSwitch = true; break; @@ -753,7 +753,7 @@ PhaseStatus Compiler::fgCloneFinally() regionStmtCount++; } - hasFinallyRet = hasFinallyRet || (block->getBBJumpKind() == BBJ_EHFINALLYRET); + hasFinallyRet = hasFinallyRet || (block->KindIs(BBJ_EHFINALLYRET)); isAllRare = isAllRare && block->isRunRarely(); } @@ -821,11 +821,11 @@ PhaseStatus Compiler::fgCloneFinally() // through to a callfinally. BasicBlock* jumpDest = nullptr; - if ((block->getBBJumpKind() == BBJ_NONE) && (block == lastTryBlock)) + if ((block->KindIs(BBJ_NONE)) && (block == lastTryBlock)) { jumpDest = block->bbNext; } - else if (block->getBBJumpKind() == BBJ_ALWAYS) + else if (block->KindIs(BBJ_ALWAYS)) { jumpDest = block->bbJumpDest; } @@ -989,8 +989,7 @@ PhaseStatus Compiler::fgCloneFinally() { BasicBlock* const placeToMoveAfter = firstCallFinallyBlock->bbPrev; - if ((placeToMoveAfter->getBBJumpKind() == BBJ_ALWAYS) && - (placeToMoveAfter->bbJumpDest == normalCallFinallyBlock)) + if (placeToMoveAfter->KindIs(BBJ_ALWAYS) && (placeToMoveAfter->bbJumpDest == normalCallFinallyBlock)) { JITDUMP("Moving callfinally " FMT_BB " to be first in line, before " FMT_BB "\n", normalCallFinallyBlock->bbNum, firstCallFinallyBlock->bbNum); @@ -1050,8 +1049,7 @@ PhaseStatus Compiler::fgCloneFinally() // Avoid asserts when `fgNewBBinRegion` verifies the handler table, by mapping any cloned finally // return blocks to BBJ_ALWAYS (which we would do below if we didn't do it here). - BBjumpKinds bbNewJumpKind = - (block->getBBJumpKind() == BBJ_EHFINALLYRET) ? BBJ_ALWAYS : block->getBBJumpKind(); + BBjumpKinds bbNewJumpKind = (block->KindIs(BBJ_EHFINALLYRET)) ? 
BBJ_ALWAYS : block->getBBJumpKind(); if (block == firstBlock) { @@ -1133,13 +1131,13 @@ PhaseStatus Compiler::fgCloneFinally() { BasicBlock* newBlock = blockMap[block]; - if (block->getBBJumpKind() == BBJ_EHFINALLYRET) + if (block->KindIs(BBJ_EHFINALLYRET)) { Statement* finallyRet = newBlock->lastStmt(); GenTree* finallyRetExpr = finallyRet->GetRootNode(); assert(finallyRetExpr->gtOper == GT_RETFILT); fgRemoveStmt(newBlock, finallyRet); - assert(newBlock->getBBJumpKind() == BBJ_ALWAYS); // we mapped this above already + assert(newBlock->KindIs(BBJ_ALWAYS)); // we mapped this above already newBlock->bbJumpDest = normalCallFinallyReturn; fgAddRefPred(normalCallFinallyReturn, newBlock); @@ -1196,7 +1194,7 @@ PhaseStatus Compiler::fgCloneFinally() // All preds should be BBJ_EHFINALLYRETs from the finally. for (BasicBlock* const leavePred : leaveBlock->PredBlocks()) { - assert(leavePred->getBBJumpKind() == BBJ_EHFINALLYRET); + assert(leavePred->KindIs(BBJ_EHFINALLYRET)); assert(leavePred->getHndIndex() == XTnum); } @@ -1242,7 +1240,7 @@ PhaseStatus Compiler::fgCloneFinally() BasicBlock* const hndEndIter = HBtab->ebdHndLast->bbNext; for (BasicBlock* block = hndBegIter; block != hndEndIter; block = block->bbNext) { - if (block->getBBJumpKind() == BBJ_EHFINALLYRET) + if (block->KindIs(BBJ_EHFINALLYRET)) { block->setBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this)); } @@ -1408,7 +1406,7 @@ void Compiler::fgDebugCheckTryFinallyExits() // logically "belong" to a child region and the exit // path validity will be checked when looking at the // try blocks in that region. - if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { continue; } @@ -1434,13 +1432,13 @@ void Compiler::fgDebugCheckTryFinallyExits() bool isCallToFinally = false; #if FEATURE_EH_CALLFINALLY_THUNKS - if (succBlock->getBBJumpKind() == BBJ_CALLFINALLY) + if (succBlock->KindIs(BBJ_CALLFINALLY)) { // case (a1) isCallToFinally = isFinally && (succBlock->bbJumpDest == finallyBlock); } #else - if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { // case (a2) isCallToFinally = isFinally && (block->bbJumpDest == finallyBlock); @@ -1454,7 +1452,7 @@ void Compiler::fgDebugCheckTryFinallyExits() // case (b) isJumpToClonedFinally = true; } - else if (succBlock->getBBJumpKind() == BBJ_ALWAYS) + else if (succBlock->KindIs(BBJ_ALWAYS)) { if (succBlock->isEmpty()) { @@ -1467,7 +1465,7 @@ void Compiler::fgDebugCheckTryFinallyExits() } } } - else if (succBlock->getBBJumpKind() == BBJ_NONE) + else if (succBlock->KindIs(BBJ_NONE)) { if (succBlock->isEmpty()) { @@ -1900,7 +1898,7 @@ bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, { // We expect callfinallys to be invoked by a BBJ_ALWAYS at this // stage in compilation. - if (block->getBBJumpKind() != BBJ_ALWAYS) + if (!block->KindIs(BBJ_ALWAYS)) { // Possible paranoia assert here -- no flow successor of // this block should be a callfinally for this try. 
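[Editor's note, not part of the patch: with bbJumpKind private, every write in this series goes through setBBJumpKind, which threads a Compiler* in debug builds. Both call-site forms below appear verbatim elsewhere in this patch; the expansion of DEBUG_ARG to `, x` under DEBUG and to nothing otherwise is an assumption, in the style of the JIT's existing DEBUGARG macro.]

    // Inside Compiler member functions, the compiler instance is 'this':
    block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));

    // Helpers that cache a Compiler* (e.g. the profile instrumentors in
    // fgprofile.cpp) pass the cached pointer instead:
    pred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp));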
diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp index 14f42c83254c53..fd6ef7a3567763 100644 --- a/src/coreclr/jit/fgflow.cpp +++ b/src/coreclr/jit/fgflow.cpp @@ -354,7 +354,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block) bNext = block->bbNext; /* bNext is an unreachable BBJ_ALWAYS block */ - noway_assert(bNext->getBBJumpKind() == BBJ_ALWAYS); + noway_assert(bNext->KindIs(BBJ_ALWAYS)); while (bNext->countOfInEdges() > 0) { @@ -403,7 +403,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->bbFlags & BBF_REMOVED) || bcall->getBBJumpKind() != BBJ_CALLFINALLY || + if ((bcall->bbFlags & BBF_REMOVED) || !bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { continue; @@ -470,7 +470,7 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock* for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if (bcall->getBBJumpKind() != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { continue; } @@ -491,7 +491,7 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock* Compiler::SwitchUniqueSuccSet Compiler::GetDescriptorForSwitch(BasicBlock* switchBlk) { - assert(switchBlk->getBBJumpKind() == BBJ_SWITCH); + assert(switchBlk->KindIs(BBJ_SWITCH)); BlockToSwitchDescMap* switchMap = GetSwitchDescMap(); SwitchUniqueSuccSet res; if (switchMap->Lookup(switchBlk, &res)) @@ -546,7 +546,7 @@ void Compiler::SwitchUniqueSuccSet::UpdateTarget(CompAllocator alloc, BasicBlock* from, BasicBlock* to) { - assert(switchBlk->getBBJumpKind() == BBJ_SWITCH); // Precondition. + assert(switchBlk->KindIs(BBJ_SWITCH)); // Precondition. // Is "from" still in the switch table (because it had more than one entry before?) bool fromStillPresent = false; diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index fd880a2d00348c..51f77ccc3a5f9d 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -1444,7 +1444,7 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) // DDB 91389: Don't throw away the (only) inlinee block // when its return type is not BBJ_RETURN. // In other words, we need its BBJ_ to perform the right thing. - if (InlineeCompiler->fgFirstBB->getBBJumpKind() == BBJ_RETURN) + if (InlineeCompiler->fgFirstBB->KindIs(BBJ_RETURN)) { // Inlinee contains just one BB. So just insert its statement list to topBlock. if (InlineeCompiler->fgFirstBB->bbStmtList != nullptr) @@ -1523,7 +1523,7 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) block->bbFlags |= BBF_INTERNAL; } - if (block->getBBJumpKind() == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { noway_assert((block->bbFlags & BBF_HAS_JMP) == 0); if (block->bbNext) @@ -1945,7 +1945,7 @@ Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) unsigned lclCnt = InlineeMethodInfo->locals.numArgs; bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->getBBJumpKind() == BBJ_RETURN; + bool bbIsReturn = block->KindIs(BBJ_RETURN); // If the callee contains zero-init locals, we need to explicitly initialize them if we are // in a loop or if the caller doesn't have compInitMem set. 
Otherwise we can rely on the diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 9814f8b9e6b0d5..bcf25c9d01ef17 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -292,7 +292,7 @@ void Compiler::fgComputeReturnBlocks() { // If this is a BBJ_RETURN block, add it to our list of all BBJ_RETURN blocks. This list is only // used to find return blocks. - if (block->getBBJumpKind() == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { fgReturnBlocks = new (this, CMK_Reachability) BasicBlockList(block, fgReturnBlocks); } @@ -362,7 +362,7 @@ void Compiler::fgComputeEnterBlocksSet() // For ARM code, prevent creating retless calls by adding the BBJ_ALWAYS to the "fgAlwaysBlks" list. for (BasicBlock* const block : Blocks()) { - if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { assert(block->isBBCallAlwaysPair()); @@ -474,7 +474,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) // the target node (of BBJ_ALWAYS) since BBJ_CALLFINALLY node is getting converted to a BBJ_THROW. if (bIsBBCallAlwaysPair) { - noway_assert(block->bbNext->getBBJumpKind() == BBJ_ALWAYS); + noway_assert(block->bbNext->KindIs(BBJ_ALWAYS)); fgClearFinallyTargetBit(block->bbNext->bbJumpDest); } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -638,7 +638,7 @@ bool Compiler::fgRemoveDeadBlocks() // For ARM code, prevent creating retless calls by adding the BBJ_ALWAYS to the "fgAlwaysBlks" list. for (BasicBlock* const block : Blocks()) { - if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { assert(block->isBBCallAlwaysPair()); @@ -1827,7 +1827,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() // it can be reached directly from "outside". // assert(fgFirstBB->bbJumpDest == osrEntry); - assert(fgFirstBB->getBBJumpKind() == BBJ_ALWAYS); + assert(fgFirstBB->KindIs(BBJ_ALWAYS)); if (entryJumpTarget != osrEntry) { @@ -1918,7 +1918,7 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext) noway_assert(block->bbNext == bNext); - if (block->getBBJumpKind() != BBJ_NONE) + if (!block->KindIs(BBJ_NONE)) { return false; } @@ -2002,7 +2002,7 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext) // (if they are valid). for (BasicBlock* const predBlock : bNext->PredBlocks()) { - if (predBlock->getBBJumpKind() == BBJ_SWITCH) + if (predBlock->KindIs(BBJ_SWITCH)) { return false; } @@ -2027,7 +2027,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) { noway_assert(block != nullptr); noway_assert((block->bbFlags & BBF_REMOVED) == 0); - noway_assert(block->getBBJumpKind() == BBJ_NONE); + noway_assert(block->KindIs(BBJ_NONE)); noway_assert(bNext == block->bbNext); noway_assert(bNext != nullptr); @@ -2234,7 +2234,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) // or if both block and bNext have non-zero weights // then we will use the max weight for the block. 
// - if (bNext->getBBJumpKind() == BBJ_THROW) + if (bNext->KindIs(BBJ_THROW)) { block->bbSetRunRarely(); } @@ -2345,7 +2345,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) fgReplacePred(bNext->bbJumpDest, bNext, block); /* Update the predecessor list for 'bNext->bbNext' if it is different than 'bNext->bbJumpDest' */ - if (bNext->getBBJumpKind() == BBJ_COND && bNext->bbJumpDest != bNext->bbNext) + if (bNext->KindIs(BBJ_COND) && bNext->bbJumpDest != bNext->bbNext) { fgReplacePred(bNext->bbNext, bNext, block); } @@ -2375,7 +2375,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if (bcall->getBBJumpKind() != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { continue; } @@ -2627,7 +2627,7 @@ void Compiler::fgUnreachableBlock(BasicBlock* block) // void Compiler::fgRemoveConditionalJump(BasicBlock* block) { - noway_assert(block->getBBJumpKind() == BBJ_COND && block->bbJumpDest == block->bbNext); + noway_assert(block->KindIs(BBJ_COND) && block->bbJumpDest == block->bbNext); assert(compRationalIRForm == block->IsLIR()); FlowEdge* flow = fgGetPredForBlock(block->bbNext, block); @@ -2735,7 +2735,7 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc bool optimizeJump = true; assert(bDest->isEmpty()); - assert(bDest->getBBJumpKind() == BBJ_ALWAYS); + assert(bDest->KindIs(BBJ_ALWAYS)); // We do not optimize jumps between two different try regions. // However jumping to a block that is not in any try region is OK @@ -2930,7 +2930,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) } /* Empty GOTO can be removed iff bPrev is BBJ_NONE */ - if (bPrev->getBBJumpKind() != BBJ_NONE) + if (!bPrev->KindIs(BBJ_NONE)) { break; } @@ -2957,7 +2957,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) { /* If this block follows a BBJ_CALLFINALLY do not remove it * (because we don't know who may jump to it) */ - if (bPrev->getBBJumpKind() == BBJ_CALLFINALLY) + if (bPrev->KindIs(BBJ_CALLFINALLY)) { break; } @@ -2980,7 +2980,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) { BasicBlock* succBlock; - if (block->getBBJumpKind() == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { succBlock = block->bbJumpDest; } @@ -2997,7 +2997,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) bool okToMerge = true; // assume it's ok for (BasicBlock* const predBlock : block->PredBlocks()) { - if (predBlock->getBBJumpKind() == BBJ_EHCATCHRET) + if (predBlock->KindIs(BBJ_EHCATCHRET)) { assert(predBlock->bbJumpDest == block); okToMerge = false; // we can't get rid of the empty block @@ -3119,7 +3119,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) // bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) { - assert(block->getBBJumpKind() == BBJ_SWITCH); + assert(block->KindIs(BBJ_SWITCH)); unsigned jmpCnt = block->bbJumpSwt->bbsCount; BasicBlock** jmpTab = block->bbJumpSwt->bbsDstTab; @@ -3134,7 +3134,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) bNewDest = bDest; // Do we have a JUMP to an empty unconditional JUMP block? 
- if (bDest->isEmpty() && (bDest->getBBJumpKind() == BBJ_ALWAYS) && + if (bDest->isEmpty() && bDest->KindIs(BBJ_ALWAYS) && (bDest != bDest->bbJumpDest)) // special case for self jumps { bool optimizeJump = true; @@ -3502,7 +3502,7 @@ bool Compiler::fgBlockIsGoodTailDuplicationCandidate(BasicBlock* target, unsigne // // This is by no means the only kind of tail that it is beneficial to duplicate, // just the only one we recognize for now. - if (target->getBBJumpKind() != BBJ_COND) + if (!target->KindIs(BBJ_COND)) { return false; } @@ -3741,7 +3741,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // if (opts.IsOSR()) { - assert(target->getBBJumpKind() == BBJ_COND); + assert(target->KindIs(BBJ_COND)); if ((target->bbNext->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) { @@ -3829,7 +3829,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi assert(block->bbNext == bNext); assert(block->bbPrev == bPrev); - if (block->getBBJumpKind() == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { // We can't remove it if it is a branch from hot => cold if (!fgInDifferentRegions(block, bNext)) @@ -3859,7 +3859,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi else { /* remove the conditional statement at the end of block */ - noway_assert(block->getBBJumpKind() == BBJ_COND); + noway_assert(block->KindIs(BBJ_COND)); noway_assert(block->isValid()); #ifdef DEBUG @@ -4002,7 +4002,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) return false; } - if (bJump->getBBJumpKind() != BBJ_ALWAYS) + if (!bJump->KindIs(BBJ_ALWAYS)) { return false; } @@ -4021,7 +4021,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) BasicBlock* bDest = bJump->bbJumpDest; - if (bDest->getBBJumpKind() != BBJ_COND) + if (!bDest->KindIs(BBJ_COND)) { return false; } @@ -4324,7 +4324,7 @@ bool Compiler::fgOptimizeSwitchJumps() // assert(!block->IsLIR()); - if (block->getBBJumpKind() != BBJ_SWITCH) + if (!block->KindIs(BBJ_SWITCH)) { continue; } @@ -4516,7 +4516,7 @@ bool Compiler::fgExpandRarelyRunBlocks() noway_assert(tmpbb->isBBCallAlwaysPair()); bPrevPrev = tmpbb; #else - if (tmpbb->getBBJumpKind() == BBJ_CALLFINALLY) + if (tmpbb->KindIs(BBJ_CALLFINALLY)) { bPrevPrev = tmpbb; } @@ -4742,7 +4742,7 @@ bool Compiler::fgExpandRarelyRunBlocks() } /* COMPACT blocks if possible */ - if (bPrev->getBBJumpKind() == BBJ_NONE) + if (bPrev->KindIs(BBJ_NONE)) { if (fgCanCompactBlocks(bPrev, block)) { @@ -4934,7 +4934,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // if (forwardBranch) { - if (bPrev->getBBJumpKind() == BBJ_ALWAYS) + if (bPrev->KindIs(BBJ_ALWAYS)) { // We can pull up the blocks that the unconditional jump branches to // if the weight of bDest is greater or equal to the weight of block @@ -5017,9 +5017,9 @@ bool Compiler::fgReorderBlocks(bool useProfile) } } } - else // (bPrev->getBBJumpKind() == BBJ_COND) + else // (bPrev->KindIs(BBJ_COND)) { - noway_assert(bPrev->getBBJumpKind() == BBJ_COND); + noway_assert(bPrev->KindIs(BBJ_COND)); // // We will reverse branch if the taken-jump to bDest ratio (i.e. 
'takenRatio') // is more than 51% @@ -5211,8 +5211,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) /* (bPrev is known to be a normal block at this point) */ if (!isRare) { - if ((bDest == block->bbNext) && (block->getBBJumpKind() == BBJ_RETURN) && - (bPrev->getBBJumpKind() == BBJ_ALWAYS)) + if ((bDest == block->bbNext) && (block->KindIs(BBJ_RETURN)) && (bPrev->KindIs(BBJ_ALWAYS))) { // This is a common case with expressions like "return Expr1 && Expr2" -- move the return // to establish fall-through. @@ -5246,7 +5245,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) const bool optimizedBranch = fgOptimizeBranch(bPrev); if (optimizedBranch) { - noway_assert(bPrev->getBBJumpKind() == BBJ_COND); + noway_assert(bPrev->KindIs(BBJ_COND)); optimizedBranches = true; } continue; @@ -5423,7 +5422,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // if (bEnd2->isBBCallAlwaysPair()) { - noway_assert(bNext->getBBJumpKind() == BBJ_ALWAYS); + noway_assert(bNext->KindIs(BBJ_ALWAYS)); // Move bEnd2 and bNext forward bEnd2 = bNext; bNext = bNext->bbNext; @@ -5502,12 +5501,12 @@ bool Compiler::fgReorderBlocks(bool useProfile) { if (bDest != nullptr) { - if (bPrev->getBBJumpKind() == BBJ_COND) + if (bPrev->KindIs(BBJ_COND)) { printf("Decided to reverse conditional branch at block " FMT_BB " branch to " FMT_BB " ", bPrev->bbNum, bDest->bbNum); } - else if (bPrev->getBBJumpKind() == BBJ_ALWAYS) + else if (bPrev->KindIs(BBJ_ALWAYS)) { printf("Decided to straighten unconditional branch at block " FMT_BB " branch to " FMT_BB " ", bPrev->bbNum, bDest->bbNum); @@ -5577,7 +5576,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) noway_assert(bEnd != nullptr); // bEnd can't be a BBJ_CALLFINALLY unless it is a RETLESS call - noway_assert((bEnd->getBBJumpKind() != BBJ_CALLFINALLY) || (bEnd->bbFlags & BBF_RETLESS_CALL)); + noway_assert(!bEnd->KindIs(BBJ_CALLFINALLY) || (bEnd->bbFlags & BBF_RETLESS_CALL)); // bStartPrev must be set to the block that precedes bStart noway_assert(bStartPrev->bbNext == bStart); @@ -5716,7 +5715,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) BasicBlock* nearBlk = nullptr; BasicBlock* jumpBlk = nullptr; - if ((bEnd->getBBJumpKind() == BBJ_ALWAYS) && (!isRare || bEnd->bbJumpDest->isRunRarely()) && + if (bEnd->KindIs(BBJ_ALWAYS) && (!isRare || bEnd->bbJumpDest->isRunRarely()) && fgIsForwardBranch(bEnd, bPrev)) { // Set nearBlk to be the block in [startBlk..endBlk] @@ -5844,7 +5843,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) printf("block " FMT_BB, bStart->bbNum); } - if (bPrev->getBBJumpKind() == BBJ_COND) + if (bPrev->KindIs(BBJ_COND)) { printf(" by reversing conditional jump at " FMT_BB "\n", bPrev->bbNum); } @@ -5855,7 +5854,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) } #endif // DEBUG - if (bPrev->getBBJumpKind() == BBJ_COND) + if (bPrev->KindIs(BBJ_COND)) { /* Reverse the bPrev jump condition */ Statement* const condTestStmt = bPrev->lastStmt(); @@ -6103,7 +6102,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) bNext = block->bbNext; bDest = nullptr; - if (block->getBBJumpKind() == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { bDest = block->bbJumpDest; if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, bDest)) @@ -6115,7 +6114,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) } } - if (block->getBBJumpKind() == BBJ_NONE) + if (block->KindIs(BBJ_NONE)) { bDest = nullptr; if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, block->bbNext)) @@ -6147,7 +6146,7 @@ bool 
Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) if (bDest != nullptr) { // Do we have a JUMP to an empty unconditional JUMP block? - if (bDest->isEmpty() && (bDest->getBBJumpKind() == BBJ_ALWAYS) && + if (bDest->isEmpty() && bDest->KindIs(BBJ_ALWAYS) && (bDest != bDest->bbJumpDest)) // special case for self jumps { if (fgOptimizeBranchToEmptyUnconditional(block, bDest)) @@ -6166,225 +6165,226 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // (b) block jump target is elsewhere but join free, and // bNext's jump target has a join. // - if ((block->getBBJumpKind() == BBJ_COND) && // block is a BBJ_COND block - (bNext != nullptr) && // block is not the last block - (bNext->bbRefs == 1) && // No other block jumps to bNext - (bNext->getBBJumpKind() == BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block + if (block->KindIs(BBJ_COND) && // block is a BBJ_COND block + (bNext != nullptr) && // block is not the last block + (bNext->bbRefs == 1) && // No other block jumps to bNext + bNext->KindIs(BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block bNext->isEmpty() && // and it is an empty block (bNext != bNext->bbJumpDest) && // special case for self jumps (bDest != fgFirstColdBlock) && (!fgInDifferentRegions(block, bDest))) // do not cross hot/cold sections - { - // case (a) - // - const bool isJumpAroundEmpty = (bNext->bbNext == bDest); - - // case (b) - // - // Note the asymmetric checks for refs == 1 and refs > 1 ensures that we - // differentiate the roles played by bDest and bNextJumpDest. We need some - // sense of which arrangement is preferable to avoid getting stuck in a loop - // reversing and re-reversing. - // - // Other tiebreaking criteria could be considered. - // - // Pragmatic constraints: - // - // * don't consider lexical predecessors, or we may confuse loop recognition - // * don't consider blocks of different rarities - // - BasicBlock* const bNextJumpDest = bNext->bbJumpDest; - const bool isJumpToJoinFree = !isJumpAroundEmpty && (bDest->bbRefs == 1) && - (bNextJumpDest->bbRefs > 1) && (bDest->bbNum > block->bbNum) && - (block->isRunRarely() == bDest->isRunRarely()); - - bool optimizeJump = isJumpAroundEmpty || isJumpToJoinFree; - - // We do not optimize jumps between two different try regions. - // However jumping to a block that is not in any try region is OK - // - if (bDest->hasTryIndex() && !BasicBlock::sameTryRegion(block, bDest)) { - optimizeJump = false; - } + { + // case (a) + // + const bool isJumpAroundEmpty = (bNext->bbNext == bDest); - // Also consider bNext's try region - // - if (bNext->hasTryIndex() && !BasicBlock::sameTryRegion(block, bNext)) - { - optimizeJump = false; - } + // case (b) + // + // Note the asymmetric checks for refs == 1 and refs > 1 ensures that we + // differentiate the roles played by bDest and bNextJumpDest. We need some + // sense of which arrangement is preferable to avoid getting stuck in a loop + // reversing and re-reversing. + // + // Other tiebreaking criteria could be considered.
+ // + // Pragmatic constraints: + // + // * don't consider lexical predecessors, or we may confuse loop recognition + // * don't consider blocks of different rarities + // + BasicBlock* const bNextJumpDest = bNext->bbJumpDest; + const bool isJumpToJoinFree = !isJumpAroundEmpty && (bDest->bbRefs == 1) && + (bNextJumpDest->bbRefs > 1) && (bDest->bbNum > block->bbNum) && + (block->isRunRarely() == bDest->isRunRarely()); - // If we are optimizing using real profile weights - // then don't optimize a conditional jump to an unconditional jump - // until after we have computed the edge weights - // - if (fgIsUsingProfileWeights()) - { - // if block and bdest are in different hot/cold regions we can't do this optimization - // because we can't allow fall-through into the cold region. - if (!fgEdgeWeightsComputed || fgInDifferentRegions(block, bDest)) + bool optimizeJump = isJumpAroundEmpty || isJumpToJoinFree; + + // We do not optimize jumps between two different try regions. + // However jumping to a block that is not in any try region is OK + // + if (bDest->hasTryIndex() && !BasicBlock::sameTryRegion(block, bDest)) { optimizeJump = false; } - } - if (optimizeJump && isJumpToJoinFree) - { - // In the join free case, we also need to move bDest right after bNext - // to create same flow as in the isJumpAroundEmpty case. + // Also consider bNext's try region // - if (!fgEhAllowsMoveBlock(bNext, bDest) || bDest->isBBCallAlwaysPair()) + if (bNext->hasTryIndex() && !BasicBlock::sameTryRegion(block, bNext)) { optimizeJump = false; } - else - { - // We don't expect bDest to already be right after bNext. - // - assert(bDest != bNext->bbNext); - - JITDUMP("\nMoving " FMT_BB " after " FMT_BB " to enable reversal\n", bDest->bbNum, - bNext->bbNum); - // If bDest can fall through we'll need to create a jump - // block after it too. Remember where to jump to. - // - BasicBlock* const bDestNext = bDest->bbNext; + // If we are optimizing using real profile weights + // then don't optimize a conditional jump to an unconditional jump + // until after we have computed the edge weights + // + if (fgIsUsingProfileWeights()) + { + // if block and bdest are in different hot/cold regions we can't do this optimization + // because we can't allow fall-through into the cold region. + if (!fgEdgeWeightsComputed || fgInDifferentRegions(block, bDest)) + { + optimizeJump = false; + } + } - // Move bDest + if (optimizeJump && isJumpToJoinFree) + { + // In the join free case, we also need to move bDest right after bNext + // to create same flow as in the isJumpAroundEmpty case. // - if (ehIsBlockEHLast(bDest)) + if (!fgEhAllowsMoveBlock(bNext, bDest) || bDest->isBBCallAlwaysPair()) { - ehUpdateLastBlocks(bDest, bDest->bbPrev); + optimizeJump = false; } + else + { + // We don't expect bDest to already be right after bNext. + // + assert(bDest != bNext->bbNext); - fgUnlinkBlock(bDest); - fgInsertBBafter(bNext, bDest); + JITDUMP("\nMoving " FMT_BB " after " FMT_BB " to enable reversal\n", bDest->bbNum, + bNext->bbNum); - if (ehIsBlockEHLast(bNext)) - { - ehUpdateLastBlocks(bNext, bDest); - } + // If bDest can fall through we'll need to create a jump + // block after it too. Remember where to jump to. + // + BasicBlock* const bDestNext = bDest->bbNext; - // Add fall through fixup block, if needed. 
- // - if (bDest->KindIs(BBJ_NONE, BBJ_COND)) - { - BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true); - bFixup->inheritWeight(bDestNext); - bFixup->bbJumpDest = bDestNext; + // Move bDest + // + if (ehIsBlockEHLast(bDest)) + { + ehUpdateLastBlocks(bDest, bDest->bbPrev); + } - fgRemoveRefPred(bDestNext, bDest); - fgAddRefPred(bFixup, bDest); - fgAddRefPred(bDestNext, bFixup); - } - } - } + fgUnlinkBlock(bDest); + fgInsertBBafter(bNext, bDest); - if (optimizeJump) - { - JITDUMP("\nReversing a conditional jump around an unconditional jump (" FMT_BB " -> " FMT_BB - ", " FMT_BB " -> " FMT_BB ")\n", - block->bbNum, bDest->bbNum, bNext->bbNum, bNextJumpDest->bbNum); + if (ehIsBlockEHLast(bNext)) + { + ehUpdateLastBlocks(bNext, bDest); + } - // Reverse the jump condition - // - GenTree* test = block->lastNode(); - noway_assert(test->OperIsConditionalJump()); + // Add fall through fixup block, if needed. + // + if (bDest->KindIs(BBJ_NONE, BBJ_COND)) + { + BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true); + bFixup->inheritWeight(bDestNext); + bFixup->bbJumpDest = bDestNext; - if (test->OperGet() == GT_JTRUE) - { - GenTree* cond = gtReverseCond(test->AsOp()->gtOp1); - assert(cond == test->AsOp()->gtOp1); // Ensure `gtReverseCond` did not create a new node. - test->AsOp()->gtOp1 = cond; + fgRemoveRefPred(bDestNext, bDest); + fgAddRefPred(bFixup, bDest); + fgAddRefPred(bDestNext, bFixup); + } + } } - else + + if (optimizeJump) { - gtReverseCond(test); - } + JITDUMP("\nReversing a conditional jump around an unconditional jump (" FMT_BB " -> " FMT_BB + ", " FMT_BB " -> " FMT_BB ")\n", + block->bbNum, bDest->bbNum, bNext->bbNum, bNextJumpDest->bbNum); - // Optimize the Conditional JUMP to go to the new target - block->bbJumpDest = bNext->bbJumpDest; + // Reverse the jump condition + // + GenTree* test = block->lastNode(); + noway_assert(test->OperIsConditionalJump()); - fgAddRefPred(bNext->bbJumpDest, block, fgRemoveRefPred(bNext->bbJumpDest, bNext)); + if (test->OperGet() == GT_JTRUE) + { + GenTree* cond = gtReverseCond(test->AsOp()->gtOp1); + assert(cond == + test->AsOp()->gtOp1); // Ensure `gtReverseCond` did not create a new node. + test->AsOp()->gtOp1 = cond; + } + else + { + gtReverseCond(test); + } - /* - Unlink bNext from the BasicBlock list; note that we can - do this even though other blocks could jump to it - the - reason is that elsewhere in this function we always - redirect jumps to jumps to jump to the final label, - so even if another block jumps to bNext it won't matter - once we're done since any such jump will be redirected - to the final target by the time we're done here. - */ + // Optimize the Conditional JUMP to go to the new target + block->bbJumpDest = bNext->bbJumpDest; - fgRemoveRefPred(bNext, block); - fgUnlinkBlock(bNext); + fgAddRefPred(bNext->bbJumpDest, block, fgRemoveRefPred(bNext->bbJumpDest, bNext)); - /* Mark the block as removed */ - bNext->bbFlags |= BBF_REMOVED; + /* + Unlink bNext from the BasicBlock list; note that we can + do this even though other blocks could jump to it - the + reason is that elsewhere in this function we always + redirect jumps to jumps to jump to the final label, + so even if another block jumps to bNext it won't matter + once we're done since any such jump will be redirected + to the final target by the time we're done here. + */ - // Update the loop table if we removed the bottom of a loop, for example. 
- fgUpdateLoopsAfterCompacting(block, bNext); + fgRemoveRefPred(bNext, block); + fgUnlinkBlock(bNext); - // If this block was aligned, unmark it - bNext->unmarkLoopAlign(this DEBUG_ARG("Optimized jump")); + /* Mark the block as removed */ + bNext->bbFlags |= BBF_REMOVED; - // If this is the first Cold basic block update fgFirstColdBlock - if (bNext == fgFirstColdBlock) - { - fgFirstColdBlock = bNext->bbNext; - } + // Update the loop table if we removed the bottom of a loop, for example. + fgUpdateLoopsAfterCompacting(block, bNext); - // - // If we removed the end of a try region or handler region - // we will need to update ebdTryLast or ebdHndLast. - // + // If this block was aligned, unmark it + bNext->unmarkLoopAlign(this DEBUG_ARG("Optimized jump")); - for (EHblkDsc* const HBtab : EHClauses(this)) - { - if ((HBtab->ebdTryLast == bNext) || (HBtab->ebdHndLast == bNext)) + // If this is the first Cold basic block update fgFirstColdBlock + if (bNext == fgFirstColdBlock) { - fgSkipRmvdBlocks(HBtab); + fgFirstColdBlock = bNext->bbNext; } - } - // we optimized this JUMP - goto REPEAT to catch similar cases - change = true; - modified = true; + // + // If we removed the end of a try region or handler region + // we will need to update ebdTryLast or ebdHndLast. + // + + for (EHblkDsc* const HBtab : EHClauses(this)) + { + if ((HBtab->ebdTryLast == bNext) || (HBtab->ebdHndLast == bNext)) + { + fgSkipRmvdBlocks(HBtab); + } + } + + // we optimized this JUMP - goto REPEAT to catch similar cases + change = true; + modified = true; #ifdef DEBUG - if (verbose) - { - printf("\nAfter reversing the jump:\n"); - fgDispBasicBlocks(verboseTrees); - } + if (verbose) + { + printf("\nAfter reversing the jump:\n"); + fgDispBasicBlocks(verboseTrees); + } #endif // DEBUG - /* - For a rare special case we cannot jump to REPEAT - as jumping to REPEAT will cause us to delete 'block' - because it currently appears to be unreachable. As - it is a self loop that only has a single bbRef (itself) - However since the unlinked bNext has additional bbRefs - (that we will later connect to 'block'), it is not really - unreachable. - */ - if ((bNext->bbRefs > 0) && (bNext->bbJumpDest == block) && (block->bbRefs == 1)) - { - continue; - } + /* + For a rare special case we cannot jump to REPEAT + as jumping to REPEAT will cause us to delete 'block' + because it currently appears to be unreachable. As + it is a self loop that only has a single bbRef (itself) + However since the unlinked bNext has additional bbRefs + (that we will later connect to 'block'), it is not really + unreachable. + */ + if ((bNext->bbRefs > 0) && (bNext->bbJumpDest == block) && (block->bbRefs == 1)) + { + continue; + } - goto REPEAT; + goto REPEAT; + } } - } } // // Update the switch jump table such that it follows jumps to jumps: // - if (block->getBBJumpKind() == BBJ_SWITCH) + if (block->KindIs(BBJ_SWITCH)) { if (fgOptimizeSwitchBranches(block)) { @@ -6419,11 +6419,11 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Don't remove the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. 
- if (block->countOfInEdges() == 0 && bPrev->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->countOfInEdges() == 0 && bPrev->KindIs(BBJ_CALLFINALLY)) { assert(bPrev->isBBCallAlwaysPair()); noway_assert(!(bPrev->bbFlags & BBF_RETLESS_CALL)); - noway_assert(block->getBBJumpKind() == BBJ_ALWAYS); + noway_assert(block->KindIs(BBJ_ALWAYS)); bPrev = block; continue; } @@ -6900,7 +6900,7 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) } bool const isNoSplit = stmt == predBlock->firstStmt(); - bool const isFallThrough = (predBlock->getBBJumpKind() == BBJ_NONE); + bool const isFallThrough = (predBlock->KindIs(BBJ_NONE)); // Is this block possibly better than what we have? // @@ -7068,7 +7068,7 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early) // ternaries in C#). // The logic below could be generalized to BBJ_SWITCH, but this currently // has almost no CQ benefit but does have a TP impact. - if ((block->getBBJumpKind() != BBJ_COND) || (block->bbNext == block->bbJumpDest)) + if (!block->KindIs(BBJ_COND) || (block->bbNext == block->bbJumpDest)) { return false; } diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index 317dd4a25bca2f..26c9afc1fbcf02 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -473,7 +473,7 @@ void BlockCountInstrumentor::RelocateProbes() } JITDUMP("Return " FMT_BB " is successor of possible tail call\n", block->bbNum); - assert(block->getBBJumpKind() == BBJ_RETURN); + assert(block->KindIs(BBJ_RETURN)); // Scan for critical preds, and add relocated probes to non-critical preds. // @@ -499,12 +499,12 @@ void BlockCountInstrumentor::RelocateProbes() { // Ensure this pred is not a fall through. // - if (pred->getBBJumpKind() == BBJ_NONE) + if (pred->KindIs(BBJ_NONE)) { pred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); pred->bbJumpDest = block; } - assert(pred->getBBJumpKind() == BBJ_ALWAYS); + assert(pred->KindIs(BBJ_ALWAYS)); } } @@ -1028,7 +1028,7 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor) JITDUMP("No jump dest for " FMT_BB ", suspect bad code\n", block->bbNum); visitor->Badcode(); } - else if (block->getBBJumpKind() != BBJ_LEAVE) + else if (!block->KindIs(BBJ_LEAVE)) { JITDUMP("EH RET in " FMT_BB " most-nested in try, suspect bad code\n", block->bbNum); visitor->Badcode(); @@ -1552,7 +1552,7 @@ void EfficientEdgeCountInstrumentor::SplitCriticalEdges() // Importer folding may have changed the block jump kind // to BBJ_NONE. If so, warp it back to BBJ_ALWAYS. // - if (block->getBBJumpKind() == BBJ_NONE) + if (block->KindIs(BBJ_NONE)) { block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); block->bbJumpDest = target; @@ -1657,7 +1657,7 @@ void EfficientEdgeCountInstrumentor::RelocateProbes() } JITDUMP("Return " FMT_BB " is successor of possible tail call\n", block->bbNum); - assert(block->getBBJumpKind() == BBJ_RETURN); + assert(block->KindIs(BBJ_RETURN)); // This block should have just one probe, which we no longer need. // @@ -1695,12 +1695,12 @@ void EfficientEdgeCountInstrumentor::RelocateProbes() // Ensure this pred is not a fall through. 
// - if (pred->getBBJumpKind() == BBJ_NONE) + if (pred->KindIs(BBJ_NONE)) { pred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); pred->bbJumpDest = block; } - assert(pred->getBBJumpKind() == BBJ_ALWAYS); + assert(pred->KindIs(BBJ_ALWAYS)); } } @@ -3166,7 +3166,7 @@ void EfficientEdgeCountReconstructor::Prepare() m_unknownBlocks++; #ifdef DEBUG - if (block->getBBJumpKind() == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { nReturns++; } @@ -3233,7 +3233,7 @@ void EfficientEdgeCountReconstructor::Prepare() CLRRandom* const random = m_comp->impInlineRoot()->m_inlineStrategy->GetRandom(JitConfig.JitRandomEdgeCounts()); - const bool isReturn = sourceBlock->getBBJumpKind() == BBJ_RETURN; + const bool isReturn = sourceBlock->KindIs(BBJ_RETURN); // We simulate the distribution of counts seen in StdOptimizationData.Mibc. // @@ -3949,7 +3949,7 @@ void EfficientEdgeCountReconstructor::MarkInterestingBlocks(BasicBlock* block, B // void EfficientEdgeCountReconstructor::MarkInterestingSwitches(BasicBlock* block, BlockInfo* info) { - assert(block->getBBJumpKind() == BBJ_SWITCH); + assert(block->KindIs(BBJ_SWITCH)); // Thresholds for detecting a dominant switch case. // @@ -4429,11 +4429,11 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) bSrc = bDst->bbPreds->getSourceBlock(); // Does this block flow into only one other block - if (bSrc->getBBJumpKind() == BBJ_NONE) + if (bSrc->KindIs(BBJ_NONE)) { bOnlyNext = bSrc->bbNext; } - else if (bSrc->getBBJumpKind() == BBJ_ALWAYS) + else if (bSrc->KindIs(BBJ_ALWAYS)) { bOnlyNext = bSrc->bbJumpDest; } @@ -4450,11 +4450,11 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) } // Does this block flow into only one other block - if (bDst->getBBJumpKind() == BBJ_NONE) + if (bDst->KindIs(BBJ_NONE)) { bOnlyNext = bDst->bbNext; } - else if (bDst->getBBJumpKind() == BBJ_ALWAYS) + else if (bDst->KindIs(BBJ_ALWAYS)) { bOnlyNext = bDst->bbJumpDest; } @@ -4485,7 +4485,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) // To minimize asmdiffs for now, modify weights only if splitting. 
if (fgFirstColdBlock != nullptr) { - if (bSrc->getBBJumpKind() == BBJ_CALLFINALLY) + if (bSrc->KindIs(BBJ_CALLFINALLY)) { newWeight = bSrc->bbWeight; } @@ -4756,7 +4756,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() bSrc = edge->getSourceBlock(); slop = BasicBlock::GetSlopFraction(bSrc, bDst) + 1; - if (bSrc->getBBJumpKind() == BBJ_COND) + if (bSrc->KindIs(BBJ_COND)) { weight_t diff; FlowEdge* otherEdge; diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index 722f5f8cadfdd1..f256ca73846c84 100644 --- a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -332,8 +332,8 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block) // THROW heuristic // - bool const isJumpThrow = (jump->getBBJumpKind() == BBJ_THROW); - bool const isNextThrow = (next->getBBJumpKind() == BBJ_THROW); + bool const isJumpThrow = (jump->KindIs(BBJ_THROW)); + bool const isNextThrow = (next->KindIs(BBJ_THROW)); if (isJumpThrow != isNextThrow) { @@ -402,8 +402,8 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block) // RETURN heuristic // - bool const isJumpReturn = (jump->getBBJumpKind() == BBJ_RETURN); - bool const isNextReturn = (next->getBBJumpKind() == BBJ_RETURN); + bool const isJumpReturn = (jump->KindIs(BBJ_RETURN)); + bool const isNextReturn = (next->KindIs(BBJ_RETURN)); if (isJumpReturn != isNextReturn) { @@ -551,7 +551,7 @@ void ProfileSynthesis::RepairLikelihoods() } JITDUMP("\n"); - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { AssignLikelihoodCond(block); } @@ -627,7 +627,7 @@ void ProfileSynthesis::BlendLikelihoods() bool const consistent = Compiler::fgProfileWeightsEqual(sum, 1.0, epsilon); bool const zero = Compiler::fgProfileWeightsEqual(block->bbWeight, 0.0, epsilon); - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { AssignLikelihoodCond(block); } @@ -1214,8 +1214,7 @@ void ProfileSynthesis::ComputeCyclicProbabilities(SimpleLoop* loop) // // Currently we don't know which edges do this. 
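// The capping guard that follows has a simple shape: only a BBJ_COND exit
// block can have its exit-edge likelihood re-derived, and only when the
// block's weight exceeds what the current and missing exit weights
// together explain. Restated as a standalone predicate (illustration only):

bool shouldAdjustExitLikelihood(bool   exitIsCond,
                                double exitBlockWeight,
                                double currentExitWeight,
                                double missingExitWeight)
{
    return exitIsCond && (exitBlockWeight > (missingExitWeight + currentExitWeight));
}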
// - if ((exitBlock->getBBJumpKind() == BBJ_COND) && - (exitBlockWeight > (missingExitWeight + currentExitWeight))) + if ((exitBlock->KindIs(BBJ_COND)) && (exitBlockWeight > (missingExitWeight + currentExitWeight))) { JITDUMP("Will adjust likelihood of the exit edge from loop exit block " FMT_BB " to reflect capping; current likelihood is " FMT_WT "\n", diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 2d5c2b3fd68a36..2ef7dbc9d38a3b 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -254,7 +254,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) BasicBlock* topFallThrough = nullptr; unsigned char lpIndexFallThrough = BasicBlock::NOT_IN_LOOP; - if (top->getBBJumpKind() == BBJ_COND) + if (top->KindIs(BBJ_COND)) { topFallThrough = top->bbNext; lpIndexFallThrough = topFallThrough->bbNatLoopNum; @@ -1728,7 +1728,7 @@ void Compiler::fgAddSyncMethodEnterExit() // non-exceptional cases for (BasicBlock* const block : Blocks()) { - if (block->getBBJumpKind() == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { fgCreateMonitorTree(lvaMonAcquired, info.compThisArg, block, false /*exit*/); } @@ -1772,7 +1772,7 @@ GenTree* Compiler::fgCreateMonitorTree(unsigned lvaMonAcquired, unsigned lvaThis } #endif - if (block->getBBJumpKind() == BBJ_RETURN && block->lastStmt()->GetRootNode()->gtOper == GT_RETURN) + if (block->KindIs(BBJ_RETURN) && block->lastStmt()->GetRootNode()->gtOper == GT_RETURN) { GenTreeUnOp* retNode = block->lastStmt()->GetRootNode()->AsUnOp(); GenTree* retExpr = retNode->gtOp1; @@ -1821,7 +1821,7 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) assert(genReturnBB != nullptr); assert(genReturnBB != block); assert(fgReturnCount <= 1); // We have a single return for synchronized methods - assert(block->getBBJumpKind() == BBJ_RETURN); + assert(block->KindIs(BBJ_RETURN)); assert((block->bbFlags & BBF_HAS_JMP) == 0); assert(block->hasTryIndex()); assert(!block->hasHndIndex()); @@ -1949,7 +1949,7 @@ bool Compiler::fgMoreThanOneReturnBlock() for (BasicBlock* const block : Blocks()) { - if (block->getBBJumpKind() == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { retCnt++; if (retCnt > 1) @@ -2596,7 +2596,7 @@ PhaseStatus Compiler::fgAddInternal() for (BasicBlock* block = fgFirstBB; block != lastBlockBeforeGenReturns->bbNext; block = block->bbNext) { - if ((block->getBBJumpKind() == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) + if ((block->KindIs(BBJ_RETURN)) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { merger.Record(block); } @@ -3451,7 +3451,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // so the code size for block needs be large // enough to make it worth our while // - if ((lblk == nullptr) || (lblk->getBBJumpKind() != BBJ_COND) || (fgGetCodeEstimate(block) >= 8)) + if ((lblk == nullptr) || !lblk->KindIs(BBJ_COND) || (fgGetCodeEstimate(block) >= 8)) { // This block is now a candidate for first cold block // Also remember the predecessor to this block @@ -3523,7 +3523,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // This is a slightly more complicated case, because we will // probably need to insert a block to jump to the cold section. 
// - if (firstColdBlock->isEmpty() && (firstColdBlock->getBBJumpKind() == BBJ_ALWAYS)) + if (firstColdBlock->isEmpty() && (firstColdBlock->KindIs(BBJ_ALWAYS))) { // We can just use this block as the transitionBlock firstColdBlock = firstColdBlock->bbNext; diff --git a/src/coreclr/jit/gschecks.cpp b/src/coreclr/jit/gschecks.cpp index 404d86e3abc0ec..0953920d6192e3 100644 --- a/src/coreclr/jit/gschecks.cpp +++ b/src/coreclr/jit/gschecks.cpp @@ -529,7 +529,7 @@ void Compiler::gsParamsToShadows() // We would have to insert assignments in all such blocks, just before GT_JMP stmnt. for (BasicBlock* const block : Blocks()) { - if (block->getBBJumpKind() != BBJ_RETURN) + if (!block->KindIs(BBJ_RETURN)) { continue; } diff --git a/src/coreclr/jit/ifconversion.cpp b/src/coreclr/jit/ifconversion.cpp index da0683be95ab4b..7b50f5428458b0 100644 --- a/src/coreclr/jit/ifconversion.cpp +++ b/src/coreclr/jit/ifconversion.cpp @@ -83,7 +83,7 @@ class OptIfConversionDsc bool OptIfConversionDsc::IfConvertCheckInnerBlockFlow(BasicBlock* block) { // Block should have a single successor or be a return. - if (!(block->GetUniqueSucc() != nullptr || (m_doElseConversion && (block->getBBJumpKind() == BBJ_RETURN)))) + if (!(block->GetUniqueSucc() != nullptr || (m_doElseConversion && (block->KindIs(BBJ_RETURN))))) { return false; } @@ -137,7 +137,7 @@ bool OptIfConversionDsc::IfConvertCheckThenFlow() { // All the Then blocks up to m_finalBlock are in a valid flow. m_flowFound = true; - if (thenBlock->getBBJumpKind() == BBJ_RETURN) + if (thenBlock->KindIs(BBJ_RETURN)) { assert(m_finalBlock == nullptr); m_mainOper = GT_RETURN; @@ -553,7 +553,7 @@ void OptIfConversionDsc::IfConvertDump() bool OptIfConversionDsc::optIfConvert() { // Does the block end by branching via a JTRUE after a compare? - if (m_startBlock->getBBJumpKind() != BBJ_COND || m_startBlock->NumSucc() != 2) + if (!m_startBlock->KindIs(BBJ_COND) || m_startBlock->NumSucc() != 2) { return false; } diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 704536165d5845..f0e46f2100a254 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -4101,7 +4101,7 @@ bool Compiler::impIsImplicitTailCallCandidate( // the block containing call is marked as BBJ_RETURN // We allow shared ret tail call optimization on recursive calls even under // !FEATURE_TAILCALL_OPT_SHARED_RETURN. 
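// The rewrite applied just below, and on nearly every line of this patch,
// folds kind comparisons into KindIs. A plausible standalone reduction of
// that accessor over a hypothetical MiniBlock; this sketches the idea, not
// necessarily how the real BasicBlock::KindIs is written:

enum MiniKind
{
    MK_NONE,
    MK_ALWAYS,
    MK_COND,
    MK_RETURN
};

struct MiniBlock
{
    MiniKind kind;

    // kindIs(MK_COND, MK_RETURN) reads as (kind == MK_COND) || (kind == MK_RETURN),
    // so call sites never touch the raw kind field directly.
    template <typename... TKinds>
    bool kindIs(TKinds... kinds) const
    {
        return ((kind == kinds) || ...); // C++17 fold over the argument pack
    }
};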
- if (!isRecursive && (compCurBB->getBBJumpKind() != BBJ_RETURN)) + if (!isRecursive && !compCurBB->KindIs(BBJ_RETURN)) return false; #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN @@ -4250,7 +4250,7 @@ void Compiler::impImportLeave(BasicBlock* block) impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; - assert(block->getBBJumpKind() == BBJ_LEAVE); + assert(block->KindIs(BBJ_LEAVE)); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary BasicBlock* step = DUMMY_INIT(NULL); @@ -4344,7 +4344,7 @@ void Compiler::impImportLeave(BasicBlock* block) /* Calling the finally block */ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step); - assert(step->getBBJumpKind() == BBJ_ALWAYS); + assert(step->KindIs(BBJ_ALWAYS)); if (step->bbJumpDest != nullptr) { fgRemoveRefPred(step->bbJumpDest, step); @@ -4523,7 +4523,7 @@ void Compiler::impImportLeave(BasicBlock* block) impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; - assert(block->getBBJumpKind() == BBJ_LEAVE); + assert(block->KindIs(BBJ_LEAVE)); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary BasicBlock* step = nullptr; @@ -4606,7 +4606,7 @@ void Compiler::impImportLeave(BasicBlock* block) #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { - assert(step->getBBJumpKind() == BBJ_ALWAYS); + assert(step->KindIs(BBJ_ALWAYS)); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } @@ -4708,7 +4708,7 @@ void Compiler::impImportLeave(BasicBlock* block) assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); #if FEATURE_EH_CALLFINALLY_THUNKS - if (step->getBBJumpKind() == BBJ_EHCATCHRET) + if (step->KindIs(BBJ_EHCATCHRET)) { // Need to create another step block in the 'try' region that will actually branch to the // call-to-finally thunk. @@ -4758,7 +4758,7 @@ void Compiler::impImportLeave(BasicBlock* block) #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { - assert(step->getBBJumpKind() == BBJ_ALWAYS); + assert(step->KindIs(BBJ_ALWAYS)); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } @@ -4850,12 +4850,12 @@ void Compiler::impImportLeave(BasicBlock* block) if (stepType == ST_FinallyReturn) { - assert(step->getBBJumpKind() == BBJ_ALWAYS); + assert(step->KindIs(BBJ_ALWAYS)); } else { assert(stepType == ST_Catch); - assert(step->getBBJumpKind() == BBJ_EHCATCHRET); + assert(step->KindIs(BBJ_EHCATCHRET)); } /* Create a new exit block in the try region for the existing step block to jump to in this scope */ @@ -4931,7 +4931,7 @@ void Compiler::impImportLeave(BasicBlock* block) #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { - assert(step->getBBJumpKind() == BBJ_ALWAYS); + assert(step->KindIs(BBJ_ALWAYS)); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } @@ -4992,7 +4992,7 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1 // will be treated as pair and handled correctly. 
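// Net shape of the workaround described above, in miniature: before B0 is
// reset, a stand-in call-finally block is stitched in front of the
// orphaned BBJ_ALWAYS so orphan-block deletion still sees a well-formed
// pair. Hypothetical types; ownership, flags, and pred lists elided:

enum MiniKind
{
    MK_CALLFINALLY,
    MK_ALWAYS,
    MK_LEAVE
};

struct MiniBlock
{
    MiniKind   kind;
    MiniBlock* next;
};

// Returns the duplicate pair head (B0Dup) inserted before the orphan (B1).
MiniBlock* insertPairHeadBefore(MiniBlock* prev, MiniBlock* orphanAlways)
{
    MiniBlock* const dup = new MiniBlock{MK_CALLFINALLY, orphanAlways};
    prev->next           = dup; // dup now pairs with the orphaned BBJ_ALWAYS
    return dup;
}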
- if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { BasicBlock* dupBlock = bbNewBasicBlock(block->getBBJumpKind()); dupBlock->bbFlags = block->bbFlags; @@ -6715,7 +6715,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) /* Mark current bb as end of filter */ assert(compCurBB->bbFlags & BBF_DONT_REMOVE); - assert(compCurBB->getBBJumpKind() == BBJ_EHFILTERRET); + assert(compCurBB->KindIs(BBJ_EHFILTERRET)); /* Mark catch handler as successor */ @@ -7256,7 +7256,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } JITDUMP(" %04X", jmpAddr); - if (block->getBBJumpKind() != BBJ_LEAVE) + if (!block->KindIs(BBJ_LEAVE)) { impResetLeaveBlock(block, jmpAddr); } @@ -7302,7 +7302,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) { // We may have already modified `block`'s jump kind, if this is a re-importation. // - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", block->bbNum, block->bbNext->bbNum); @@ -7311,7 +7311,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } else { - assert(block->getBBJumpKind() == BBJ_NONE); + assert(block->KindIs(BBJ_NONE)); } if (op1->gtFlags & GTF_GLOB_EFFECT) @@ -7363,12 +7363,11 @@ void Compiler::impImportBlockCode(BasicBlock* block) assert(!opts.compDbgCode); BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE); - assertImp((block->getBBJumpKind() == BBJ_COND) // normal case - || - (block->getBBJumpKind() == foldedJumpKind)); // this can happen if we are reimporting the - // block for the second time + // BBJ_COND: normal case + // foldedJumpKind: this can happen if we are reimporting the block for the second time + assertImp(block->KindIs(BBJ_COND, foldedJumpKind)); // normal case - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { if (foldedJumpKind == BBJ_NONE) { @@ -7549,7 +7548,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) { // We may have already modified `block`'s jump kind, if this is a re-importation. // - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", block->bbNum, block->bbNext->bbNum); @@ -7558,7 +7557,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } else { - assert(block->getBBJumpKind() == BBJ_NONE); + assert(block->KindIs(BBJ_NONE)); } if (op1->gtFlags & GTF_GLOB_EFFECT) @@ -7658,8 +7657,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) { printf("\nSwitch folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, - block->getBBJumpKind() == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); - if (block->getBBJumpKind() == BBJ_ALWAYS) + block->KindIs(BBJ_ALWAYS) ? 
"BBJ_ALWAYS" : "BBJ_NONE"); + if (block->KindIs(BBJ_ALWAYS)) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } @@ -8532,10 +8531,9 @@ void Compiler::impImportBlockCode(BasicBlock* block) lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */); } - bool bbInALoop = impBlockIsInALoop(block); - bool bbIsReturn = - (block->getBBJumpKind() == BBJ_RETURN) && - (!compIsForInlining() || (impInlineInfo->iciBlock->getBBJumpKind() == BBJ_RETURN)); + bool bbInALoop = impBlockIsInALoop(block); + bool bbIsReturn = (block->KindIs(BBJ_RETURN)) && + (!compIsForInlining() || (impInlineInfo->iciBlock->KindIs(BBJ_RETURN))); LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { @@ -12119,11 +12117,11 @@ void Compiler::impImport() JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum); entryBlock->bbFlags |= BBF_IMPORTED; - if (entryBlock->getBBJumpKind() == BBJ_NONE) + if (entryBlock->KindIs(BBJ_NONE)) { entryBlock = entryBlock->bbNext; } - else if (opts.IsOSR() && (entryBlock->getBBJumpKind() == BBJ_ALWAYS)) + else if (opts.IsOSR() && (entryBlock->KindIs(BBJ_ALWAYS))) { entryBlock = entryBlock->bbJumpDest; } @@ -12241,7 +12239,7 @@ void Compiler::impFixPredLists() continue; } - if (finallyBlock->getBBJumpKind() != BBJ_EHFINALLYRET) + if (!finallyBlock->KindIs(BBJ_EHFINALLYRET)) { continue; } diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index fbe0978f2514b9..deff9be27e4c04 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -1095,7 +1095,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // assert(compCurBB is not a catch, finally or filter block); // assert(compCurBB is not a try block protected by a finally block); - assert(!isExplicitTailCall || compCurBB->getBBJumpKind() == BBJ_RETURN); + assert(!isExplicitTailCall || compCurBB->KindIs(BBJ_RETURN)); // Ask VM for permission to tailcall if (canTailCall) @@ -1271,10 +1271,10 @@ var_types Compiler::impImportCall(OPCODE opcode, // BBJ_RETURN successor. Mark that successor so we can handle it specially during profile // instrumentation. 
// - if (compCurBB->getBBJumpKind() != BBJ_RETURN) + if (!compCurBB->KindIs(BBJ_RETURN)) { BasicBlock* const successor = compCurBB->GetUniqueSucc(); - assert(successor->getBBJumpKind() == BBJ_RETURN); + assert(successor->KindIs(BBJ_RETURN)); successor->bbFlags |= BBF_TAILCALL_SUCCESSOR; optMethodFlags |= OMF_HAS_TAILCALL_SUCCESSOR; } diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index 15cee342aa603b..ab67048abbbc67 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -1073,7 +1073,7 @@ class IndirectCallTransformer // BasicBlock* const coldBlock = checkBlock->bbPrev; - if (coldBlock->getBBJumpKind() != BBJ_NONE) + if (!coldBlock->KindIs(BBJ_NONE)) { JITDUMP("Unexpected flow from cold path " FMT_BB "\n", coldBlock->bbNum); return; @@ -1081,7 +1081,7 @@ class IndirectCallTransformer BasicBlock* const hotBlock = coldBlock->bbPrev; - if ((hotBlock->getBBJumpKind() != BBJ_ALWAYS) || (hotBlock->bbJumpDest != checkBlock)) + if (!hotBlock->KindIs(BBJ_ALWAYS) || (hotBlock->bbJumpDest != checkBlock)) { JITDUMP("Unexpected flow from hot path " FMT_BB "\n", hotBlock->bbNum); return; diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index 888058d133b62d..8606b18743e72d 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -960,7 +960,7 @@ void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** be bool Compiler::ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex) { - assert(blockCallFinally->getBBJumpKind() == BBJ_CALLFINALLY); + assert(blockCallFinally->KindIs(BBJ_CALLFINALLY)); assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX); assert(finallyIndex < compHndBBtabCount); assert(ehGetDsc(finallyIndex)->HasFinallyHandler()); @@ -2276,7 +2276,7 @@ bool Compiler::fgNormalizeEHCase2() // Change pred branches. // - if (predBlock->getBBJumpKind() != BBJ_NONE) + if (!predBlock->KindIs(BBJ_NONE)) { fgReplaceJumpTarget(predBlock, newTryStart, insertBeforeBlk); } @@ -4056,12 +4056,12 @@ void Compiler::fgClearFinallyTargetBit(BasicBlock* block) for (BasicBlock* const predBlock : block->PredBlocks()) { - if (predBlock->getBBJumpKind() == BBJ_ALWAYS && predBlock->bbJumpDest == block) + if (predBlock->KindIs(BBJ_ALWAYS) && predBlock->bbJumpDest == block) { BasicBlock* pPrev = predBlock->bbPrev; if (pPrev != nullptr) { - if (pPrev->getBBJumpKind() == BBJ_CALLFINALLY) + if (pPrev->KindIs(BBJ_CALLFINALLY)) { // We found a BBJ_CALLFINALLY / BBJ_ALWAYS that still points to this finally target return; @@ -4113,7 +4113,7 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) ((xtab->ebdHndBeg->bbNext == block) && (xtab->ebdHndBeg->bbFlags & BBF_INTERNAL))); // After we've already inserted a header block, and we're // trying to decide how to split up the predecessor edges. - if (predBlock->getBBJumpKind() == BBJ_CALLFINALLY) + if (predBlock->KindIs(BBJ_CALLFINALLY)) { assert(predBlock->bbJumpDest == block); @@ -4184,7 +4184,7 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) // The block is a handler. Check if the pred block is from its filter. We only need to // check the end filter flag, as there is only a single filter for any handler, and we // already know predBlock is a predecessor of block. 
- if (predBlock->getBBJumpKind() == BBJ_EHFILTERRET) + if (predBlock->KindIs(BBJ_EHFILTERRET)) { assert(!xtab->InHndRegionBBRange(predBlock)); return false; @@ -4413,7 +4413,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) { BasicBlock* bFilterLast = HBtab->BBFilterLast(); assert(bFilterLast != nullptr); - assert(bFilterLast->getBBJumpKind() == BBJ_EHFILTERRET); + assert(bFilterLast->KindIs(BBJ_EHFILTERRET)); assert(bFilterLast->bbJumpDest == block); #ifdef DEBUG if (verbose) diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 820545508968ed..8af56fa167317d 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -4098,7 +4098,7 @@ void Compiler::lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, if (!varDsc->lvDisqualifySingleDefRegCandidate) // If this var is already disqualified, we can skip this { bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->getBBJumpKind() == BBJ_RETURN; + bool bbIsReturn = block->KindIs(BBJ_RETURN); // TODO: Zero-inits in LSRA are created with below condition. But if filter out based on that condition // we filter a lot of interesting variables that would benefit otherwise with EH var enregistration. // bool needsExplicitZeroInit = !varDsc->lvIsParam && (info.compInitMem || diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index 9c9aafe0686b65..d66ddc05a5cdcb 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -491,7 +491,7 @@ void Compiler::fgPerBlockLocalVarLiveness() // Mark the FrameListRoot as used, if applicable. - if (block->getBBJumpKind() == BBJ_RETURN && compMethodRequiresPInvokeFrame()) + if (block->KindIs(BBJ_RETURN) && compMethodRequiresPInvokeFrame()) { assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM)); if (!opts.ShouldUsePInvokeHelpers()) @@ -2451,7 +2451,7 @@ void Compiler::fgInterBlockLocalVarLiveness() { // Get the set of live variables on exit from an exception region. VarSetOps::UnionD(this, exceptVars, block->bbLiveOut); - if (block->getBBJumpKind() == BBJ_EHFINALLYRET) + if (block->KindIs(BBJ_EHFINALLYRET)) { // Live on exit from finally. // We track these separately because, in addition to having EH live-out semantics, diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index c6e6dc91c3d88e..e9f4df76924747 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -1766,7 +1766,7 @@ bool Compiler::optIsLoopClonable(unsigned loopInd) unsigned loopRetCount = 0; for (BasicBlock* const blk : loop.LoopBlocks()) { - if (blk->getBBJumpKind() == BBJ_RETURN) + if (blk->KindIs(BBJ_RETURN)) { loopRetCount++; } @@ -1855,7 +1855,7 @@ bool Compiler::optIsLoopClonable(unsigned loopInd) BasicBlock* top = loop.lpTop; BasicBlock* bottom = loop.lpBottom; - if (bottom->getBBJumpKind() != BBJ_COND) + if (!bottom->KindIs(BBJ_COND)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". 
Couldn't find termination test.\n", loopInd); return false; @@ -1945,7 +1945,7 @@ BasicBlock* Compiler::optInsertLoopChoiceConditions(LoopCloneContext* context, JITDUMP("Inserting loop " FMT_LP " loop choice conditions\n", loopNum); assert(context->HasBlockConditions(loopNum)); assert(slowHead != nullptr); - assert(insertAfter->getBBJumpKind() == BBJ_NONE); + assert(insertAfter->KindIs(BBJ_NONE)); if (context->HasBlockConditions(loopNum)) { @@ -2043,9 +2043,9 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) h2->bbNatLoopNum = ambientLoop; h2->bbFlags |= BBF_LOOP_PREHEADER; - if (h->getBBJumpKind() != BBJ_NONE) + if (!h->KindIs(BBJ_NONE)) { - assert(h->getBBJumpKind() == BBJ_ALWAYS); + assert(h->KindIs(BBJ_ALWAYS)); assert(h->bbJumpDest == loop.lpEntry); h2->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); h2->bbJumpDest = loop.lpEntry; @@ -2069,9 +2069,9 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // "newPred" will be the predecessor of the blocks of the cloned loop. BasicBlock* b = loop.lpBottom; BasicBlock* newPred = b; - if (b->getBBJumpKind() != BBJ_ALWAYS) + if (!b->KindIs(BBJ_ALWAYS)) { - assert(b->getBBJumpKind() == BBJ_COND); + assert(b->KindIs(BBJ_COND)); BasicBlock* x = b->bbNext; if (x != nullptr) @@ -2175,7 +2175,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) bool b = blockMap->Lookup(blk, &newblk); assert(b && newblk != nullptr); - assert(blk->getBBJumpKind() == newblk->getBBJumpKind()); + assert(blk->KindIs(newblk->getBBJumpKind())); // First copy the jump destination(s) from "blk". optCopyBlkDest(blk, newblk); @@ -2243,7 +2243,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // We should always have block conditions. assert(context->HasBlockConditions(loopInd)); - assert(h->getBBJumpKind() == BBJ_NONE); + assert(h->KindIs(BBJ_NONE)); assert(h->bbNext == h2); // If any condition is false, go to slowHead (which branches or falls through to e2). @@ -2254,7 +2254,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) if (slowHead->bbNext != e2) { // We can't just fall through to the slow path entry, so make it an unconditional branch. - assert(slowHead->getBBJumpKind() == BBJ_NONE); // This is how we created it above. + assert(slowHead->KindIs(BBJ_NONE)); // This is how we created it above. slowHead->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); slowHead->bbJumpDest = e2; } @@ -2266,7 +2266,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // Add the fall-through path pred (either to T/E for fall-through from conditions to fast path, // or H2 if branch to E of fast path). - assert(condLast->getBBJumpKind() == BBJ_COND); + assert(condLast->KindIs(BBJ_COND)); JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", condLast->bbNum, condLast->bbNext->bbNum); fgAddRefPred(condLast->bbNext, condLast); diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index b985a5a8b1229b..26f4ed946955d4 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -891,9 +891,9 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // afterDefaultCondBlock is now the switch, and all the switch targets have it as a predecessor. // originalSwitchBB is now a BBJ_NONE, and there is a predecessor edge in afterDefaultCondBlock // representing the fall-through flow from originalSwitchBB. 
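// TryLowerSwitchToBitTest, asserted on further down in these hunks,
// replaces a two-target switch with a single bit test: build a mask whose
// bit i records where case i goes, then branch on ((mask >> value) & 1).
// The mask construction as a standalone sketch, under assumed inputs
// rather than the JIT's real flow-graph edges:

#include <cstdint>

// caseGoesToTakenTarget[i] is true when switch case i jumps to the target
// encoded by a set bit. Fails when the switch is too wide for one mask.
bool tryBuildBitTestMask(const bool* caseGoesToTakenTarget, unsigned caseCount, uint64_t* mask)
{
    if (caseCount > 64)
    {
        return false; // cannot encode in a register-sized mask
    }
    *mask = 0;
    for (unsigned i = 0; i < caseCount; i++)
    {
        if (caseGoesToTakenTarget[i])
        {
            *mask |= (uint64_t(1) << i);
        }
    }
    return true;
}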
- assert(originalSwitchBB->getBBJumpKind() == BBJ_NONE); + assert(originalSwitchBB->KindIs(BBJ_NONE)); assert(originalSwitchBB->bbNext == afterDefaultCondBlock); - assert(afterDefaultCondBlock->getBBJumpKind() == BBJ_SWITCH); + assert(afterDefaultCondBlock->KindIs(BBJ_SWITCH)); assert(afterDefaultCondBlock->bbJumpSwt->bbsHasDefault); assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet. @@ -1074,7 +1074,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // so fgRemoveBlock() doesn't complain. JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum); assert(currentBlock == afterDefaultCondBlock); - assert(currentBlock->getBBJumpKind() == BBJ_SWITCH); + assert(currentBlock->KindIs(BBJ_SWITCH)); currentBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); currentBlock->bbFlags &= ~BBF_DONT_REMOVE; comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block. @@ -1159,7 +1159,7 @@ bool Lowering::TryLowerSwitchToBitTest( { assert(jumpCount >= 2); assert(targetCount >= 2); - assert(bbSwitch->getBBJumpKind() == BBJ_SWITCH); + assert(bbSwitch->KindIs(BBJ_SWITCH)); assert(switchValue->OperIs(GT_LCL_VAR)); // @@ -5296,7 +5296,7 @@ void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* JITDUMP("======= Inserting PInvoke method epilog\n"); // Method doing PInvoke calls has exactly one return block unless it has "jmp" or tail calls. - assert(((returnBB == comp->genReturnBB) && (returnBB->getBBJumpKind() == BBJ_RETURN)) || + assert(((returnBB == comp->genReturnBB) && (returnBB->KindIs(BBJ_RETURN))) || returnBB->endsWithTailCallOrJmp(comp)); LIR::Range& returnBlockRange = LIR::AsRange(returnBB); diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index 1b7aebaea1997b..88af18d880898e 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -964,7 +964,7 @@ void LinearScan::setBlockSequence() blockInfo[block->bbNum].hasCriticalInEdge = true; hasCriticalEdges = true; } - else if (predBlock->getBBJumpKind() == BBJ_SWITCH) + else if (predBlock->KindIs(BBJ_SWITCH)) { assert(!"Switch with single successor"); } @@ -993,7 +993,7 @@ void LinearScan::setBlockSequence() // according to the desired order. We will handle the EH successors below. const unsigned numSuccs = block->NumSucc(compiler); bool checkForCriticalOutEdge = (numSuccs > 1); - if (!checkForCriticalOutEdge && block->getBBJumpKind() == BBJ_SWITCH) + if (!checkForCriticalOutEdge && block->KindIs(BBJ_SWITCH)) { assert(!"Switch with single successor"); } @@ -1549,7 +1549,7 @@ void LinearScan::identifyCandidatesExceptionDataflow() if (block->hasEHBoundaryOut()) { VarSetOps::UnionD(compiler, exceptVars, block->bbLiveOut); - if (block->getBBJumpKind() == BBJ_EHFINALLYRET) + if (block->KindIs(BBJ_EHFINALLYRET)) { // Live on exit from finally. // We track these separately because, in addition to having EH live-out semantics, @@ -2513,7 +2513,7 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, // IG08: // ... // ... - if (block->getBBJumpKind() == BBJ_THROW) + if (block->KindIs(BBJ_THROW)) { JITDUMP(" - throw block; "); return nullptr; @@ -2544,7 +2544,7 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, assert(!predBlock->hasEHBoundaryOut()); if (isBlockVisited(predBlock)) { - if (predBlock->getBBJumpKind() == BBJ_COND) + if (predBlock->KindIs(BBJ_COND)) { // Special handling to improve matching on backedges. BasicBlock* otherBlock = (block == predBlock->bbNext) ? 
predBlock->bbJumpDest : predBlock->bbNext; @@ -8177,7 +8177,7 @@ void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block) // Note: Only switches and JCMP/JTEST (for Arm4) have input regs (and so can be fed by copies), so those // are the only block-ending branches that need special handling. regMaskTP consumedRegs = RBM_NONE; - if (block->getBBJumpKind() == BBJ_SWITCH) + if (block->KindIs(BBJ_SWITCH)) { // At this point, Lowering has transformed any non-switch-table blocks into // cascading ifs. @@ -8216,7 +8216,7 @@ void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block) // Note: GT_COPY has special handling in codegen and its generation is merged with the // node that consumes its result. So both, the input and output regs of GT_COPY must be // excluded from the set available for resolution. - else if (block->getBBJumpKind() == BBJ_COND) + else if (block->KindIs(BBJ_COND)) { GenTree* lastNode = LIR::AsRange(block).LastNode(); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 0342221537d57d..88175479c16b54 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -6126,7 +6126,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // No unique successor. compCurBB should be a return. // - assert(compCurBB->getBBJumpKind() == BBJ_RETURN); + assert(compCurBB->KindIs(BBJ_RETURN)); } else { @@ -6329,7 +6329,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) // Fast tail call: in case of fast tail calls, we need a jmp epilog and // hence mark it as BBJ_RETURN with BBF_JMP flag set. - noway_assert(compCurBB->getBBJumpKind() == BBJ_RETURN); + noway_assert(compCurBB->KindIs(BBJ_RETURN)); if (canFastTailCall) { compCurBB->bbFlags |= BBF_HAS_JMP; @@ -8032,7 +8032,7 @@ GenTree* Compiler::fgMorphConst(GenTree* tree) // of CORINFO_HELP_STRCNS and go to cache first giving reasonable perf. bool useLazyStrCns = false; - if (compCurBB->getBBJumpKind() == BBJ_THROW) + if (compCurBB->KindIs(BBJ_THROW)) { useLazyStrCns = true; } @@ -13120,7 +13120,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) return result; } - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); @@ -13293,9 +13293,8 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); - printf(FMT_BB " becomes a %s", block->bbNum, - block->getBBJumpKind() == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); - if (block->getBBJumpKind() == BBJ_ALWAYS) + printf(FMT_BB " becomes a %s", block->bbNum, block->KindIs(BBJ_ALWAYS) ? "BBJ_ALWAYS" : "BBJ_NONE"); + if (block->KindIs(BBJ_ALWAYS)) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } @@ -13356,7 +13355,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) } } } - else if (block->getBBJumpKind() == BBJ_SWITCH) + else if (block->KindIs(BBJ_SWITCH)) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); @@ -13452,9 +13451,8 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); - printf(FMT_BB " becomes a %s", block->bbNum, - block->getBBJumpKind() == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); - if (block->getBBJumpKind() == BBJ_ALWAYS) + printf(FMT_BB " becomes a %s", block->bbNum, block->KindIs(BBJ_ALWAYS) ? 
"BBJ_ALWAYS" : "BBJ_NONE"); + if (block->KindIs(BBJ_ALWAYS)) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } @@ -13727,10 +13725,10 @@ void Compiler::fgMorphStmts(BasicBlock* block) // - a tail call dispatched via runtime help (IL stubs), in which // case there will not be any tailcall and the block will be ending // with BBJ_RETURN (as normal control flow) - noway_assert((call->IsFastTailCall() && (compCurBB->getBBJumpKind() == BBJ_RETURN) && + noway_assert((call->IsFastTailCall() && (compCurBB->KindIs(BBJ_RETURN)) && ((compCurBB->bbFlags & BBF_HAS_JMP)) != 0) || - (call->IsTailCallViaJitHelper() && (compCurBB->getBBJumpKind() == BBJ_THROW)) || - (!call->IsTailCall() && (compCurBB->getBBJumpKind() == BBJ_RETURN))); + (call->IsTailCallViaJitHelper() && (compCurBB->KindIs(BBJ_THROW))) || + (!call->IsTailCall() && (compCurBB->KindIs(BBJ_RETURN)))); } #ifdef DEBUG @@ -13806,7 +13804,7 @@ void Compiler::fgMorphStmts(BasicBlock* block) if (fgRemoveRestOfBlock) { - if ((block->getBBJumpKind() == BBJ_COND) || (block->getBBJumpKind() == BBJ_SWITCH)) + if (block->KindIs(BBJ_COND, BBJ_SWITCH)) { Statement* first = block->firstStmt(); noway_assert(first); @@ -13814,8 +13812,8 @@ void Compiler::fgMorphStmts(BasicBlock* block) noway_assert(lastStmt && lastStmt->GetNextStmt() == nullptr); GenTree* last = lastStmt->GetRootNode(); - if (((block->getBBJumpKind() == BBJ_COND) && (last->gtOper == GT_JTRUE)) || - ((block->getBBJumpKind() == BBJ_SWITCH) && (last->gtOper == GT_SWITCH))) + if ((block->KindIs(BBJ_COND) && (last->gtOper == GT_JTRUE)) || + (block->KindIs(BBJ_SWITCH) && (last->gtOper == GT_SWITCH))) { GenTree* op1 = last->AsOp()->gtOp1; @@ -13923,7 +13921,7 @@ void Compiler::fgMorphBlocks() fgMorphStmts(block); // Do we need to merge the result of this block into a single return block? - if ((block->getBBJumpKind() == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) + if ((block->KindIs(BBJ_RETURN)) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { if ((genReturnBB != nullptr) && (genReturnBB != block)) { @@ -13979,7 +13977,7 @@ void Compiler::fgMorphBlocks() // void Compiler::fgMergeBlockReturn(BasicBlock* block) { - assert((block->getBBJumpKind() == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)); + assert((block->KindIs(BBJ_RETURN)) && ((block->bbFlags & BBF_HAS_JMP) == 0)); assert((genReturnBB != nullptr) && (genReturnBB != block)); // TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN. diff --git a/src/coreclr/jit/objectalloc.cpp b/src/coreclr/jit/objectalloc.cpp index 473fe3c1c0cad4..3694e83c248f2c 100644 --- a/src/coreclr/jit/objectalloc.cpp +++ b/src/coreclr/jit/objectalloc.cpp @@ -510,7 +510,7 @@ unsigned int ObjectAllocator::MorphAllocObjNodeIntoStackAlloc(GenTreeAllocObj* a // Initialize the object memory if necessary. 
bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->getBBJumpKind() == BBJ_RETURN; + bool bbIsReturn = block->KindIs(BBJ_RETURN); LclVarDsc* const lclDsc = comp->lvaGetDesc(lclNum); if (comp->fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index 68191baedd2e53..09683dde47bc8b 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -881,14 +881,14 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() #ifdef DEBUG m_b1->bbJumpSwt = m_b2->bbJumpSwt; #endif - assert(m_b2->getBBJumpKind() == BBJ_RETURN); + assert(m_b2->KindIs(BBJ_RETURN)); assert(m_b1->bbNext == m_b2); assert(m_b3 != nullptr); } else { - assert(m_b1->getBBJumpKind() == BBJ_COND); - assert(m_b2->getBBJumpKind() == BBJ_COND); + assert(m_b1->KindIs(BBJ_COND)); + assert(m_b2->KindIs(BBJ_COND)); assert(m_b1->bbJumpDest == m_b2->bbJumpDest); assert(m_b1->bbNext == m_b2); assert(m_b2->bbNext != nullptr); @@ -1180,7 +1180,7 @@ void OptBoolsDsc::optOptimizeBoolsGcStress() return; } - assert(m_b1->getBBJumpKind() == BBJ_COND); + assert(m_b1->KindIs(BBJ_COND)); Statement* const stmt = m_b1->lastStmt(); GenTree* const cond = stmt->GetRootNode(); @@ -1469,7 +1469,7 @@ PhaseStatus Compiler::optOptimizeBools() // We're only interested in conditional jumps here - if (b1->getBBJumpKind() != BBJ_COND) + if (!b1->KindIs(BBJ_COND)) { continue; } @@ -1492,7 +1492,7 @@ PhaseStatus Compiler::optOptimizeBools() // The next block needs to be a condition or return block. - if (b2->getBBJumpKind() == BBJ_COND) + if (b2->KindIs(BBJ_COND)) { if ((b1->bbJumpDest != b2->bbJumpDest) && (b1->bbJumpDest != b2->bbNext)) { @@ -1517,7 +1517,7 @@ PhaseStatus Compiler::optOptimizeBools() } #endif } - else if (b2->getBBJumpKind() == BBJ_RETURN) + else if (b2->KindIs(BBJ_RETURN)) { // Set b3 to b1 jump destination BasicBlock* b3 = b1->bbJumpDest; @@ -1531,7 +1531,7 @@ PhaseStatus Compiler::optOptimizeBools() // b3 must be RETURN type - if (b3->getBBJumpKind() != BBJ_RETURN) + if (!b3->KindIs(BBJ_RETURN)) { continue; } diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 59d50c68501975..f073ce0f11fc63 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -741,7 +741,7 @@ bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenT bool initBlockOk = (predBlock == initBlock); if (!initBlockOk) { - if ((predBlock->getBBJumpKind() == BBJ_NONE) && (predBlock->bbNext == optLoopTable[loopInd].lpEntry) && + if ((predBlock->KindIs(BBJ_NONE)) && (predBlock->bbNext == optLoopTable[loopInd].lpEntry) && (predBlock->countOfInEdges() == 1) && (predBlock->firstStmt() == nullptr) && (predBlock->bbPrev != nullptr) && predBlock->bbPrev->bbFallsThrough()) { @@ -1150,8 +1150,8 @@ bool Compiler::optExtractInitTestIncr( // If we are rebuilding the loop table, we would already have the pre-header block introduced // the first time, which might be empty if no hoisting has yet occurred. In this case, look a // little harder for the possible loop initialization statement. 
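// "Look a little harder" concretely means: when the candidate init block
// is an empty, single-pred, fall-through pre-header, step back one block
// and read the init statement from there. Standalone sketch with a
// hypothetical MiniBlock mirroring the checks in the condition below:

struct MiniBlock
{
    bool       fallsThrough;  // BBJ_NONE-like, next block is the loop top
    bool       hasStatements; // firstStmt() != nullptr
    unsigned   numPreds;      // countOfInEdges()
    MiniBlock* prev;
};

MiniBlock* initStatementBlock(MiniBlock* init)
{
    if (init->fallsThrough && !init->hasStatements && (init->numPreds == 1) && (init->prev != nullptr))
    {
        return init->prev; // skip the empty pre-header introduced by hoisting
    }
    return init;
}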
- if ((initBlock->getBBJumpKind() == BBJ_NONE) && (initBlock->bbNext == top) && - (initBlock->countOfInEdges() == 1) && (initBlock->bbPrev != nullptr) && initBlock->bbPrev->bbFallsThrough()) + if ((initBlock->KindIs(BBJ_NONE)) && (initBlock->bbNext == top) && (initBlock->countOfInEdges() == 1) && + (initBlock->bbPrev != nullptr) && initBlock->bbPrev->bbFallsThrough()) { initBlock = initBlock->bbPrev; phdrStmt = initBlock->firstStmt(); @@ -1305,7 +1305,7 @@ bool Compiler::optRecordLoop( // 5. Finding a constant initializer is optional; if the initializer is not found, or is not constant, // it is still considered a for-like loop. // - if (bottom->getBBJumpKind() == BBJ_COND) + if (bottom->KindIs(BBJ_COND)) { GenTree* init; GenTree* test; @@ -1801,7 +1801,7 @@ class LoopSearch // BasicBlock* FindEntry(BasicBlock* head, BasicBlock* top, BasicBlock* bottom) { - if (head->getBBJumpKind() == BBJ_ALWAYS) + if (head->KindIs(BBJ_ALWAYS)) { if (head->bbJumpDest->bbNum <= bottom->bbNum && head->bbJumpDest->bbNum >= top->bbNum) { @@ -2294,7 +2294,7 @@ class LoopSearch { // Need to reconnect the flow from `block` to `oldNext`. - if ((block->getBBJumpKind() == BBJ_COND) && (block->bbJumpDest == newNext)) + if ((block->KindIs(BBJ_COND)) && (block->bbJumpDest == newNext)) { // Reverse the jump condition GenTree* test = block->lastNode(); @@ -2321,7 +2321,7 @@ class LoopSearch noway_assert((newBlock == nullptr) || loopBlocks.CanRepresent(newBlock->bbNum)); } } - else if ((block->getBBJumpKind() == BBJ_ALWAYS) && (block->bbJumpDest == newNext)) + else if (block->KindIs(BBJ_ALWAYS) && (block->bbJumpDest == newNext)) { // We've made `block`'s jump target its bbNext, so remove the jump. if (!comp->fgOptimizeBranchToNext(block, newNext, block->bbPrev)) @@ -2416,7 +2416,7 @@ class LoopSearch // On non-funclet platforms (x86), the catch exit is a BBJ_ALWAYS, but we don't want that to // be considered a loop exit block, as catch handlers don't have predecessor lists and don't // show up as might be expected in the dominator tree. - if (block->getBBJumpKind() == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { if (!BasicBlock::sameHndRegion(block, exitPoint)) { @@ -2818,7 +2818,7 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R // TODO-Cleanup: This should be a static member of the BasicBlock class. void Compiler::optCopyBlkDest(BasicBlock* from, BasicBlock* to) { - assert(from->getBBJumpKind() == to->getBBJumpKind()); // Precondition. + assert(from->KindIs(to->getBBJumpKind())); // Precondition. // copy the jump destination(s) from "from" to "to". switch (to->getBBJumpKind()) @@ -2936,7 +2936,7 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd) // entry block. If the `head` branches to `top` because it is the BBJ_ALWAYS of a // BBJ_CALLFINALLY/BBJ_ALWAYS pair, we canonicalize by introducing a new fall-through // head block. See FindEntry() for the logic that allows this. - if ((h->getBBJumpKind() == BBJ_ALWAYS) && (h->bbJumpDest == t) && (h->bbFlags & BBF_KEEP_BBJ_ALWAYS)) + if (h->KindIs(BBJ_ALWAYS) && (h->bbJumpDest == t) && (h->bbFlags & BBF_KEEP_BBJ_ALWAYS)) { // Insert new head @@ -3030,7 +3030,7 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd) // not keeping pred lists in good shape. 
// BasicBlock* const t = optLoopTable[loopInd].lpTop; - assert(siblingB->getBBJumpKind() == BBJ_COND); + assert(siblingB->KindIs(BBJ_COND)); assert(siblingB->bbNext == t); JITDUMP(FMT_LP " head " FMT_BB " is also " FMT_LP " bottom\n", loopInd, h->bbNum, sibling); @@ -3207,8 +3207,8 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati // assert(h->bbNext == t); assert(h->bbFallsThrough()); - assert((h->getBBJumpKind() == BBJ_NONE) || (h->getBBJumpKind() == BBJ_COND)); - if (h->getBBJumpKind() == BBJ_COND) + assert((h->KindIs(BBJ_NONE)) || (h->KindIs(BBJ_COND))); + if (h->KindIs(BBJ_COND)) { BasicBlock* const hj = h->bbJumpDest; assert((hj->bbNum < t->bbNum) || (hj->bbNum > b->bbNum)); @@ -3360,7 +3360,7 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati childLoop = optLoopTable[childLoop].lpSibling) { if ((optLoopTable[childLoop].lpEntry == origE) && (optLoopTable[childLoop].lpHead == h) && - (newT->getBBJumpKind() == BBJ_NONE) && (newT->bbNext == origE)) + (newT->KindIs(BBJ_NONE)) && (newT->bbNext == origE)) { optUpdateLoopHead(childLoop, h, newT); @@ -4280,7 +4280,7 @@ PhaseStatus Compiler::optUnrollLoops() goto DONE_LOOP; } - if (block->getBBJumpKind() == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { ++loopRetCount; } @@ -4524,7 +4524,7 @@ PhaseStatus Compiler::optUnrollLoops() // // If the initBlock is a BBJ_COND drop the condition (and make initBlock a BBJ_NONE block). // - if (initBlock->getBBJumpKind() == BBJ_COND) + if (initBlock->KindIs(BBJ_COND)) { assert(dupCond); Statement* initBlockBranchStmt = initBlock->lastStmt(); @@ -4538,7 +4538,7 @@ PhaseStatus Compiler::optUnrollLoops() /* the loop must execute */ assert(!dupCond); assert(totalIter > 0); - noway_assert(initBlock->getBBJumpKind() == BBJ_NONE); + noway_assert(initBlock->KindIs(BBJ_NONE)); } // The loop will be removed, so no need to fix up the pre-header. @@ -4548,7 +4548,7 @@ PhaseStatus Compiler::optUnrollLoops() // For unrolled loops, all the unrolling preconditions require the pre-header block to fall // through into TOP. - assert(head->getBBJumpKind() == BBJ_NONE); + assert(head->KindIs(BBJ_NONE)); } // If we actually unrolled, tail is now reached @@ -4840,7 +4840,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) // Does the BB end with an unconditional jump? - if (block->getBBJumpKind() != BBJ_ALWAYS || (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) + if (!block->KindIs(BBJ_ALWAYS) || (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) { // It can't be one of the ones we use for our exception magic return false; @@ -4850,7 +4850,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) BasicBlock* const bTest = block->bbJumpDest; // Does the bTest consist of 'jtrue(cond) block' ? - if (bTest->getBBJumpKind() != BBJ_COND) + if (!bTest->KindIs(BBJ_COND)) { return false; } @@ -5434,7 +5434,7 @@ void Compiler::optMarkLoopHeads() { if (blockNum <= predBlock->bbNum) { - if (predBlock->getBBJumpKind() == BBJ_CALLFINALLY) + if (predBlock->KindIs(BBJ_CALLFINALLY)) { // Loops never have BBJ_CALLFINALLY as the source of their "back edge". continue; @@ -5539,7 +5539,7 @@ void Compiler::optFindAndScaleGeneralLoopBlocks() } // We only consider back-edges that are BBJ_COND or BBJ_ALWAYS for loops. - if ((bottom->getBBJumpKind() != BBJ_COND) && (bottom->getBBJumpKind() != BBJ_ALWAYS)) + if (!bottom->KindIs(BBJ_COND, BBJ_ALWAYS)) { continue; } @@ -8198,7 +8198,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum) // The preheader block is part of the containing loop (if any). 
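// Bookkeeping below, in miniature: a freshly created pre-header sits just
// outside the loop it guards, so its natural-loop number is the guarded
// loop's parent. Sketch with hypothetical types; the sentinel value is
// assumed, not the real BasicBlock::NOT_IN_LOOP constant:

typedef unsigned char LoopNum;
constexpr LoopNum MINI_NOT_IN_LOOP = 0xFF; // assumed sentinel

struct MiniLoop
{
    LoopNum parent; // enclosing loop, or MINI_NOT_IN_LOOP at top level
};

LoopNum preheaderNaturalLoopNum(const MiniLoop& loop)
{
    return loop.parent; // the pre-header belongs to whatever encloses the loop
}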
preHead->bbNatLoopNum = loop.lpParent; - if (fgIsUsingProfileWeights() && (head->getBBJumpKind() == BBJ_COND)) + if (fgIsUsingProfileWeights() && (head->KindIs(BBJ_COND))) { if ((head->bbWeight == BB_ZERO_WEIGHT) || (entry->bbWeight == BB_ZERO_WEIGHT)) { @@ -9181,7 +9181,7 @@ void Compiler::optRemoveRedundantZeroInits() if (tree->Data()->IsIntegralConst(0)) { bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->getBBJumpKind() == BBJ_RETURN; + bool bbIsReturn = block->KindIs(BBJ_RETURN); if (!bbInALoop || bbIsReturn) { diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp index cdf76a4e5a6b6b..4cf9739d6c73d5 100644 --- a/src/coreclr/jit/redundantbranchopts.cpp +++ b/src/coreclr/jit/redundantbranchopts.cpp @@ -44,7 +44,7 @@ PhaseStatus Compiler::optRedundantBranches() // We currently can optimize some BBJ_CONDs. // - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { bool madeChangesThisBlock = m_compiler->optRedundantRelop(block); @@ -57,7 +57,7 @@ PhaseStatus Compiler::optRedundantBranches() // a BBJ_COND, retry; perhaps one of the later optimizations // we can do has enabled one of the earlier optimizations. // - if (madeChangesThisBlock && (block->getBBJumpKind() == BBJ_COND)) + if (madeChangesThisBlock && block->KindIs(BBJ_COND)) { JITDUMP("Will retry RBO in " FMT_BB " after partial optimization\n", block->bbNum); madeChangesThisBlock |= m_compiler->optRedundantBranch(block); @@ -508,7 +508,7 @@ bool Compiler::optRedundantBranch(BasicBlock* const block) // Check the current dominator // - if (domBlock->getBBJumpKind() == BBJ_COND) + if (domBlock->KindIs(BBJ_COND)) { Statement* const domJumpStmt = domBlock->lastStmt(); GenTree* const domJumpTree = domJumpStmt->GetRootNode(); @@ -971,8 +971,8 @@ bool Compiler::optJumpThreadCheck(BasicBlock* const block, BasicBlock* const dom // bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop) { - assert(block->getBBJumpKind() == BBJ_COND); - assert(domBlock->getBBJumpKind() == BBJ_COND); + assert(block->KindIs(BBJ_COND)); + assert(domBlock->KindIs(BBJ_COND)); // If the dominating block is not the immediate dominator // we might need to duplicate a lot of code to thread @@ -990,7 +990,7 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl BasicBlock* idomBlock = block->bbIDom; while ((idomBlock != nullptr) && (idomBlock != domBlock)) { - if (idomBlock->getBBJumpKind() == BBJ_COND) + if (idomBlock->KindIs(BBJ_COND)) { JITDUMP(" -- " FMT_BB " not closest branching dom, so no threading\n", idomBlock->bbNum); return false; @@ -1082,7 +1082,7 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl // Treat switch preds as ambiguous for now. 
// - if (predBlock->getBBJumpKind() == BBJ_SWITCH) + if (predBlock->KindIs(BBJ_SWITCH)) { JITDUMP(FMT_BB " is a switch pred\n", predBlock->bbNum); BlockSetOps::AddElemD(this, jti.m_ambiguousPreds, predBlock->bbNum); @@ -1450,9 +1450,8 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // const bool fallThroughIsTruePred = BlockSetOps::IsMember(this, jti.m_truePreds, jti.m_fallThroughPred->bbNum); - if ((jti.m_fallThroughPred->getBBJumpKind() == BBJ_NONE) && - ((fallThroughIsTruePred && (jti.m_numFalsePreds == 0)) || - (!fallThroughIsTruePred && (jti.m_numTruePreds == 0)))) + if ((jti.m_fallThroughPred->KindIs(BBJ_NONE)) && ((fallThroughIsTruePred && (jti.m_numFalsePreds == 0)) || + (!fallThroughIsTruePred && (jti.m_numTruePreds == 0)))) { JITDUMP(FMT_BB " has ambiguous preds and a (%s) fall through pred and no (%s) preds.\n" "Converting fall through pred " FMT_BB " to BBJ_ALWAYS\n", @@ -1624,8 +1623,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // surviving ssa input, and update all the value numbers...) // BasicBlock* const ambBlock = jti.m_ambiguousVNBlock; - if ((ambBlock != nullptr) && (jti.m_block->getBBJumpKind() == BBJ_COND) && - (jti.m_block->GetUniquePred(this) == ambBlock)) + if ((ambBlock != nullptr) && (jti.m_block->KindIs(BBJ_COND)) && (jti.m_block->GetUniquePred(this) == ambBlock)) { JITDUMP(FMT_BB " has just one remaining predcessor " FMT_BB "\n", jti.m_block->bbNum, ambBlock->bbNum); From a8c8a6c30722b5903b6c156870936de23c78b572 Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Mon, 2 Oct 2023 19:32:53 -0400 Subject: [PATCH 03/14] Style --- src/coreclr/jit/fgehopt.cpp | 2 +- src/coreclr/jit/fgopt.cpp | 337 ++++++++++++------------ src/coreclr/jit/fgprofilesynthesis.cpp | 10 +- src/coreclr/jit/flowgraph.cpp | 4 +- src/coreclr/jit/importer.cpp | 4 +- src/coreclr/jit/lower.cpp | 3 +- src/coreclr/jit/morph.cpp | 10 +- src/coreclr/jit/optimizer.cpp | 12 +- src/coreclr/jit/redundantbranchopts.cpp | 6 +- 9 files changed, 193 insertions(+), 195 deletions(-) diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 782a92c92b6452..893dae0893c479 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -821,7 +821,7 @@ PhaseStatus Compiler::fgCloneFinally() // through to a callfinally. BasicBlock* jumpDest = nullptr; - if ((block->KindIs(BBJ_NONE)) && (block == lastTryBlock)) + if (block->KindIs(BBJ_NONE) && (block == lastTryBlock)) { jumpDest = block->bbNext; } diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index bcf25c9d01ef17..d4b36c1b723c6f 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -5211,7 +5211,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) /* (bPrev is known to be a normal block at this point) */ if (!isRare) { - if ((bDest == block->bbNext) && (block->KindIs(BBJ_RETURN)) && (bPrev->KindIs(BBJ_ALWAYS))) + if ((bDest == block->bbNext) && block->KindIs(BBJ_RETURN) && bPrev->KindIs(BBJ_ALWAYS)) { // This is a common case with expressions like "return Expr1 && Expr2" -- move the return // to establish fall-through. @@ -6165,220 +6165,219 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // (b) block jump target is elsewhere but join free, and // bNext's jump target has a join. 
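// Cases (a) and (b) feed the same rewrite: invert block's condition, point
// it at bNext's destination, and unlink bNext. The inversion half, shown
// over a toy integer relop encoding; the real gtReverseCond works on
// GenTree nodes and also has to handle floating-point ordering:

enum MiniRelop
{
    RO_EQ,
    RO_NE,
    RO_LT,
    RO_GE,
    RO_LE,
    RO_GT
};

MiniRelop reverseRelop(MiniRelop oper)
{
    switch (oper)
    {
        case RO_EQ: return RO_NE;
        case RO_NE: return RO_EQ;
        case RO_LT: return RO_GE;
        case RO_GE: return RO_LT;
        case RO_LE: return RO_GT;
        case RO_GT: return RO_LE;
    }
    return oper; // not reached
}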
// - if (block->KindIs(BBJ_COND) && // block is a BBJ_COND block - (bNext != nullptr) && // block is not the last block - (bNext->bbRefs == 1) && // No other block jumps to bNext - bNext->KindIs(BBJ_ALWAYS)) && // The next block is a BBJ_ALWAYS block - bNext->isEmpty() && // and it is an empty block - (bNext != bNext->bbJumpDest) && // special case for self jumps + if (block->KindIs(BBJ_COND) && // block is a BBJ_COND block + (bNext != nullptr) && // block is not the last block + (bNext->bbRefs == 1) && // No other block jumps to bNext + bNext->KindIs(BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block + bNext->isEmpty() && // and it is an empty block + (bNext != bNext->bbJumpDest) && // special case for self jumps (bDest != fgFirstColdBlock) && (!fgInDifferentRegions(block, bDest))) // do not cross hot/cold sections - { - // case (a) - // - const bool isJumpAroundEmpty = (bNext->bbNext == bDest); + { + // case (a) + // + const bool isJumpAroundEmpty = (bNext->bbNext == bDest); - // case (b) - // - // Note the asymmetric checks for refs == 1 and refs > 1 ensures that we - // differentiate the roles played by bDest and bNextJumpDest. We need some - // sense of which arrangement is preferable to avoid getting stuck in a loop - // reversing and re-reversing. - // - // Other tiebreaking criteria could be considered. - // - // Pragmatic constraints: - // - // * don't consider lexical predecessors, or we may confuse loop recognition - // * don't consider blocks of different rarities - // - BasicBlock* const bNextJumpDest = bNext->bbJumpDest; - const bool isJumpToJoinFree = !isJumpAroundEmpty && (bDest->bbRefs == 1) && - (bNextJumpDest->bbRefs > 1) && (bDest->bbNum > block->bbNum) && - (block->isRunRarely() == bDest->isRunRarely()); + // case (b) + // + // Note the asymmetric checks for refs == 1 and refs > 1 ensures that we + // differentiate the roles played by bDest and bNextJumpDest. We need some + // sense of which arrangement is preferable to avoid getting stuck in a loop + // reversing and re-reversing. + // + // Other tiebreaking criteria could be considered. + // + // Pragmatic constraints: + // + // * don't consider lexical predecessors, or we may confuse loop recognition + // * don't consider blocks of different rarities + // + BasicBlock* const bNextJumpDest = bNext->bbJumpDest; + const bool isJumpToJoinFree = !isJumpAroundEmpty && (bDest->bbRefs == 1) && + (bNextJumpDest->bbRefs > 1) && (bDest->bbNum > block->bbNum) && + (block->isRunRarely() == bDest->isRunRarely()); - bool optimizeJump = isJumpAroundEmpty || isJumpToJoinFree; + bool optimizeJump = isJumpAroundEmpty || isJumpToJoinFree; - // We do not optimize jumps between two different try regions. - // However jumping to a block that is not in any try region is OK - // - if (bDest->hasTryIndex() && !BasicBlock::sameTryRegion(block, bDest)) - { - optimizeJump = false; - } + // We do not optimize jumps between two different try regions. 
+ // However jumping to a block that is not in any try region is OK + // + if (bDest->hasTryIndex() && !BasicBlock::sameTryRegion(block, bDest)) + { + optimizeJump = false; + } - // Also consider bNext's try region - // - if (bNext->hasTryIndex() && !BasicBlock::sameTryRegion(block, bNext)) + // Also consider bNext's try region + // + if (bNext->hasTryIndex() && !BasicBlock::sameTryRegion(block, bNext)) + { + optimizeJump = false; + } + + // If we are optimizing using real profile weights + // then don't optimize a conditional jump to an unconditional jump + // until after we have computed the edge weights + // + if (fgIsUsingProfileWeights()) + { + // if block and bdest are in different hot/cold regions we can't do this optimization + // because we can't allow fall-through into the cold region. + if (!fgEdgeWeightsComputed || fgInDifferentRegions(block, bDest)) { optimizeJump = false; } + } - // If we are optimizing using real profile weights - // then don't optimize a conditional jump to an unconditional jump - // until after we have computed the edge weights + if (optimizeJump && isJumpToJoinFree) + { + // In the join free case, we also need to move bDest right after bNext + // to create same flow as in the isJumpAroundEmpty case. // - if (fgIsUsingProfileWeights()) + if (!fgEhAllowsMoveBlock(bNext, bDest) || bDest->isBBCallAlwaysPair()) { - // if block and bdest are in different hot/cold regions we can't do this optimization - // because we can't allow fall-through into the cold region. - if (!fgEdgeWeightsComputed || fgInDifferentRegions(block, bDest)) - { - optimizeJump = false; - } + optimizeJump = false; } - - if (optimizeJump && isJumpToJoinFree) + else { - // In the join free case, we also need to move bDest right after bNext - // to create same flow as in the isJumpAroundEmpty case. + // We don't expect bDest to already be right after bNext. // - if (!fgEhAllowsMoveBlock(bNext, bDest) || bDest->isBBCallAlwaysPair()) - { - optimizeJump = false; - } - else - { - // We don't expect bDest to already be right after bNext. - // - assert(bDest != bNext->bbNext); + assert(bDest != bNext->bbNext); - JITDUMP("\nMoving " FMT_BB " after " FMT_BB " to enable reversal\n", bDest->bbNum, - bNext->bbNum); + JITDUMP("\nMoving " FMT_BB " after " FMT_BB " to enable reversal\n", bDest->bbNum, + bNext->bbNum); - // If bDest can fall through we'll need to create a jump - // block after it too. Remember where to jump to. - // - BasicBlock* const bDestNext = bDest->bbNext; + // If bDest can fall through we'll need to create a jump + // block after it too. Remember where to jump to. + // + BasicBlock* const bDestNext = bDest->bbNext; - // Move bDest - // - if (ehIsBlockEHLast(bDest)) - { - ehUpdateLastBlocks(bDest, bDest->bbPrev); - } + // Move bDest + // + if (ehIsBlockEHLast(bDest)) + { + ehUpdateLastBlocks(bDest, bDest->bbPrev); + } - fgUnlinkBlock(bDest); - fgInsertBBafter(bNext, bDest); + fgUnlinkBlock(bDest); + fgInsertBBafter(bNext, bDest); - if (ehIsBlockEHLast(bNext)) - { - ehUpdateLastBlocks(bNext, bDest); - } + if (ehIsBlockEHLast(bNext)) + { + ehUpdateLastBlocks(bNext, bDest); + } - // Add fall through fixup block, if needed. - // - if (bDest->KindIs(BBJ_NONE, BBJ_COND)) - { - BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true); - bFixup->inheritWeight(bDestNext); - bFixup->bbJumpDest = bDestNext; + // Add fall through fixup block, if needed. 
+ // + if (bDest->KindIs(BBJ_NONE, BBJ_COND)) + { + BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true); + bFixup->inheritWeight(bDestNext); + bFixup->bbJumpDest = bDestNext; - fgRemoveRefPred(bDestNext, bDest); - fgAddRefPred(bFixup, bDest); - fgAddRefPred(bDestNext, bFixup); - } + fgRemoveRefPred(bDestNext, bDest); + fgAddRefPred(bFixup, bDest); + fgAddRefPred(bDestNext, bFixup); } } + } - if (optimizeJump) - { - JITDUMP("\nReversing a conditional jump around an unconditional jump (" FMT_BB " -> " FMT_BB - ", " FMT_BB " -> " FMT_BB ")\n", - block->bbNum, bDest->bbNum, bNext->bbNum, bNextJumpDest->bbNum); + if (optimizeJump) + { + JITDUMP("\nReversing a conditional jump around an unconditional jump (" FMT_BB " -> " FMT_BB + ", " FMT_BB " -> " FMT_BB ")\n", + block->bbNum, bDest->bbNum, bNext->bbNum, bNextJumpDest->bbNum); - // Reverse the jump condition - // - GenTree* test = block->lastNode(); - noway_assert(test->OperIsConditionalJump()); + // Reverse the jump condition + // + GenTree* test = block->lastNode(); + noway_assert(test->OperIsConditionalJump()); - if (test->OperGet() == GT_JTRUE) - { - GenTree* cond = gtReverseCond(test->AsOp()->gtOp1); - assert(cond == - test->AsOp()->gtOp1); // Ensure `gtReverseCond` did not create a new node. - test->AsOp()->gtOp1 = cond; - } - else - { - gtReverseCond(test); - } + if (test->OperGet() == GT_JTRUE) + { + GenTree* cond = gtReverseCond(test->AsOp()->gtOp1); + assert(cond == test->AsOp()->gtOp1); // Ensure `gtReverseCond` did not create a new node. + test->AsOp()->gtOp1 = cond; + } + else + { + gtReverseCond(test); + } - // Optimize the Conditional JUMP to go to the new target - block->bbJumpDest = bNext->bbJumpDest; + // Optimize the Conditional JUMP to go to the new target + block->bbJumpDest = bNext->bbJumpDest; - fgAddRefPred(bNext->bbJumpDest, block, fgRemoveRefPred(bNext->bbJumpDest, bNext)); + fgAddRefPred(bNext->bbJumpDest, block, fgRemoveRefPred(bNext->bbJumpDest, bNext)); - /* - Unlink bNext from the BasicBlock list; note that we can - do this even though other blocks could jump to it - the - reason is that elsewhere in this function we always - redirect jumps to jumps to jump to the final label, - so even if another block jumps to bNext it won't matter - once we're done since any such jump will be redirected - to the final target by the time we're done here. - */ + /* + Unlink bNext from the BasicBlock list; note that we can + do this even though other blocks could jump to it - the + reason is that elsewhere in this function we always + redirect jumps to jumps to jump to the final label, + so even if another block jumps to bNext it won't matter + once we're done since any such jump will be redirected + to the final target by the time we're done here. + */ - fgRemoveRefPred(bNext, block); - fgUnlinkBlock(bNext); + fgRemoveRefPred(bNext, block); + fgUnlinkBlock(bNext); - /* Mark the block as removed */ - bNext->bbFlags |= BBF_REMOVED; + /* Mark the block as removed */ + bNext->bbFlags |= BBF_REMOVED; - // Update the loop table if we removed the bottom of a loop, for example. - fgUpdateLoopsAfterCompacting(block, bNext); + // Update the loop table if we removed the bottom of a loop, for example. 
+ fgUpdateLoopsAfterCompacting(block, bNext); - // If this block was aligned, unmark it - bNext->unmarkLoopAlign(this DEBUG_ARG("Optimized jump")); + // If this block was aligned, unmark it + bNext->unmarkLoopAlign(this DEBUG_ARG("Optimized jump")); - // If this is the first Cold basic block update fgFirstColdBlock - if (bNext == fgFirstColdBlock) - { - fgFirstColdBlock = bNext->bbNext; - } + // If this is the first Cold basic block update fgFirstColdBlock + if (bNext == fgFirstColdBlock) + { + fgFirstColdBlock = bNext->bbNext; + } - // - // If we removed the end of a try region or handler region - // we will need to update ebdTryLast or ebdHndLast. - // + // + // If we removed the end of a try region or handler region + // we will need to update ebdTryLast or ebdHndLast. + // - for (EHblkDsc* const HBtab : EHClauses(this)) + for (EHblkDsc* const HBtab : EHClauses(this)) + { + if ((HBtab->ebdTryLast == bNext) || (HBtab->ebdHndLast == bNext)) { - if ((HBtab->ebdTryLast == bNext) || (HBtab->ebdHndLast == bNext)) - { - fgSkipRmvdBlocks(HBtab); - } + fgSkipRmvdBlocks(HBtab); } + } - // we optimized this JUMP - goto REPEAT to catch similar cases - change = true; - modified = true; + // we optimized this JUMP - goto REPEAT to catch similar cases + change = true; + modified = true; #ifdef DEBUG - if (verbose) - { - printf("\nAfter reversing the jump:\n"); - fgDispBasicBlocks(verboseTrees); - } + if (verbose) + { + printf("\nAfter reversing the jump:\n"); + fgDispBasicBlocks(verboseTrees); + } #endif // DEBUG - /* - For a rare special case we cannot jump to REPEAT - as jumping to REPEAT will cause us to delete 'block' - because it currently appears to be unreachable. As - it is a self loop that only has a single bbRef (itself) - However since the unlinked bNext has additional bbRefs - (that we will later connect to 'block'), it is not really - unreachable. - */ - if ((bNext->bbRefs > 0) && (bNext->bbJumpDest == block) && (block->bbRefs == 1)) - { - continue; - } - - goto REPEAT; + /* + For a rare special case we cannot jump to REPEAT + as jumping to REPEAT will cause us to delete 'block' + because it currently appears to be unreachable. As + it is a self loop that only has a single bbRef (itself) + However since the unlinked bNext has additional bbRefs + (that we will later connect to 'block'), it is not really + unreachable. 
+ */ + if ((bNext->bbRefs > 0) && (bNext->bbJumpDest == block) && (block->bbRefs == 1)) + { + continue; } + + goto REPEAT; } + } } // diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index f256ca73846c84..4d6d549e03d269 100644 --- a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -332,8 +332,8 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block) // THROW heuristic // - bool const isJumpThrow = (jump->KindIs(BBJ_THROW)); - bool const isNextThrow = (next->KindIs(BBJ_THROW)); + bool const isJumpThrow = jump->KindIs(BBJ_THROW); + bool const isNextThrow = next->KindIs(BBJ_THROW); if (isJumpThrow != isNextThrow) { @@ -402,8 +402,8 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block) // RETURN heuristic // - bool const isJumpReturn = (jump->KindIs(BBJ_RETURN)); - bool const isNextReturn = (next->KindIs(BBJ_RETURN)); + bool const isJumpReturn = jump->KindIs(BBJ_RETURN); + bool const isNextReturn = next->KindIs(BBJ_RETURN); if (isJumpReturn != isNextReturn) { @@ -1214,7 +1214,7 @@ void ProfileSynthesis::ComputeCyclicProbabilities(SimpleLoop* loop) // // Currently we don't know which edges do this. // - if ((exitBlock->KindIs(BBJ_COND)) && (exitBlockWeight > (missingExitWeight + currentExitWeight))) + if (exitBlock->KindIs(BBJ_COND) && (exitBlockWeight > (missingExitWeight + currentExitWeight))) { JITDUMP("Will adjust likelihood of the exit edge from loop exit block " FMT_BB " to reflect capping; current likelihood is " FMT_WT "\n", diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 2ef7dbc9d38a3b..fb4399cf8618c2 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -2596,7 +2596,7 @@ PhaseStatus Compiler::fgAddInternal() for (BasicBlock* block = fgFirstBB; block != lastBlockBeforeGenReturns->bbNext; block = block->bbNext) { - if ((block->KindIs(BBJ_RETURN)) && ((block->bbFlags & BBF_HAS_JMP) == 0)) + if (block->KindIs(BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { merger.Record(block); } @@ -3523,7 +3523,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // This is a slightly more complicated case, because we will // probably need to insert a block to jump to the cold section. 
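// A sketch of the fix-up this refers to, condensed from the
// fgDetermineFirstColdBlock hunk that appears later in this series (see the
// BBJ_ALWAYS conversion there): when the last hot block would fall through
// into the cold section, the fall through is made explicit so the two
// sections can be placed far apart:
//
//     prevToFirstColdBlock->bbJumpDest = firstColdBlock;
//     prevToFirstColdBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
//
// The empty-BBJ_ALWAYS case just below is the cheap exception, where an
// existing jump block can serve as the transition without inserting one.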
// - if (firstColdBlock->isEmpty() && (firstColdBlock->KindIs(BBJ_ALWAYS))) + if (firstColdBlock->isEmpty() && firstColdBlock->KindIs(BBJ_ALWAYS)) { // We can just use this block as the transitionBlock firstColdBlock = firstColdBlock->bbNext; diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index f0e46f2100a254..db5d2e3ecd4b79 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -8532,7 +8532,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } bool bbInALoop = impBlockIsInALoop(block); - bool bbIsReturn = (block->KindIs(BBJ_RETURN)) && + bool bbIsReturn = block->KindIs(BBJ_RETURN) && (!compIsForInlining() || (impInlineInfo->iciBlock->KindIs(BBJ_RETURN))); LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) @@ -12121,7 +12121,7 @@ void Compiler::impImport() { entryBlock = entryBlock->bbNext; } - else if (opts.IsOSR() && (entryBlock->KindIs(BBJ_ALWAYS))) + else if (opts.IsOSR() && entryBlock->KindIs(BBJ_ALWAYS)) { entryBlock = entryBlock->bbJumpDest; } diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 26f4ed946955d4..66bdb7a64f52b0 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -5296,8 +5296,7 @@ void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* JITDUMP("======= Inserting PInvoke method epilog\n"); // Method doing PInvoke calls has exactly one return block unless it has "jmp" or tail calls. - assert(((returnBB == comp->genReturnBB) && (returnBB->KindIs(BBJ_RETURN))) || - returnBB->endsWithTailCallOrJmp(comp)); + assert(((returnBB == comp->genReturnBB) && returnBB->KindIs(BBJ_RETURN)) || returnBB->endsWithTailCallOrJmp(comp)); LIR::Range& returnBlockRange = LIR::AsRange(returnBB); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 88175479c16b54..4a2606729253f8 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -13725,10 +13725,10 @@ void Compiler::fgMorphStmts(BasicBlock* block) // - a tail call dispatched via runtime help (IL stubs), in which // case there will not be any tailcall and the block will be ending // with BBJ_RETURN (as normal control flow) - noway_assert((call->IsFastTailCall() && (compCurBB->KindIs(BBJ_RETURN)) && + noway_assert((call->IsFastTailCall() && compCurBB->KindIs(BBJ_RETURN) && ((compCurBB->bbFlags & BBF_HAS_JMP)) != 0) || - (call->IsTailCallViaJitHelper() && (compCurBB->KindIs(BBJ_THROW))) || - (!call->IsTailCall() && (compCurBB->KindIs(BBJ_RETURN)))); + (call->IsTailCallViaJitHelper() && compCurBB->KindIs(BBJ_THROW)) || + (!call->IsTailCall() && compCurBB->KindIs(BBJ_RETURN))); } #ifdef DEBUG @@ -13921,7 +13921,7 @@ void Compiler::fgMorphBlocks() fgMorphStmts(block); // Do we need to merge the result of this block into a single return block? - if ((block->KindIs(BBJ_RETURN)) && ((block->bbFlags & BBF_HAS_JMP) == 0)) + if (block->KindIs(BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { if ((genReturnBB != nullptr) && (genReturnBB != block)) { @@ -13977,7 +13977,7 @@ void Compiler::fgMorphBlocks() // void Compiler::fgMergeBlockReturn(BasicBlock* block) { - assert((block->KindIs(BBJ_RETURN)) && ((block->bbFlags & BBF_HAS_JMP) == 0)); + assert(block->KindIs(BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)); assert((genReturnBB != nullptr) && (genReturnBB != block)); // TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN. 
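The hunks above are mostly mechanical: now that KindIs replaces direct bbJumpKind comparisons, the redundant parentheses around the calls are dropped. KindIs itself is not shown anywhere in this series; a plausible shape for it (an assumption inferred from the call sites, not taken from this patch) is an equality test plus a variadic overload:

    // Sketch only: true if the block's kind matches any supplied jump kind.
    bool KindIs(BBjumpKinds kind) const
    {
        return bbJumpKind == kind;
    }

    template <typename... T>
    bool KindIs(BBjumpKinds kind, T... rest) const
    {
        // Recurses until a single-kind call hits the overload above.
        return KindIs(kind) || KindIs(rest...);
    }

The variadic form is what lets the optimizer.cpp diff just below collapse assert((h->KindIs(BBJ_NONE)) || (h->KindIs(BBJ_COND))) into the single assert(h->KindIs(BBJ_NONE, BBJ_COND)).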
diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index f073ce0f11fc63..3d3baeef6a2017 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -741,7 +741,7 @@ bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenT bool initBlockOk = (predBlock == initBlock); if (!initBlockOk) { - if ((predBlock->KindIs(BBJ_NONE)) && (predBlock->bbNext == optLoopTable[loopInd].lpEntry) && + if (predBlock->KindIs(BBJ_NONE) && (predBlock->bbNext == optLoopTable[loopInd].lpEntry) && (predBlock->countOfInEdges() == 1) && (predBlock->firstStmt() == nullptr) && (predBlock->bbPrev != nullptr) && predBlock->bbPrev->bbFallsThrough()) { @@ -1150,7 +1150,7 @@ bool Compiler::optExtractInitTestIncr( // If we are rebuilding the loop table, we would already have the pre-header block introduced // the first time, which might be empty if no hoisting has yet occurred. In this case, look a // little harder for the possible loop initialization statement. - if ((initBlock->KindIs(BBJ_NONE)) && (initBlock->bbNext == top) && (initBlock->countOfInEdges() == 1) && + if (initBlock->KindIs(BBJ_NONE) && (initBlock->bbNext == top) && (initBlock->countOfInEdges() == 1) && (initBlock->bbPrev != nullptr) && initBlock->bbPrev->bbFallsThrough()) { initBlock = initBlock->bbPrev; @@ -2294,7 +2294,7 @@ class LoopSearch { // Need to reconnect the flow from `block` to `oldNext`. - if ((block->KindIs(BBJ_COND)) && (block->bbJumpDest == newNext)) + if (block->KindIs(BBJ_COND) && (block->bbJumpDest == newNext)) { // Reverse the jump condition GenTree* test = block->lastNode(); @@ -3207,7 +3207,7 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati // assert(h->bbNext == t); assert(h->bbFallsThrough()); - assert((h->KindIs(BBJ_NONE)) || (h->KindIs(BBJ_COND))); + assert(h->KindIs(BBJ_NONE, BBJ_COND)); if (h->KindIs(BBJ_COND)) { BasicBlock* const hj = h->bbJumpDest; @@ -3360,7 +3360,7 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati childLoop = optLoopTable[childLoop].lpSibling) { if ((optLoopTable[childLoop].lpEntry == origE) && (optLoopTable[childLoop].lpHead == h) && - (newT->KindIs(BBJ_NONE)) && (newT->bbNext == origE)) + newT->KindIs(BBJ_NONE) && (newT->bbNext == origE)) { optUpdateLoopHead(childLoop, h, newT); @@ -8198,7 +8198,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum) // The preheader block is part of the containing loop (if any). 
preHead->bbNatLoopNum = loop.lpParent; - if (fgIsUsingProfileWeights() && (head->KindIs(BBJ_COND))) + if (fgIsUsingProfileWeights() && head->KindIs(BBJ_COND)) { if ((head->bbWeight == BB_ZERO_WEIGHT) || (entry->bbWeight == BB_ZERO_WEIGHT)) { diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp index 4cf9739d6c73d5..cbc59c30d73a3c 100644 --- a/src/coreclr/jit/redundantbranchopts.cpp +++ b/src/coreclr/jit/redundantbranchopts.cpp @@ -1450,8 +1450,8 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // const bool fallThroughIsTruePred = BlockSetOps::IsMember(this, jti.m_truePreds, jti.m_fallThroughPred->bbNum); - if ((jti.m_fallThroughPred->KindIs(BBJ_NONE)) && ((fallThroughIsTruePred && (jti.m_numFalsePreds == 0)) || - (!fallThroughIsTruePred && (jti.m_numTruePreds == 0)))) + if (jti.m_fallThroughPred->KindIs(BBJ_NONE) && ((fallThroughIsTruePred && (jti.m_numFalsePreds == 0)) || + (!fallThroughIsTruePred && (jti.m_numTruePreds == 0)))) { JITDUMP(FMT_BB " has ambiguous preds and a (%s) fall through pred and no (%s) preds.\n" "Converting fall through pred " FMT_BB " to BBJ_ALWAYS\n", @@ -1623,7 +1623,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // surviving ssa input, and update all the value numbers...) // BasicBlock* const ambBlock = jti.m_ambiguousVNBlock; - if ((ambBlock != nullptr) && (jti.m_block->KindIs(BBJ_COND)) && (jti.m_block->GetUniquePred(this) == ambBlock)) + if ((ambBlock != nullptr) && jti.m_block->KindIs(BBJ_COND) && (jti.m_block->GetUniquePred(this) == ambBlock)) { JITDUMP(FMT_BB " has just one remaining predcessor " FMT_BB "\n", jti.m_block->bbNum, ambBlock->bbNum); From 7aadbdcefdc14ad57f688d87725d05db166b3c96 Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Mon, 2 Oct 2023 19:35:07 -0400 Subject: [PATCH 04/14] Convert case --- src/coreclr/jit/block.cpp | 2 +- src/coreclr/jit/block.h | 6 ++-- src/coreclr/jit/codegencommon.cpp | 2 +- src/coreclr/jit/codegenlinear.cpp | 4 +-- src/coreclr/jit/compiler.hpp | 2 +- src/coreclr/jit/fgbasic.cpp | 32 +++++++++--------- src/coreclr/jit/fgdiagnostic.cpp | 8 ++--- src/coreclr/jit/fgehopt.cpp | 14 ++++---- src/coreclr/jit/fgflow.cpp | 2 +- src/coreclr/jit/fginline.cpp | 8 ++--- src/coreclr/jit/fgopt.cpp | 36 ++++++++++----------- src/coreclr/jit/fgprofile.cpp | 12 +++---- src/coreclr/jit/fgprofilesynthesis.cpp | 6 ++-- src/coreclr/jit/flowgraph.cpp | 22 ++++++------- src/coreclr/jit/ifconversion.cpp | 2 +- src/coreclr/jit/importer.cpp | 32 +++++++++--------- src/coreclr/jit/indirectcalltransformer.cpp | 6 ++-- src/coreclr/jit/jiteh.cpp | 2 +- src/coreclr/jit/lir.cpp | 2 +- src/coreclr/jit/liveness.cpp | 4 +-- src/coreclr/jit/loopcloning.cpp | 12 +++---- src/coreclr/jit/lower.cpp | 18 +++++------ src/coreclr/jit/morph.cpp | 18 +++++------ src/coreclr/jit/optimizebools.cpp | 4 +-- src/coreclr/jit/optimizer.cpp | 22 ++++++------- src/coreclr/jit/patchpoint.cpp | 4 +-- src/coreclr/jit/redundantbranchopts.cpp | 6 ++-- src/coreclr/jit/switchrecognition.cpp | 2 +- 28 files changed, 145 insertions(+), 145 deletions(-) diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index a5798928b59591..c2aa5ff45e3f44 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -1419,7 +1419,7 @@ BasicBlock* Compiler::bbNewBasicBlock(BBjumpKinds jumpKind) /* Record the jump kind in the block */ - block->setBBJumpKind(jumpKind DEBUG_ARG(this)); + block->SetBBJumpKind(jumpKind DEBUG_ARG(this)); if (jumpKind == BBJ_THROW) { diff --git a/src/coreclr/jit/block.h 
b/src/coreclr/jit/block.h index 9a390d35eb46ef..88312967936f21 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -706,12 +706,12 @@ struct BasicBlock : private LIR::Range BBjumpKinds bbJumpKind; // jump (if any) at the end of this block public: - BBjumpKinds getBBJumpKind() const + BBjumpKinds GetBBJumpKind() const { return bbJumpKind; } - void setBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* comp)) + void SetBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* comp)) { #ifdef DEBUG // BBJ_NONE should only be assigned when optimizing jumps in Compiler::optOptimizeLayout @@ -1574,7 +1574,7 @@ inline BBArrayIterator BBSwitchTargetList::end() const inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) { assert(block != nullptr); - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 6a1e1cecbc0e73..190b0f418515b2 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -376,7 +376,7 @@ void CodeGen::genMarkLabelsForCodegen() for (BasicBlock* const block : compiler->Blocks()) { - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_ALWAYS: // This will also handle the BBJ_ALWAYS of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. case BBJ_COND: diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index f9d5d1c7cfc040..c1b93541c14c87 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -619,7 +619,7 @@ void CodeGen::genCodeForBBlist() { // We only need the NOP if we're not going to generate any more code as part of the block end. - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_ALWAYS: case BBJ_THROW: @@ -662,7 +662,7 @@ void CodeGen::genCodeForBBlist() /* Do we need to generate a jump or return? */ - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_RETURN: genExitCode(block); diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index 8ac6d7bdf47b7e..43d8e927c65f75 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -3224,7 +3224,7 @@ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block) fgRemoveBlockAsPred(block); // Update jump kind after the scrub. - block->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); // Any block with a throw is rare block->bbSetRunRarely(); diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 254372e770c3e1..9853f3f47b26e0 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -537,7 +537,7 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, Bas assert(block != nullptr); assert(fgPredsComputed); - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_CALLFINALLY: case BBJ_COND: @@ -2771,7 +2771,7 @@ void Compiler::fgLinkBasicBlocks() for (BasicBlock* const curBBdesc : Blocks()) { - switch (curBBdesc->getBBJumpKind()) + switch (curBBdesc->GetBBJumpKind()) { case BBJ_COND: case BBJ_ALWAYS: @@ -3808,7 +3808,7 @@ void Compiler::fgFindBasicBlocks() // BBJ_EHFINALLYRET that were imported to BBJ_EHFAULTRET. 
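// Before the conversions below, a note on the DEBUG_ARG(this) plumbing they
// all use: the macro is not defined anywhere in these patches. A plausible
// definition (an assumption inferred from the call sites, not taken from this
// series) is a trailing argument that exists only in DEBUG builds:
//
//     #ifdef DEBUG
//     #define DEBUG_ARG(x) , x
//     #else
//     #define DEBUG_ARG(x)
//     #endif
//
// so block->SetBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this)) is a two-argument
// call in checked builds and a one-argument call in release builds.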
if ((hndBegBB->bbCatchTyp == BBCT_FAULT) && block->KindIs(BBJ_EHFINALLYRET)) { - block->setBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this)); } } @@ -4017,7 +4017,7 @@ void Compiler::fgFixEntryFlowForOSR() fgEnsureFirstBBisScratch(); assert(fgFirstBB->KindIs(BBJ_NONE)); fgRemoveRefPred(fgFirstBB->bbNext, fgFirstBB); - fgFirstBB->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + fgFirstBB->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); fgFirstBB->bbJumpDest = fgOSREntryBB; FlowEdge* const edge = fgAddRefPred(fgOSREntryBB, fgFirstBB); edge->setLikelihood(1.0); @@ -4057,7 +4057,7 @@ void Compiler::fgCheckBasicBlockControlFlow() continue; } - switch (blk->getBBJumpKind()) + switch (blk->GetBBJumpKind()) { case BBJ_NONE: // block flows into the next one (no jump) @@ -4560,7 +4560,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) { // We'd like to use fgNewBBafter(), but we need to update the preds list before linking in the new block. // (We need the successors of 'curr' to be correct when we do this.) - BasicBlock* newBlock = bbNewBasicBlock(curr->getBBJumpKind()); + BasicBlock* newBlock = bbNewBasicBlock(curr->GetBBJumpKind()); // Start the new block with no refs. When we set the preds below, this will get updated correctly. newBlock->bbRefs = 0; @@ -4628,7 +4628,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) curr->bbFlags &= ~(BBF_HAS_JMP | BBF_RETLESS_CALL); // Default to fallthru, and add the arc for that. - curr->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + curr->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); fgAddRefPred(newBlock, curr); return newBlock; @@ -5071,7 +5071,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS), // because that would violate our invariant that BBJ_CALLFINALLY blocks are followed by // BBJ_ALWAYS blocks. 
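// The guard enforcing that invariant appears in a later hunk of this
// function: the BBJ_ALWAYS half of a CALLFINALLY/ALWAYS pair must remain an
// explicit jump, so the fold is only performed when bPrev is not such a tail.
// Condensed:
//
//     if ((bPrev == fgFirstBB) || !bPrev->isBBCallAlwaysPairTail())
//     {
//         // It's safe to change the jump type
//         bPrev->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
//     }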
- bPrev->setBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+ bPrev->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
}

// If this is the first Cold basic block update fgFirstColdBlock
@@ -5129,7 +5129,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
#ifdef DEBUG
/* Some extra checks for the empty case */
- switch (block->getBBJumpKind())
+ switch (block->GetBBJumpKind())
{
case BBJ_NONE:
break;
@@ -5246,7 +5246,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
}

/* change all jumps to the removed block */
- switch (predBlock->getBBJumpKind())
+ switch (predBlock->GetBBJumpKind())
{
default:
noway_assert(!"Unexpected bbJumpKind in fgRemoveBlock()");
@@ -5260,7 +5260,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
if (block->KindIs(BBJ_ALWAYS))
{
/* bPrev now becomes a BBJ_ALWAYS */
- bPrev->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
+ bPrev->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
bPrev->bbJumpDest = succBlock;
}
break;
@@ -5313,7 +5313,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
if (bPrev != nullptr)
{
- switch (bPrev->getBBJumpKind())
+ switch (bPrev->GetBBJumpKind())
{
case BBJ_CALLFINALLY:
// If prev is a BBJ_CALLFINALLY it better be marked as RETLESS
@@ -5333,7 +5333,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
if ((bPrev == fgFirstBB) || !bPrev->isBBCallAlwaysPairTail())
{
// It's safe to change the jump type
- bPrev->setBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+ bPrev->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
}
}
break;
@@ -5378,11 +5378,11 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst)

if (bSrc->bbFallsThrough() && (bSrc->bbNext != bDst))
{
- switch (bSrc->getBBJumpKind())
+ switch (bSrc->GetBBJumpKind())
{

case BBJ_NONE:
- bSrc->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
+ bSrc->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
bSrc->bbJumpDest = bDst;
JITDUMP("Block " FMT_BB " ended with a BBJ_NONE, Changed to an unconditional jump to " FMT_BB "\n",
bSrc->bbNum, bSrc->bbJumpDest->bbNum);
@@ -5462,7 +5462,7 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst)
if (bSrc->KindIs(BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) &&
(bSrc->bbJumpDest == bSrc->bbNext))
{
- bSrc->setBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+ bSrc->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
JITDUMP("Changed an unconditional jump from " FMT_BB " to the next block " FMT_BB " into a BBJ_NONE block\n",
bSrc->bbNum, bSrc->bbNext->bbNum);
diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp
index b8b868214ae9c8..df319152a2dd0e 100644
--- a/src/coreclr/jit/fgdiagnostic.cpp
+++ b/src/coreclr/jit/fgdiagnostic.cpp
@@ -101,7 +101,7 @@ void Compiler::fgDebugCheckUpdate()
if (block->isEmpty() && !(block->bbFlags & BBF_DONT_REMOVE))
{
- switch (block->getBBJumpKind())
+ switch (block->GetBBJumpKind())
{
case BBJ_CALLFINALLY:
case BBJ_EHFINALLYRET:
@@ -1035,7 +1035,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
fprintf(fgxFile, "\n <block");
fprintf(fgxFile, "\n id=\"%d\"", block->bbNum);
fprintf(fgxFile, "\n ordinal=\"%d\"", blockOrdinal);
- fprintf(fgxFile, "\n jumpKind=\"%s\"", kindImage[block->getBBJumpKind()]);
+ fprintf(fgxFile, "\n jumpKind=\"%s\"", kindImage[block->GetBBJumpKind()]);
if (block->hasTryIndex())
{
fprintf(fgxFile, "\n inTry=\"%s\"", "true");
@@ -2004,7 +2004,7 @@ void Compiler::fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth /* = 0 *
}
else
{
- switch (block->getBBJumpKind())
+ switch (block->GetBBJumpKind())
{
case BBJ_COND:
printf("-> " FMT_BB "%*s ( cond )", block->bbJumpDest->bbNum, @@ -2659,7 +2659,7 @@ bool BBPredsChecker::CheckEhHndDsc(BasicBlock* block, BasicBlock* blockPred, EHb bool BBPredsChecker::CheckJump(BasicBlock* blockPred, BasicBlock* block) { - switch (blockPred->getBBJumpKind()) + switch (blockPred->GetBBJumpKind()) { case BBJ_COND: assert(blockPred->bbNext == block || blockPred->bbJumpDest == block); diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 893dae0893c479..e5fbe43e1590f3 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -163,7 +163,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() noway_assert(leaveBlock->KindIs(BBJ_ALWAYS)); currentBlock->bbJumpDest = postTryFinallyBlock; - currentBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + currentBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // Ref count updates. fgAddRefPred(postTryFinallyBlock, currentBlock); @@ -463,7 +463,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() // Time to optimize. // // (1) Convert the callfinally to a normal jump to the handler - callFinally->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + callFinally->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // Identify the leave block and the continuation BasicBlock* const leave = callFinally->bbNext; @@ -542,7 +542,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() GenTree* finallyRetExpr = finallyRet->GetRootNode(); assert(finallyRetExpr->gtOper == GT_RETFILT); fgRemoveStmt(block, finallyRet); - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = continuation; fgAddRefPred(continuation, block); fgRemoveRefPred(leave, block); @@ -1049,7 +1049,7 @@ PhaseStatus Compiler::fgCloneFinally() // Avoid asserts when `fgNewBBinRegion` verifies the handler table, by mapping any cloned finally // return blocks to BBJ_ALWAYS (which we would do below if we didn't do it here). - BBjumpKinds bbNewJumpKind = (block->KindIs(BBJ_EHFINALLYRET)) ? BBJ_ALWAYS : block->getBBJumpKind(); + BBjumpKinds bbNewJumpKind = (block->KindIs(BBJ_EHFINALLYRET)) ? BBJ_ALWAYS : block->GetBBJumpKind(); if (block == firstBlock) { @@ -1180,7 +1180,7 @@ PhaseStatus Compiler::fgCloneFinally() // This call returns to the expected spot, so // retarget it to branch to the clone. currentBlock->bbJumpDest = firstCloneBlock; - currentBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + currentBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // Ref count updates. 
fgAddRefPred(firstCloneBlock, currentBlock);
@@ -1242,7 +1242,7 @@ PhaseStatus Compiler::fgCloneFinally()
{
if (block->KindIs(BBJ_EHFINALLYRET))
{
- block->setBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this));
+ block->SetBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this));
}
}
}
@@ -2194,7 +2194,7 @@ PhaseStatus Compiler::fgTailMergeThrows()
BasicBlock* const predBlock = predEdge->getSourceBlock();
nextPredEdge = predEdge->getNextPredEdge();

- switch (predBlock->getBBJumpKind())
+ switch (predBlock->GetBBJumpKind())
{
case BBJ_NONE:
{
diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp
index fd6ef7a3567763..d2669ccaca3823 100644
--- a/src/coreclr/jit/fgflow.cpp
+++ b/src/coreclr/jit/fgflow.cpp
@@ -343,7 +343,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)

BasicBlock* bNext;

- switch (block->getBBJumpKind())
+ switch (block->GetBBJumpKind())
{
case BBJ_CALLFINALLY:
if (!(block->bbFlags & BBF_RETLESS_CALL))
diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp
index 51f77ccc3a5f9d..a844199697a60c 100644
--- a/src/coreclr/jit/fginline.cpp
+++ b/src/coreclr/jit/fginline.cpp
@@ -675,12 +675,12 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitor<SubstitutePlaceholdersAndDevirtualizeWalker>
if (condTree->IsIntegralConst(0))
{
- block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_compiler));
+ block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_compiler));
m_compiler->fgRemoveRefPred(block->bbNext, block);
}
else
{
- block->setBBJumpKind(BBJ_NONE DEBUG_ARG(m_compiler));
+ block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(m_compiler));
m_compiler->fgRemoveRefPred(block->bbJumpDest, block);
}
}
@@ -1530,13 +1530,13 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
{
JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n", block->bbNum,
bottomBlock->bbNum);
- block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
+ block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
block->bbJumpDest = bottomBlock;
}
else
{
JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_NONE\n", block->bbNum);
- block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+ block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
}

fgAddRefPred(bottomBlock, block);
diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp
index d4b36c1b723c6f..18637ac7b49caa 100644
--- a/src/coreclr/jit/fgopt.cpp
+++ b/src/coreclr/jit/fgopt.cpp
@@ -466,7 +466,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock)
block->bbFlags &= ~(BBF_REMOVED | BBF_INTERNAL);
block->bbFlags |= BBF_IMPORTED;

- block->setBBJumpKind(BBJ_THROW DEBUG_ARG(this));
+ block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this));
block->bbSetRunRarely();

#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
@@ -1650,7 +1650,7 @@ PhaseStatus Compiler::fgPostImportationCleanup()
// plausible flow target. Simplest is to just mark it as a throw.
if (bbIsHandlerBeg(newTryEntry->bbNext)) { - newTryEntry->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); + newTryEntry->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); } else { @@ -1787,7 +1787,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() GenTree* const jumpIfEntryStateZero = gtNewOperNode(GT_JTRUE, TYP_VOID, compareEntryStateToZero); fgNewStmtAtBeg(fromBlock, jumpIfEntryStateZero); - fromBlock->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); + fromBlock->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); fromBlock->bbJumpDest = toBlock; fgAddRefPred(toBlock, fromBlock); newBlock->inheritWeight(fromBlock); @@ -2268,7 +2268,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) /* set the right links */ - block->setBBJumpKind(bNext->getBBJumpKind() DEBUG_ARG(this)); + block->SetBBJumpKind(bNext->GetBBJumpKind() DEBUG_ARG(this)); VarSetOps::AssignAllowUninitRhs(this, block->bbLiveOut, bNext->bbLiveOut); // Update the beginning and ending IL offsets (bbCodeOffs and bbCodeOffsEnd). @@ -2328,7 +2328,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) /* Set the jump targets */ - switch (bNext->getBBJumpKind()) + switch (bNext->GetBBJumpKind()) { case BBJ_CALLFINALLY: // Propagate RETLESS property @@ -2634,7 +2634,7 @@ void Compiler::fgRemoveConditionalJump(BasicBlock* block) noway_assert(flow->getDupCount() == 2); // Change the BBJ_COND to BBJ_NONE, and adjust the refCount and dupCount. - block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); --block->bbNext->bbRefs; flow->decrementDupCount(); @@ -2886,7 +2886,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) bool madeChanges = false; BasicBlock* bPrev = block->bbPrev; - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_COND: case BBJ_SWITCH: @@ -3312,7 +3312,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) // Change the switch jump into a BBJ_ALWAYS block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); if (jmpCnt > 1) { for (unsigned i = 1; i < jmpCnt; ++i) @@ -3377,7 +3377,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) } block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; - block->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); JITDUMP("After:\n"); DISPNODE(switchTree); @@ -3788,7 +3788,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // Fix up block's flow // - block->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); block->bbJumpDest = target->bbJumpDest; fgAddRefPred(block->bbJumpDest, block); fgRemoveRefPred(target, block); @@ -3841,7 +3841,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi if (!block->isBBCallAlwaysPairTail()) { /* the unconditional jump is to the next BB */ - block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); #ifdef DEBUG if (verbose) { @@ -3967,7 +3967,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi /* Conditional is gone - simply fall into the next block */ - block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); /* Update bbRefs and bbNum - Conditional predecessors to the same * block are counted twice so we have to remove one of them */ @@ -4232,7 +4232,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* 
bJump) // We need to update the following flags of the bJump block if they were set in the bDest block bJump->bbFlags |= bDest->bbFlags & BBF_COPY_PROPAGATE; - bJump->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); + bJump->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); bJump->bbJumpDest = bDest->bbNext; /* Update bbRefs and bbPreds */ @@ -4393,7 +4393,7 @@ bool Compiler::fgOptimizeSwitchJumps() // Wire up the new control flow. // - block->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); block->bbJumpDest = dominantTarget; FlowEdge* const blockToTargetEdge = fgAddRefPred(dominantTarget, block); FlowEdge* const blockToNewBlockEdge = newBlock->bbPreds; @@ -4610,7 +4610,7 @@ bool Compiler::fgExpandRarelyRunBlocks() const char* reason = nullptr; - switch (bPrev->getBBJumpKind()) + switch (bPrev->GetBBJumpKind()) { case BBJ_ALWAYS: @@ -6454,7 +6454,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) } else if (block->countOfInEdges() == 1) { - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_COND: case BBJ_ALWAYS: @@ -6551,7 +6551,7 @@ unsigned Compiler::fgGetCodeEstimate(BasicBlock* block) { unsigned costSz = 0; // estimate of block's code size cost - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_NONE: costSz = 0; @@ -6976,7 +6976,7 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) // Fix up the flow. // - predBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + predBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); predBlock->bbJumpDest = crossJumpTarget; fgRemoveRefPred(block, predBlock); diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index 26c9afc1fbcf02..6444e45085db7d 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -501,7 +501,7 @@ void BlockCountInstrumentor::RelocateProbes() // if (pred->KindIs(BBJ_NONE)) { - pred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); + pred->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); pred->bbJumpDest = block; } assert(pred->KindIs(BBJ_ALWAYS)); @@ -945,7 +945,7 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor) visitor->VisitBlock(block); nBlocks++; - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_CALLFINALLY: { @@ -1554,7 +1554,7 @@ void EfficientEdgeCountInstrumentor::SplitCriticalEdges() // if (block->KindIs(BBJ_NONE)) { - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); block->bbJumpDest = target; } @@ -1697,7 +1697,7 @@ void EfficientEdgeCountInstrumentor::RelocateProbes() // if (pred->KindIs(BBJ_NONE)) { - pred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); + pred->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); pred->bbJumpDest = block; } assert(pred->KindIs(BBJ_ALWAYS)); @@ -3922,7 +3922,7 @@ void EfficientEdgeCountReconstructor::PropagateEdges(BasicBlock* block, BlockInf // void EfficientEdgeCountReconstructor::MarkInterestingBlocks(BasicBlock* block, BlockInfo* info) { - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_SWITCH: MarkInterestingSwitches(block, info); @@ -4687,7 +4687,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() } slop = BasicBlock::GetSlopFraction(bSrc, bDst) + 1; - switch (bSrc->getBBJumpKind()) + switch (bSrc->GetBBJumpKind()) { case BBJ_ALWAYS: case BBJ_EHCATCHRET: diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index 4d6d549e03d269..90d56a835ff10d 100644 --- 
a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -132,7 +132,7 @@ void ProfileSynthesis::AssignLikelihoods() for (BasicBlock* const block : m_comp->Blocks()) { - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: @@ -499,7 +499,7 @@ void ProfileSynthesis::RepairLikelihoods() for (BasicBlock* const block : m_comp->Blocks()) { - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: @@ -591,7 +591,7 @@ void ProfileSynthesis::BlendLikelihoods() { weight_t sum = SumOutgoingLikelihoods(block, &likelihoods); - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index fb4399cf8618c2..78dc4571352aa5 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -120,7 +120,7 @@ PhaseStatus Compiler::fgInsertGCPolls() JITDUMP("Selecting CALL poll in block " FMT_BB " because it is the single return block\n", block->bbNum); pollType = GCPOLL_CALL; } - else if (BBJ_SWITCH == block->getBBJumpKind()) + else if (BBJ_SWITCH == block->GetBBJumpKind()) { // We don't want to deal with all the outgoing edges of a switch block. // @@ -261,8 +261,8 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) } BasicBlock* poll = fgNewBBafter(BBJ_NONE, top, true); - bottom = fgNewBBafter(top->getBBJumpKind(), poll, true); - BBjumpKinds oldJumpKind = top->getBBJumpKind(); + bottom = fgNewBBafter(top->GetBBJumpKind(), poll, true); + BBjumpKinds oldJumpKind = top->GetBBJumpKind(); unsigned char lpIndex = top->bbNatLoopNum; // Update block flags @@ -372,7 +372,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) #endif top->bbJumpDest = bottom; - top->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); + top->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); // Bottom has Top and Poll as its predecessors. Poll has just Top as a predecessor. fgAddRefPred(bottom, poll); @@ -1287,7 +1287,7 @@ void Compiler::fgLoopCallMark() for (BasicBlock* const block : Blocks()) { - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_COND: case BBJ_CALLFINALLY: @@ -1837,7 +1837,7 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) assert(ehDsc->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX); // Convert the BBJ_RETURN to BBJ_ALWAYS, jumping to genReturnBB. - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = genReturnBB; fgAddRefPred(genReturnBB, block); @@ -2309,7 +2309,7 @@ class MergedReturns // Change BBJ_RETURN to BBJ_ALWAYS targeting const return block. assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); - returnBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); + returnBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); returnBlock->bbJumpDest = constReturnBlock; comp->fgAddRefPred(constReturnBlock, returnBlock); @@ -3125,7 +3125,7 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) // It's a jump from outside the handler; add it to the newHead preds list and remove // it from the block preds list. 
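// A note on the pred-list idiom used here and in several earlier hunks:
// fgRemoveRefPred appears to return the FlowEdge it removed, and the
// three-argument fgAddRefPred accepts that edge so the new edge inherits its
// duplicate count and likelihood (signatures inferred from the call sites,
// not spelled out in this patch). That is what makes one-step retargets like
//
//     fgAddRefPred(bNext->bbJumpDest, block, fgRemoveRefPred(bNext->bbJumpDest, bNext));
//
// in the fgUpdateFlowGraph changes earlier in the series work: the
// bNext -> bbJumpDest edge is moved so that it now runs block -> bbJumpDest.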
- switch (predBlock->getBBJumpKind()) + switch (predBlock->GetBBJumpKind()) { case BBJ_CALLFINALLY: noway_assert(predBlock->bbJumpDest == block); @@ -3503,7 +3503,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // if (prevToFirstColdBlock->bbFallsThrough()) { - switch (prevToFirstColdBlock->getBBJumpKind()) + switch (prevToFirstColdBlock->GetBBJumpKind()) { default: noway_assert(!"Unhandled jumpkind in fgDetermineFirstColdBlock()"); @@ -3548,7 +3548,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // convert it to BBJ_ALWAYS to force an explicit jump. prevToFirstColdBlock->bbJumpDest = firstColdBlock; - prevToFirstColdBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + prevToFirstColdBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); break; } } @@ -3981,7 +3981,7 @@ PhaseStatus Compiler::fgSetBlockOrder() (((src)->bbNum < (dst)->bbNum) || (((src)->bbFlags | (dst)->bbFlags) & BBF_GC_SAFE_POINT)) bool partiallyInterruptible = true; - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_COND: case BBJ_ALWAYS: diff --git a/src/coreclr/jit/ifconversion.cpp b/src/coreclr/jit/ifconversion.cpp index 7b50f5428458b0..6fd420c62a3d19 100644 --- a/src/coreclr/jit/ifconversion.cpp +++ b/src/coreclr/jit/ifconversion.cpp @@ -743,7 +743,7 @@ bool OptIfConversionDsc::optIfConvert() // Update the flow from the original block. m_comp->fgRemoveAllRefPreds(m_startBlock->bbNext, m_startBlock); - m_startBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); + m_startBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); #ifdef DEBUG if (m_comp->verbose) diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index db5d2e3ecd4b79..b2abb048ad4612 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -2455,7 +2455,7 @@ GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom) void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)) { - block->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); block->bbFlags |= BBF_FAILED_VERIFICATION; block->bbFlags &= ~BBF_IMPORTED; @@ -4322,7 +4322,7 @@ void Compiler::impImportLeave(BasicBlock* block) { assert(step == DUMMY_INIT(NULL)); callBlock = block; - callBlock->setBBJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY + callBlock->SetBBJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY if (endCatches) { @@ -4419,7 +4419,7 @@ void Compiler::impImportLeave(BasicBlock* block) if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS if (endCatches) { @@ -4573,7 +4573,7 @@ void Compiler::impImportLeave(BasicBlock* block) if (step == nullptr) { step = block; - step->setBBJumpKind(BBJ_EHCATCHRET DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_EHCATCHRET + step->SetBBJumpKind(BBJ_EHCATCHRET DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_EHCATCHRET stepType = ST_Catch; #ifdef DEBUG @@ -4651,7 +4651,7 @@ void Compiler::impImportLeave(BasicBlock* block) // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the // next block, and flow optimizations will remove it. 
- block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); fgRemoveRefPred(block->bbJumpDest, block); block->bbJumpDest = callBlock; fgAddRefPred(callBlock, block); @@ -4673,7 +4673,7 @@ void Compiler::impImportLeave(BasicBlock* block) #else // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = block; - callBlock->setBBJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY + callBlock->SetBBJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY #ifdef DEBUG if (verbose) @@ -4908,7 +4908,7 @@ void Compiler::impImportLeave(BasicBlock* block) if (step == nullptr) { - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS #ifdef DEBUG if (verbose) @@ -4994,7 +4994,7 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) // will be treated as pair and handled correctly. if (block->KindIs(BBJ_CALLFINALLY)) { - BasicBlock* dupBlock = bbNewBasicBlock(block->getBBJumpKind()); + BasicBlock* dupBlock = bbNewBasicBlock(block->GetBBJumpKind()); dupBlock->bbFlags = block->bbFlags; dupBlock->bbJumpDest = block->bbJumpDest; fgAddRefPred(dupBlock->bbJumpDest, dupBlock); @@ -5024,7 +5024,7 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) } #endif // FEATURE_EH_FUNCLETS - block->setBBJumpKind(BBJ_LEAVE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_LEAVE DEBUG_ARG(this)); fgInitBBLookup(); fgRemoveRefPred(block->bbJumpDest, block); @@ -6002,7 +6002,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Change block to BBJ_THROW so we won't trigger importation of successors. // - block->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); // If this method has a explicit generic context, the only uses of it may be in // the IL for this block. So assume it's used. @@ -7307,7 +7307,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", block->bbNum, block->bbNext->bbNum); fgRemoveRefPred(block->bbJumpDest, block); - block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } else { @@ -7380,7 +7380,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) block->bbJumpDest->bbNum); fgRemoveRefPred(block->bbNext, block); } - block->setBBJumpKind(foldedJumpKind DEBUG_ARG(this)); + block->SetBBJumpKind(foldedJumpKind DEBUG_ARG(this)); } break; @@ -7553,7 +7553,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", block->bbNum, block->bbNext->bbNum); fgRemoveRefPred(block->bbJumpDest, block); - block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } else { @@ -7633,13 +7633,13 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (curJump != block->bbNext) { // transform the basic block into a BBJ_ALWAYS - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = curJump; } else { // transform the basic block into a BBJ_NONE - block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } foundVal = true; } @@ -11279,7 +11279,7 @@ void Compiler::impImportBlock(BasicBlock* block) unsigned multRef = impCanReimport ? 
unsigned(~0) : 0; - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_COND: diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index ab67048abbbc67..da1fb1933b2397 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -573,7 +573,7 @@ class IndirectCallTransformer // There's no need for a new block here. We can just append to currBlock. // checkBlock = currBlock; - checkBlock->setBBJumpKind(BBJ_COND DEBUG_ARG(compiler)); + checkBlock->SetBBJumpKind(BBJ_COND DEBUG_ARG(compiler)); } else { @@ -652,7 +652,7 @@ class IndirectCallTransformer if (isLastCheck && ((origCall->gtCallMoreFlags & GTF_CALL_M_GUARDED_DEVIRT_EXACT) != 0)) { checkBlock->bbJumpDest = nullptr; - checkBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(compiler)); + checkBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(compiler)); return; } @@ -1126,7 +1126,7 @@ class IndirectCallTransformer // not fall through to the check block. // compiler->fgRemoveRefPred(checkBlock, coldBlock); - coldBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(compiler)); + coldBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(compiler)); coldBlock->bbJumpDest = elseBlock; compiler->fgAddRefPred(elseBlock, coldBlock); } diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index 8606b18743e72d..69658a7cd1dd22 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -3506,7 +3506,7 @@ void Compiler::fgVerifyHandlerTab() } // Check for legal block types - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_EHFINALLYRET: { diff --git a/src/coreclr/jit/lir.cpp b/src/coreclr/jit/lir.cpp index 7edb0515ae3239..44e810592a006a 100644 --- a/src/coreclr/jit/lir.cpp +++ b/src/coreclr/jit/lir.cpp @@ -1770,7 +1770,7 @@ void LIR::InsertBeforeTerminator(BasicBlock* block, LIR::Range&& range) assert(insertionPoint != nullptr); #if DEBUG - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_COND: assert(insertionPoint->OperIsConditionalJump()); diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index d66ddc05a5cdcb..d32854e4224c71 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -378,7 +378,7 @@ void Compiler::fgPerBlockLocalVarLiveness() block->bbMemoryLiveIn = fullMemoryKindSet; block->bbMemoryLiveOut = fullMemoryKindSet; - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_EHFINALLYRET: case BBJ_EHFAULTRET: @@ -886,7 +886,7 @@ void Compiler::fgExtendDbgLifetimes() { VarSetOps::ClearD(this, initVars); - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_NONE: PREFIX_ASSUME(block->bbNext != nullptr); diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index e9f4df76924747..721f97b47019f9 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -2047,7 +2047,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) { assert(h->KindIs(BBJ_ALWAYS)); assert(h->bbJumpDest == loop.lpEntry); - h2->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + h2->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); h2->bbJumpDest = loop.lpEntry; } @@ -2062,7 +2062,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // Make 'h' fall through to 'h2' (if it didn't already). 
// Don't add the h->h2 edge because we're going to insert the cloning conditions between 'h' and 'h2', and // optInsertLoopChoiceConditions() will add the edge. - h->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + h->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); h->bbJumpDest = nullptr; // Make X2 after B, if necessary. (Not necessary if B is a BBJ_ALWAYS.) @@ -2116,7 +2116,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone)); for (BasicBlock* const blk : loop.LoopBlocks()) { - BasicBlock* newBlk = fgNewBBafter(blk->getBBJumpKind(), newPred, /*extendRegion*/ true); + BasicBlock* newBlk = fgNewBBafter(blk->GetBBJumpKind(), newPred, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " (copy of " FMT_BB ") after " FMT_BB "\n", newBlk->bbNum, blk->bbNum, newPred->bbNum); // Call CloneBlockState to make a copy of the block's statements (and attributes), and assert that it @@ -2175,7 +2175,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) bool b = blockMap->Lookup(blk, &newblk); assert(b && newblk != nullptr); - assert(blk->KindIs(newblk->getBBJumpKind())); + assert(blk->KindIs(newblk->GetBBJumpKind())); // First copy the jump destination(s) from "blk". optCopyBlkDest(blk, newblk); @@ -2184,7 +2184,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) optRedirectBlock(newblk, blockMap); // Add predecessor edges for the new successors, as well as the fall-through paths. - switch (newblk->getBBJumpKind()) + switch (newblk->GetBBJumpKind()) { case BBJ_NONE: fgAddRefPred(newblk->bbNext, newblk); @@ -2255,7 +2255,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) { // We can't just fall through to the slow path entry, so make it an unconditional branch. assert(slowHead->KindIs(BBJ_NONE)); // This is how we created it above. - slowHead->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + slowHead->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); slowHead->bbJumpDest = e2; } diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 66bdb7a64f52b0..94d6057803ba46 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -801,12 +801,12 @@ GenTree* Lowering::LowerSwitch(GenTree* node) noway_assert(comp->opts.OptimizationDisabled()); if (originalSwitchBB->bbNext == jumpTab[0]) { - originalSwitchBB->setBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); + originalSwitchBB->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); originalSwitchBB->bbJumpDest = nullptr; } else { - originalSwitchBB->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); + originalSwitchBB->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); originalSwitchBB->bbJumpDest = jumpTab[0]; } // Remove extra predecessor links if there was more than one case. @@ -900,7 +900,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // The GT_SWITCH code is still in originalSwitchBB (it will be removed later). // Turn originalSwitchBB into a BBJ_COND. 
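// For orientation (the overall strategy is inferred from this function, not
// stated in the patch): LowerSwitch peels the default case off first as a
// single unsigned range check, so originalSwitchBB ends in a BBJ_COND of
// roughly the form
//
//     if (switchValue > maxCaseIndex)  goto defaultTarget;  // maxCaseIndex is illustrative
//
// and the remaining cases are then expanded below as a chain of BBJ_COND
// equality tests, a bit test, or a jump table.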
- originalSwitchBB->setBBJumpKind(BBJ_COND DEBUG_ARG(comp)); + originalSwitchBB->SetBBJumpKind(BBJ_COND DEBUG_ARG(comp)); originalSwitchBB->bbJumpDest = jumpTab[jumpCnt - 1]; // Fix the pred for the default case: the default block target still has originalSwitchBB @@ -957,12 +957,12 @@ GenTree* Lowering::LowerSwitch(GenTree* node) } if (afterDefaultCondBlock->bbNext == uniqueSucc) { - afterDefaultCondBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); + afterDefaultCondBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); afterDefaultCondBlock->bbJumpDest = nullptr; } else { - afterDefaultCondBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); + afterDefaultCondBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); afterDefaultCondBlock->bbJumpDest = uniqueSucc; } } @@ -1036,13 +1036,13 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // case: there is no need to compare against the case index, since it's // guaranteed to be taken (since the default case was handled first, above). - currentBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); + currentBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); } else { // Otherwise, it's a conditional branch. Set the branch kind, then add the // condition statement. - currentBlock->setBBJumpKind(BBJ_COND DEBUG_ARG(comp)); + currentBlock->SetBBJumpKind(BBJ_COND DEBUG_ARG(comp)); // Now, build the conditional statement for the current case that is // being evaluated: @@ -1075,7 +1075,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum); assert(currentBlock == afterDefaultCondBlock); assert(currentBlock->KindIs(BBJ_SWITCH)); - currentBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); + currentBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); currentBlock->bbFlags &= ~BBF_DONT_REMOVE; comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block. } @@ -1247,7 +1247,7 @@ bool Lowering::TryLowerSwitchToBitTest( // GenCondition bbSwitchCondition; - bbSwitch->setBBJumpKind(BBJ_COND DEBUG_ARG(comp)); + bbSwitch->SetBBJumpKind(BBJ_COND DEBUG_ARG(comp)); comp->fgRemoveAllRefPreds(bbCase1, bbSwitch); comp->fgRemoveAllRefPreds(bbCase0, bbSwitch); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 4a2606729253f8..53a99febef0aaa 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -6190,7 +6190,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) // Many tailcalls will have call and ret in the same block, and thus be // BBJ_RETURN, but if the call falls through to a ret, and we are doing a // tailcall, change it here. - compCurBB->setBBJumpKind(BBJ_RETURN DEBUG_ARG(this)); + compCurBB->SetBBJumpKind(BBJ_RETURN DEBUG_ARG(this)); } GenTree* stmtExpr = fgMorphStmt->GetRootNode(); @@ -6338,7 +6338,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // We call CORINFO_HELP_TAILCALL which does not return, so we will // not need epilogue. - compCurBB->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); + compCurBB->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); } if (isRootReplaced) @@ -7490,7 +7490,7 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa } // Finish hooking things up. 
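// For context, a sketch of what fgMorphRecursiveFastTailCallIntoLoop does
// overall (inferred from the name and the flow edits below): a recursive fast
// tail call
//
//     return Method(newArgs);        // block currently ends in BBJ_RETURN
//
// is rewritten into stores of newArgs to the parameter locals followed by a
// jump back to the start of the method body, so the block becomes a
// BBJ_ALWAYS whose bbJumpDest is the loop entry.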
- block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); fgAddRefPred(block->bbJumpDest, block); block->bbFlags &= ~BBF_HAS_JMP; } @@ -13183,7 +13183,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if (cond->AsIntCon()->gtIconVal != 0) { /* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */ - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); bTaken = block->bbJumpDest; bNotTaken = block->bbNext; } @@ -13199,7 +13199,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) } /* JTRUE 0 - transform the basic block into a BBJ_NONE */ - block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); bTaken = block->bbNext; bNotTaken = block->bbJumpDest; } @@ -13254,7 +13254,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) FlowEdge* edge; // Now fix the weights of the edges out of 'bUpdated' - switch (bUpdated->getBBJumpKind()) + switch (bUpdated->GetBBJumpKind()) { case BBJ_NONE: edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated); @@ -13428,13 +13428,13 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if (curJump != block->bbNext) { // transform the basic block into a BBJ_ALWAYS - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = curJump; } else { // transform the basic block into a BBJ_NONE - block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } foundVal = true; } @@ -14002,7 +14002,7 @@ void Compiler::fgMergeBlockReturn(BasicBlock* block) else #endif // !TARGET_X86 { - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = genReturnBB; fgAddRefPred(genReturnBB, block); fgReturnCount--; diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index 09683dde47bc8b..82d2430b914454 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -587,7 +587,7 @@ bool OptBoolsDsc::optOptimizeCompareChainCondBlock() // Update the flow. m_comp->fgRemoveRefPred(m_b1->bbJumpDest, m_b1); - m_b1->setBBJumpKind(BBJ_NONE DEBUG_ARG(m_comp)); + m_b1->SetBBJumpKind(BBJ_NONE DEBUG_ARG(m_comp)); // Fixup flags. 
m_b2->bbFlags |= (m_b1->bbFlags & BBF_COPY_PROPAGATE); @@ -877,7 +877,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() if (optReturnBlock) { m_b1->bbJumpDest = nullptr; - m_b1->setBBJumpKind(BBJ_RETURN DEBUG_ARG(m_comp)); + m_b1->SetBBJumpKind(BBJ_RETURN DEBUG_ARG(m_comp)); #ifdef DEBUG m_b1->bbJumpSwt = m_b2->bbJumpSwt; #endif diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 3d3baeef6a2017..bc54bcc3af06fd 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -1385,7 +1385,7 @@ void Compiler::optCheckPreds() } } noway_assert(bb); - switch (bb->getBBJumpKind()) + switch (bb->GetBBJumpKind()) { case BBJ_COND: if (bb->bbJumpDest == block) @@ -2398,7 +2398,7 @@ class LoopSearch { BasicBlock* exitPoint; - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_COND: case BBJ_CALLFINALLY: @@ -2738,7 +2738,7 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R BasicBlock* newJumpDest = nullptr; - switch (blk->getBBJumpKind()) + switch (blk->GetBBJumpKind()) { case BBJ_NONE: case BBJ_THROW: @@ -2818,10 +2818,10 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R // TODO-Cleanup: This should be a static member of the BasicBlock class. void Compiler::optCopyBlkDest(BasicBlock* from, BasicBlock* to) { - assert(from->KindIs(to->getBBJumpKind())); // Precondition. + assert(from->KindIs(to->GetBBJumpKind())); // Precondition. // copy the jump destination(s) from "from" to "to". - switch (to->getBBJumpKind()) + switch (to->GetBBJumpKind()) { case BBJ_ALWAYS: case BBJ_LEAVE: @@ -4361,7 +4361,7 @@ PhaseStatus Compiler::optUnrollLoops() for (BasicBlock* block = loop.lpTop; block != loop.lpBottom->bbNext; block = block->bbNext) { BasicBlock* newBlock = insertAfter = - fgNewBBafter(block->getBBJumpKind(), insertAfter, /*extendRegion*/ true); + fgNewBBafter(block->GetBBJumpKind(), insertAfter, /*extendRegion*/ true); blockMap.Set(block, newBlock, BlockToBlockMap::Overwrite); if (!BasicBlock::CloneBlockState(this, newBlock, block, lvar, lval)) @@ -4415,7 +4415,7 @@ PhaseStatus Compiler::optUnrollLoops() { testCopyStmt->SetRootNode(sideEffList); } - newBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + newBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } } @@ -4486,7 +4486,7 @@ PhaseStatus Compiler::optUnrollLoops() fgRemoveAllRefPreds(succ, block); } - block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); block->bbStmtList = nullptr; block->bbJumpDest = nullptr; block->bbNatLoopNum = newLoopNum; @@ -4531,7 +4531,7 @@ PhaseStatus Compiler::optUnrollLoops() noway_assert(initBlockBranchStmt->GetRootNode()->OperIs(GT_JTRUE)); fgRemoveStmt(initBlock, initBlockBranchStmt); fgRemoveRefPred(initBlock->bbJumpDest, initBlock); - initBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + initBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } else { @@ -5077,7 +5077,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) bool foundCondTree = false; // Create a new block after `block` to put the copied condition code. 
- block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); block->bbJumpDest = nullptr; BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true); @@ -8306,7 +8306,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum) continue; } - switch (predBlock->getBBJumpKind()) + switch (predBlock->GetBBJumpKind()) { case BBJ_NONE: // This 'entry' predecessor that isn't dominated by 'entry' must be outside the loop, diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp index 2423a6d9da47aa..017509086d208a 100644 --- a/src/coreclr/jit/patchpoint.cpp +++ b/src/coreclr/jit/patchpoint.cpp @@ -145,7 +145,7 @@ class PatchpointTransformer BasicBlock* helperBlock = CreateAndInsertBasicBlock(BBJ_NONE, block); // Update flow and flags - block->setBBJumpKind(BBJ_COND DEBUG_ARG(compiler)); + block->SetBBJumpKind(BBJ_COND DEBUG_ARG(compiler)); block->bbJumpDest = remainderBlock; block->bbFlags |= BBF_INTERNAL; @@ -233,7 +233,7 @@ class PatchpointTransformer } // Update flow - block->setBBJumpKind(BBJ_THROW DEBUG_ARG(compiler)); + block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(compiler)); block->bbJumpDest = nullptr; // Add helper call diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp index cbc59c30d73a3c..dfbd1863cb4b6d 100644 --- a/src/coreclr/jit/redundantbranchopts.cpp +++ b/src/coreclr/jit/redundantbranchopts.cpp @@ -1460,7 +1460,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // Possibly defer this until after early out below. // - jti.m_fallThroughPred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + jti.m_fallThroughPred->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); jti.m_fallThroughPred->bbJumpDest = jti.m_block; modifiedFlow = true; } @@ -1532,7 +1532,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) fgRemoveStmt(jti.m_block, lastStmt); JITDUMP(" repurposing " FMT_BB " to always jump to " FMT_BB "\n", jti.m_block->bbNum, jti.m_trueTarget->bbNum); fgRemoveRefPred(jti.m_falseTarget, jti.m_block); - jti.m_block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + jti.m_block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); } else if (falsePredsWillReuseBlock) { @@ -1541,7 +1541,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) JITDUMP(" repurposing " FMT_BB " to always fall through to " FMT_BB "\n", jti.m_block->bbNum, jti.m_falseTarget->bbNum); fgRemoveRefPred(jti.m_trueTarget, jti.m_block); - jti.m_block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + jti.m_block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } // Now reroute the flow from the predecessors. 
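A note on the DEBUG_ARG plumbing that every SetBBJumpKind call site above threads through: the JIT's shared headers define a macro pair for debug-only trailing parameters, so release builds pay nothing for the Compiler* that the setter's assert consumes. A minimal sketch of the idiom follows; it shows the shape of the mechanism, not the verbatim header source.

#ifdef DEBUG
#define DEBUG_ARG(x) , x
#else
#define DEBUG_ARG(x)
#endif

// Under these definitions, a call such as
//     block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
// expands to SetBBJumpKind(BBJ_ALWAYS, this) in DEBUG builds, where the
// setter can assert about stray BBJ_NONE assignments, and to
// SetBBJumpKind(BBJ_ALWAYS) in release builds, where the extra parameter
// does not exist at all.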
diff --git a/src/coreclr/jit/switchrecognition.cpp b/src/coreclr/jit/switchrecognition.cpp index 90bfa43142e75a..125c2cf2fbebee 100644 --- a/src/coreclr/jit/switchrecognition.cpp +++ b/src/coreclr/jit/switchrecognition.cpp @@ -319,7 +319,7 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* assert(isTest); // Convert firstBlock to a switch block - firstBlock->setBBJumpKind(BBJ_SWITCH DEBUG_ARG(this)); + firstBlock->SetBBJumpKind(BBJ_SWITCH DEBUG_ARG(this)); firstBlock->bbJumpDest = nullptr; firstBlock->bbCodeOffsEnd = lastBlock->bbCodeOffsEnd; firstBlock->lastStmt()->GetRootNode()->ChangeOper(GT_SWITCH); From 754743d59204ce62223c4b229ba6e9790aeeb2c1 Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Mon, 2 Oct 2023 21:23:36 -0400 Subject: [PATCH 05/14] Typo --- src/coreclr/jit/fgdiagnostic.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index df319152a2dd0e..318e241d35ae09 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -192,7 +192,7 @@ void Compiler::fgDebugCheckUpdate() /* For a BBJ_CALLFINALLY block we make sure that we are followed by */ /* an BBJ_ALWAYS block with BBF_INTERNAL set */ /* or that it's a BBF_RETLESS_CALL */ - if (block->KindIs(BBJ_CALLFINALLY) == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { assert((block->bbFlags & BBF_RETLESS_CALL) || block->isBBCallAlwaysPair()); } From 0a16a8fc956fd6d3ae5b01b54954d680f5b79f8f Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Wed, 4 Oct 2023 17:20:17 -0400 Subject: [PATCH 06/14] Make bbPrev and bbNext private --- src/coreclr/jit/assertionprop.cpp | 2 +- src/coreclr/jit/block.cpp | 12 +- src/coreclr/jit/block.h | 89 +++++---- src/coreclr/jit/clrjit.natvis | 4 +- src/coreclr/jit/codegenarm.cpp | 12 +- src/coreclr/jit/codegenarm64.cpp | 4 +- src/coreclr/jit/codegencommon.cpp | 32 +-- src/coreclr/jit/codegenlinear.cpp | 36 ++-- src/coreclr/jit/codegenloongarch64.cpp | 4 +- src/coreclr/jit/codegenriscv64.cpp | 4 +- src/coreclr/jit/codegenxarch.cpp | 4 +- src/coreclr/jit/compiler.cpp | 6 +- src/coreclr/jit/compiler.h | 2 +- src/coreclr/jit/compiler.hpp | 14 +- src/coreclr/jit/fgbasic.cpp | 189 +++++++++--------- src/coreclr/jit/fgdiagnostic.cpp | 65 +++--- src/coreclr/jit/fgehopt.cpp | 79 ++++---- src/coreclr/jit/fgflow.cpp | 14 +- src/coreclr/jit/fginline.cpp | 12 +- src/coreclr/jit/fgopt.cpp | 206 ++++++++++---------- src/coreclr/jit/fgprofile.cpp | 24 +-- src/coreclr/jit/fgprofilesynthesis.cpp | 6 +- src/coreclr/jit/flowgraph.cpp | 46 ++--- src/coreclr/jit/helperexpansion.cpp | 2 +- src/coreclr/jit/ifconversion.cpp | 10 +- src/coreclr/jit/importer.cpp | 34 ++-- src/coreclr/jit/indirectcalltransformer.cpp | 4 +- src/coreclr/jit/jiteh.cpp | 56 +++--- src/coreclr/jit/liveness.cpp | 20 +- src/coreclr/jit/loopcloning.cpp | 20 +- src/coreclr/jit/lower.cpp | 18 +- src/coreclr/jit/lsra.cpp | 11 +- src/coreclr/jit/morph.cpp | 22 +-- src/coreclr/jit/optimizebools.cpp | 24 +-- src/coreclr/jit/optimizer.cpp | 116 +++++------ src/coreclr/jit/patchpoint.cpp | 2 +- src/coreclr/jit/promotionliveness.cpp | 4 +- src/coreclr/jit/rangecheck.cpp | 2 +- src/coreclr/jit/redundantbranchopts.cpp | 16 +- src/coreclr/jit/switchrecognition.cpp | 22 +-- src/coreclr/jit/unwind.cpp | 4 +- 41 files changed, 631 insertions(+), 622 deletions(-) diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp index f38878e33fff83..e124236a3c189f 100644 --- 
a/src/coreclr/jit/assertionprop.cpp +++ b/src/coreclr/jit/assertionprop.cpp @@ -5268,7 +5268,7 @@ class AssertionPropFlowCallback { // Scenario where next block and conditional block, both point to the same block. // In such case, intersect the assertions present on both the out edges of predBlock. - assert(predBlock->bbNext == block); + assert(predBlock->GetBBNext() == block); BitVecOps::IntersectionD(apTraits, pAssertionOut, predBlock->bbAssertionOut); if (VerboseDataflow()) diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index c2aa5ff45e3f44..2afe829d20fb9f 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -133,7 +133,7 @@ FlowEdge* Compiler::BlockPredsWithEH(BasicBlock* blk) // these cannot cause transfer to the handler...) // TODO-Throughput: It would be nice if we could iterate just over the blocks in the try, via // something like: - // for (BasicBlock* bb = ehblk->ebdTryBeg; bb != ehblk->ebdTryLast->bbNext; bb = bb->bbNext) + // for (BasicBlock* bb = ehblk->ebdTryBeg; bb != ehblk->ebdTryLast->GetBBNext(); bb = bb->GetBBNext()) // (plus adding in any filter blocks outside the try whose exceptions are handled here). // That doesn't work, however: funclets have caused us to sometimes split the body of a try into // more than one sequence of contiguous blocks. We need to find a better way to do this. @@ -160,7 +160,7 @@ FlowEdge* Compiler::BlockPredsWithEH(BasicBlock* blk) if (enclosingDsc->HasFilter()) { for (BasicBlock* filterBlk = enclosingDsc->ebdFilter; filterBlk != enclosingDsc->ebdHndBeg; - filterBlk = filterBlk->bbNext) + filterBlk = filterBlk->GetBBNext()) { res = new (this, CMK_FlowEdge) FlowEdge(filterBlk, res); @@ -1509,10 +1509,10 @@ bool BasicBlock::isBBCallAlwaysPair() const assert(!(this->bbFlags & BBF_RETLESS_CALL)); #endif // Some asserts that the next block is a BBJ_ALWAYS of the proper form. 
- assert(this->bbNext != nullptr); - assert(this->bbNext->KindIs(BBJ_ALWAYS)); - assert(this->bbNext->bbFlags & BBF_KEEP_BBJ_ALWAYS); - assert(this->bbNext->isEmpty()); + assert(this->GetBBNext() != nullptr); + assert(this->GetBBNext()->KindIs(BBJ_ALWAYS)); + assert(this->GetBBNext()->bbFlags & BBF_KEEP_BBJ_ALWAYS); + assert(this->GetBBNext()->isEmpty()); return true; } diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 88312967936f21..9d44238b309106 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -508,10 +508,49 @@ struct BasicBlock : private LIR::Range { friend class LIR; +private: BasicBlock* bbNext; // next BB in ascending PC offset order BasicBlock* bbPrev; - void setNext(BasicBlock* next) + BBjumpKinds bbJumpKind; // jump (if any) at the end of this block + +public: + BBjumpKinds GetBBJumpKind() const + { + return bbJumpKind; + } + + void SetBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* comp)) + { +#ifdef DEBUG + // BBJ_NONE should only be assigned when optimizing jumps in Compiler::optOptimizeLayout + // TODO: Change assert to check if comp is in appropriate optimization phase to use BBJ_NONE + // (right now, this assertion does the null check to avoid unused variable warnings) + assert((kind != BBJ_NONE) || (comp != nullptr)); +#endif // DEBUG + bbJumpKind = kind; + } + + BasicBlock* GetBBPrev() const + { + return bbPrev; + } + + void SetBBPrev(BasicBlock* prev) + { + bbPrev = prev; + if (prev) + { + prev->bbNext = this; + } + } + + BasicBlock* GetBBNext() const + { + return bbNext; + } + + void SetBBNext(BasicBlock* next) { bbNext = next; if (next) @@ -520,6 +559,13 @@ struct BasicBlock : private LIR::Range } } + /* The following union describes the jump target(s) of this block */ + union { + unsigned bbJumpOffs; // PC offset (temporary only) + BasicBlock* bbJumpDest; // basic block + BBswtDesc* bbJumpSwt; // switch descriptor + }; + BasicBlockFlags bbFlags; static_assert_no_msg((BBF_SPLIT_NONEXIST & BBF_SPLIT_LOST) == 0); @@ -702,33 +748,6 @@ struct BasicBlock : private LIR::Range // a block corresponding to an exit from the try of a try/finally. bool isBBCallAlwaysPairTail() const; -private: - BBjumpKinds bbJumpKind; // jump (if any) at the end of this block - -public: - BBjumpKinds GetBBJumpKind() const - { - return bbJumpKind; - } - - void SetBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* comp)) - { -#ifdef DEBUG - // BBJ_NONE should only be assigned when optimizing jumps in Compiler::optOptimizeLayout - // TODO: Change assert to check if comp is in appropriate optimization phase to use BBJ_NONE - // (right now, this assertion does the null check to avoid unused variable warnings) - assert((kind != BBJ_NONE) || (comp != nullptr)); -#endif // DEBUG - bbJumpKind = kind; - } - - /* The following union describes the jump target(s) of this block */ - union { - unsigned bbJumpOffs; // PC offset (temporary only) - BasicBlock* bbJumpDest; // basic block - BBswtDesc* bbJumpSwt; // switch descriptor - }; - bool KindIs(BBjumpKinds kind) const { return bbJumpKind == kind; @@ -1435,10 +1454,10 @@ class BasicBlockIterator { assert(m_block != nullptr); // Check that we haven't been spliced out of the list. 
- assert((m_block->bbNext == nullptr) || (m_block->bbNext->bbPrev == m_block)); - assert((m_block->bbPrev == nullptr) || (m_block->bbPrev->bbNext == m_block)); + assert((m_block->GetBBNext() == nullptr) || (m_block->GetBBNext()->GetBBPrev() == m_block)); + assert((m_block->GetBBPrev() == nullptr) || (m_block->GetBBPrev()->GetBBNext() == m_block)); - m_block = m_block->bbNext; + m_block = m_block->GetBBNext(); return *this; } @@ -1501,7 +1520,7 @@ class BasicBlockRangeList BasicBlockIterator end() const { - return BasicBlockIterator(m_end->bbNext); // walk until we see the block *following* the `m_end` block + return BasicBlockIterator(m_end->GetBBNext()); // walk until we see the block *following* the `m_end` block } }; @@ -1596,18 +1615,18 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) break; case BBJ_NONE: - m_succs[0] = block->bbNext; + m_succs[0] = block->GetBBNext(); m_begin = &m_succs[0]; m_end = &m_succs[1]; break; case BBJ_COND: - m_succs[0] = block->bbNext; + m_succs[0] = block->GetBBNext(); m_begin = &m_succs[0]; // If both fall-through and branch successors are identical, then only include // them once in the iteration (this is the same behavior as NumSucc()/GetSucc()). - if (block->bbJumpDest == block->bbNext) + if (block->bbJumpDest == block->GetBBNext()) { m_end = &m_succs[1]; } diff --git a/src/coreclr/jit/clrjit.natvis b/src/coreclr/jit/clrjit.natvis index 6d25673d9c9701..f75fd877e0e1ee 100644 --- a/src/coreclr/jit/clrjit.natvis +++ b/src/coreclr/jit/clrjit.natvis @@ -105,7 +105,7 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u varIndex++ bbLiveInMap = bbLiveInMap >> 1 - block = block->bbNext + block = block->GetBBNext() "OutVarToRegMaps" @@ -124,7 +124,7 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u varIndex++ bbLiveInMap = bbLiveInMap >> 1 - block = block->bbNext + block = block->GetBBNext() this->m_AvailableRegs diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp index 54c4b7e20dcd55..cd05af9a6f04ab 100644 --- a/src/coreclr/jit/codegenarm.cpp +++ b/src/coreclr/jit/codegenarm.cpp @@ -123,12 +123,12 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) // we would have otherwise created retless calls. assert(block->isBBCallAlwaysPair()); - assert(block->bbNext != NULL); - assert(block->bbNext->KindIs(BBJ_ALWAYS)); - assert(block->bbNext->bbJumpDest != NULL); - assert(block->bbNext->bbJumpDest->bbFlags & BBF_FINALLY_TARGET); + assert(block->GetBBNext() != NULL); + assert(block->GetBBNext()->KindIs(BBJ_ALWAYS)); + assert(block->GetBBNext()->bbJumpDest != NULL); + assert(block->GetBBNext()->bbJumpDest->bbFlags & BBF_FINALLY_TARGET); - bbFinallyRet = block->bbNext->bbJumpDest; + bbFinallyRet = block->GetBBNext()->bbJumpDest; // Load the address where the finally funclet should return into LR. // The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do the return. @@ -143,7 +143,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) // block is RETLESS. 
assert(!(block->bbFlags & BBF_RETLESS_CALL)); assert(block->isBBCallAlwaysPair()); - return block->bbNext; + return block->GetBBNext(); } //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index 6d22044c156b93..4b12859a9316db 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -2160,7 +2160,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) } GetEmitter()->emitIns_J(INS_bl_local, block->bbJumpDest); - BasicBlock* const nextBlock = block->bbNext; + BasicBlock* const nextBlock = block->GetBBNext(); if (block->bbFlags & BBF_RETLESS_CALL) { @@ -2184,7 +2184,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) BasicBlock* const jumpDest = nextBlock->bbJumpDest; // Now go to where the finally funclet needs to return to. - if ((jumpDest == nextBlock->bbNext) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) + if ((jumpDest == nextBlock->GetBBNext()) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) { // Fall-through. // TODO-ARM64-CQ: Can we get rid of this instruction, and just have the call return directly diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 8ccac405a37dd8..d631263e4a71e8 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -402,10 +402,10 @@ void CodeGen::genMarkLabelsForCodegen() { // For callfinally thunks, we need to mark the block following the callfinally/always pair, // as that's needed for identifying the range of the "duplicate finally" region in EH data. - BasicBlock* bbToLabel = block->bbNext; + BasicBlock* bbToLabel = block->GetBBNext(); if (block->isBBCallAlwaysPair()) { - bbToLabel = bbToLabel->bbNext; // skip the BBJ_ALWAYS + bbToLabel = bbToLabel->GetBBNext(); // skip the BBJ_ALWAYS } if (bbToLabel != nullptr) { @@ -446,16 +446,16 @@ void CodeGen::genMarkLabelsForCodegen() JITDUMP(" " FMT_BB " : try begin\n", HBtab->ebdTryBeg->bbNum); JITDUMP(" " FMT_BB " : hnd begin\n", HBtab->ebdHndBeg->bbNum); - if (HBtab->ebdTryLast->bbNext != nullptr) + if (HBtab->ebdTryLast->GetBBNext() != nullptr) { - HBtab->ebdTryLast->bbNext->bbFlags |= BBF_HAS_LABEL; - JITDUMP(" " FMT_BB " : try end\n", HBtab->ebdTryLast->bbNext->bbNum); + HBtab->ebdTryLast->GetBBNext()->bbFlags |= BBF_HAS_LABEL; + JITDUMP(" " FMT_BB " : try end\n", HBtab->ebdTryLast->GetBBNext()->bbNum); } - if (HBtab->ebdHndLast->bbNext != nullptr) + if (HBtab->ebdHndLast->GetBBNext() != nullptr) { - HBtab->ebdHndLast->bbNext->bbFlags |= BBF_HAS_LABEL; - JITDUMP(" " FMT_BB " : hnd end\n", HBtab->ebdHndLast->bbNext->bbNum); + HBtab->ebdHndLast->GetBBNext()->bbFlags |= BBF_HAS_LABEL; + JITDUMP(" " FMT_BB " : hnd end\n", HBtab->ebdHndLast->GetBBNext()->bbNum); } if (HBtab->HasFilter()) @@ -2302,9 +2302,9 @@ void CodeGen::genReportEH() hndBeg = compiler->ehCodeOffset(HBtab->ebdHndBeg); tryEnd = (HBtab->ebdTryLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize - : compiler->ehCodeOffset(HBtab->ebdTryLast->bbNext); + : compiler->ehCodeOffset(HBtab->ebdTryLast->GetBBNext()); hndEnd = (HBtab->ebdHndLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize - : compiler->ehCodeOffset(HBtab->ebdHndLast->bbNext); + : compiler->ehCodeOffset(HBtab->ebdHndLast->GetBBNext()); if (HBtab->HasFilter()) { @@ -2524,9 +2524,9 @@ void CodeGen::genReportEH() hndBeg = compiler->ehCodeOffset(bbHndBeg); tryEnd = (bbTryLast == compiler->fgLastBB) ? 
compiler->info.compNativeCodeSize - : compiler->ehCodeOffset(bbTryLast->bbNext); + : compiler->ehCodeOffset(bbTryLast->GetBBNext()); hndEnd = (bbHndLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize - : compiler->ehCodeOffset(bbHndLast->bbNext); + : compiler->ehCodeOffset(bbHndLast->GetBBNext()); if (encTab->HasFilter()) { @@ -2590,10 +2590,10 @@ void CodeGen::genReportEH() // How big is it? The BBJ_ALWAYS has a null bbEmitCookie! Look for the block after, which must be // a label or jump target, since the BBJ_CALLFINALLY doesn't fall through. - BasicBlock* bbLabel = block->bbNext; + BasicBlock* bbLabel = block->GetBBNext(); if (block->isBBCallAlwaysPair()) { - bbLabel = bbLabel->bbNext; // skip the BBJ_ALWAYS + bbLabel = bbLabel->GetBBNext(); // skip the BBJ_ALWAYS } if (bbLabel == nullptr) { @@ -5210,7 +5210,7 @@ void CodeGen::genReserveEpilog(BasicBlock* block) assert(block != nullptr); const VARSET_TP& gcrefVarsArg(GetEmitter()->emitThisGCrefVars); - bool last = (block->bbNext == nullptr); + bool last = (block->GetBBNext() == nullptr); GetEmitter()->emitCreatePlaceholderIG(IGPT_EPILOG, block, gcrefVarsArg, gcrefRegsArg, byrefRegsArg, last); } @@ -5257,7 +5257,7 @@ void CodeGen::genReserveFuncletEpilog(BasicBlock* block) JITDUMP("Reserving funclet epilog IG for block " FMT_BB "\n", block->bbNum); - bool last = (block->bbNext == nullptr); + bool last = (block->GetBBNext() == nullptr); GetEmitter()->emitCreatePlaceholderIG(IGPT_FUNCLET_EPILOG, block, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur, last); } diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index c1b93541c14c87..c2b8258c4b74b4 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -170,7 +170,7 @@ void CodeGen::genCodeForBBlist() BasicBlock* block; - for (block = compiler->fgFirstBB; block != nullptr; block = block->bbNext) + for (block = compiler->fgFirstBB; block != nullptr; block = block->GetBBNext()) { #ifdef DEBUG @@ -319,7 +319,7 @@ void CodeGen::genCodeForBBlist() } #endif // We should never have a block that falls through into the Cold section - noway_assert(!block->bbPrev->bbFallsThrough()); + noway_assert(!block->GetBBPrev()->bbFallsThrough()); needLabel = true; } @@ -330,12 +330,12 @@ void CodeGen::genCodeForBBlist() // // Note: We need to have set compCurBB before calling emitAddLabel // - if ((block->bbPrev != nullptr) && block->bbPrev->KindIs(BBJ_COND) && - (block->bbWeight != block->bbPrev->bbWeight)) + if ((block->GetBBPrev() != nullptr) && block->GetBBPrev()->KindIs(BBJ_COND) && + (block->bbWeight != block->GetBBPrev()->bbWeight)) { JITDUMP("Adding label due to BB weight difference: BBJ_COND " FMT_BB " with weight " FMT_WT " different from " FMT_BB " with weight " FMT_WT "\n", - block->bbPrev->bbNum, block->bbPrev->bbWeight, block->bbNum, block->bbWeight); + block->GetBBPrev()->bbNum, block->GetBBPrev()->bbWeight, block->bbNum, block->bbWeight); needLabel = true; } @@ -519,7 +519,7 @@ void CodeGen::genCodeForBBlist() #endif // DEBUG #if defined(DEBUG) - if (block->bbNext == nullptr) + if (block->GetBBNext() == nullptr) { // Unit testing of the emitter: generate a bunch of instructions into the last block // (it's as good as any, but better than the prologue, which can only be a single instruction @@ -547,10 +547,10 @@ void CodeGen::genCodeForBBlist() /* Is this the last block, and are there any open scopes left ? 
*/ - bool isLastBlockProcessed = (block->bbNext == nullptr); + bool isLastBlockProcessed = (block->GetBBNext() == nullptr); if (block->isBBCallAlwaysPair()) { - isLastBlockProcessed = (block->bbNext->bbNext == nullptr); + isLastBlockProcessed = (block->GetBBNext()->GetBBNext() == nullptr); } if (compiler->opts.compDbgInfo && isLastBlockProcessed) @@ -615,7 +615,7 @@ void CodeGen::genCodeForBBlist() // Note: we may be generating a few too many NOPs for the case of call preceding an epilog. Technically, // if the next block is a BBJ_RETURN, an epilog will be generated, but there may be some instructions // generated before the OS epilog starts, such as a GS cookie check. - if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext)) + if ((block->GetBBNext() == nullptr) || !BasicBlock::sameEHRegion(block, block->GetBBNext())) { // We only need the NOP if we're not going to generate any more code as part of the block end. @@ -636,7 +636,7 @@ void CodeGen::genCodeForBBlist() break; case BBJ_NONE: - if (block->bbNext == nullptr) + if (block->GetBBNext() == nullptr) { // Call immediately before the end of the code; we should never get here . instGen(INS_BREAKPOINT); // This should never get executed @@ -679,10 +679,10 @@ void CodeGen::genCodeForBBlist() // 2. If this is this is the last block of the hot section. // 3. If the subsequent block is a special throw block. // 4. On AMD64, if the next block is in a different EH region. - if ((block->bbNext == nullptr) || (block->bbNext->bbFlags & BBF_FUNCLET_BEG) || - !BasicBlock::sameEHRegion(block, block->bbNext) || - (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->bbNext)) || - block->bbNext == compiler->fgFirstColdBlock) + if ((block->GetBBNext() == nullptr) || (block->GetBBNext()->bbFlags & BBF_FUNCLET_BEG) || + !BasicBlock::sameEHRegion(block, block->GetBBNext()) || + (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->GetBBNext())) || + block->GetBBNext() == compiler->fgFirstColdBlock) { instGen(INS_BREAKPOINT); // This should never get executed } @@ -783,10 +783,10 @@ void CodeGen::genCodeForBBlist() { GetEmitter()->emitSetLoopBackEdge(block->bbJumpDest); - if (block->bbNext != nullptr) + if (block->GetBBNext() != nullptr) { - JITDUMP("Mark " FMT_BB " as label: alignment end-of-loop\n", block->bbNext->bbNum); - block->bbNext->bbFlags |= BBF_HAS_LABEL; + JITDUMP("Mark " FMT_BB " as label: alignment end-of-loop\n", block->GetBBNext()->bbNum); + block->GetBBNext()->bbFlags |= BBF_HAS_LABEL; } } #endif // FEATURE_LOOP_ALIGN @@ -818,7 +818,7 @@ void CodeGen::genCodeForBBlist() GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->KindIs(BBJ_ALWAYS))); } - if ((block->bbNext != nullptr) && (block->bbNext->isLoopAlign())) + if ((block->GetBBNext() != nullptr) && (block->GetBBNext()->isLoopAlign())) { if (compiler->opts.compJitHideAlignBehindJmp) { diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index 075b1f1c847d16..48ad9679720ba4 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -1520,7 +1520,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) } GetEmitter()->emitIns_J(INS_bl, block->bbJumpDest); - BasicBlock* const nextBlock = block->bbNext; + BasicBlock* const nextBlock = block->GetBBNext(); if (block->bbFlags & BBF_RETLESS_CALL) { @@ -1544,7 +1544,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) BasicBlock* const jumpDest = nextBlock->bbJumpDest; // Now go to where the finally funclet needs to return 
to. - if ((jumpDest == nextBlock->bbNext) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) + if ((jumpDest == nextBlock->GetBBNext()) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) { // Fall-through. // TODO-LOONGARCH64-CQ: Can we get rid of this instruction, and just have the call return directly diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp index 6b858312ae328a..b02e6bb160fe71 100644 --- a/src/coreclr/jit/codegenriscv64.cpp +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -1158,7 +1158,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) } GetEmitter()->emitIns_J(INS_jal, block->bbJumpDest); - BasicBlock* const nextBlock = block->bbNext; + BasicBlock* const nextBlock = block->GetBBNext(); if (block->bbFlags & BBF_RETLESS_CALL) { @@ -1182,7 +1182,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) BasicBlock* const jumpDest = nextBlock->bbJumpDest; // Now go to where the finally funclet needs to return to. - if ((jumpDest == nextBlock->bbNext) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) + if ((jumpDest == nextBlock->GetBBNext()) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) { // Fall-through. // TODO-RISCV64-CQ: Can we get rid of this instruction, and just have the call return directly diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index f5eb3cbf802564..78580fe5e28c13 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -205,7 +205,7 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg) BasicBlock* CodeGen::genCallFinally(BasicBlock* block) { - BasicBlock* const nextBlock = block->bbNext; + BasicBlock* const nextBlock = block->GetBBNext(); #if defined(FEATURE_EH_FUNCLETS) // Generate a call to the finally, like this: @@ -256,7 +256,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) BasicBlock* const jumpDest = nextBlock->bbJumpDest; // Now go to where the finally funclet needs to return to. - if ((jumpDest == nextBlock->bbNext) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) + if ((jumpDest == nextBlock->GetBBNext()) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) { // Fall-through. // TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index c3f63b48e4ab4c..8dd31ea5576636 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -5291,11 +5291,11 @@ PhaseStatus Compiler::placeLoopAlignInstructions() } } - if ((block->bbNext != nullptr) && (block->bbNext->isLoopAlign())) + if ((block->GetBBNext() != nullptr) && (block->GetBBNext()->isLoopAlign())) { // Loop alignment is disabled for cold blocks assert((block->bbFlags & BBF_COLD) == 0); - BasicBlock* const loopTop = block->bbNext; + BasicBlock* const loopTop = block->GetBBNext(); bool isSpecialCallFinally = block->isBBCallAlwaysPairTail(); bool unmarkedLoopAlign = false; @@ -9614,7 +9614,7 @@ BasicBlock* dFindBlock(unsigned bbNum) BasicBlock* block = nullptr; dbBlock = nullptr; - for (block = comp->fgFirstBB; block != nullptr; block = block->bbNext) + for (block = comp->fgFirstBB; block != nullptr; block = block->GetBBNext()) { if (block->bbNum == bbNum) { diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index c98b1331bb8311..e94b76b0a1f7fe 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -6482,7 +6482,7 @@ class Compiler // Returns "true" iff this is a "top entry" loop. 
bool lpIsTopEntry() const { - if (lpHead->bbNext == lpEntry) + if (lpHead->GetBBNext() == lpEntry) { assert(lpHead->bbFallsThrough()); assert(lpTop == lpEntry); diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index 43d8e927c65f75..c37f20a8b528dd 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -633,7 +633,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) BasicBlock* finBeg = ehDsc->ebdHndBeg; - for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) + for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->GetBBNext()) { if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { @@ -642,12 +642,12 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) assert(bcall->isBBCallAlwaysPair()); - RETURN_ON_ABORT(func(bcall->bbNext)); + RETURN_ON_ABORT(func(bcall->GetBBNext())); } RETURN_ON_ABORT(VisitEHSuccessors(comp, this, func)); - for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) + for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->GetBBNext()) { if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { @@ -655,7 +655,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) } assert(bcall->isBBCallAlwaysPair()); - RETURN_ON_ABORT(VisitSuccessorEHSuccessors(comp, this, bcall->bbNext, func)); + RETURN_ON_ABORT(VisitSuccessorEHSuccessors(comp, this, bcall->GetBBNext(), func)); } break; @@ -767,7 +767,7 @@ BasicBlockVisit BasicBlock::VisitRegularSuccs(Compiler* comp, TFunc func) BasicBlock* finBeg = ehDsc->ebdHndBeg; - for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) + for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->GetBBNext()) { if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { @@ -776,7 +776,7 @@ BasicBlockVisit BasicBlock::VisitRegularSuccs(Compiler* comp, TFunc func) assert(bcall->isBBCallAlwaysPair()); - RETURN_ON_ABORT(func(bcall->bbNext)); + RETURN_ON_ABORT(func(bcall->GetBBNext())); } break; @@ -3235,7 +3235,7 @@ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block) // Must do this after we update bbJumpKind of block. if (isCallAlwaysPair) { - BasicBlock* leaveBlk = block->bbNext; + BasicBlock* leaveBlk = block->GetBBNext(); noway_assert(leaveBlk->KindIs(BBJ_ALWAYS)); // leaveBlk is now unreachable, so scrub the pred lists. 
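All of the bbNext/bbPrev rewrites in this commit funnel through the accessors added to block.h above, and the one behavioral point worth spelling out is that the setters repair the opposite link themselves; that is what lets fgUnlinkBlock in the next file drop its manual back-pointer fixup. A self-contained sketch of the invariant, using a simplified stand-in for BasicBlock rather than the real declaration:

struct Block
{
private:
    Block* bbNext = nullptr;
    Block* bbPrev = nullptr;

public:
    Block* GetBBNext() const { return bbNext; }
    Block* GetBBPrev() const { return bbPrev; }

    void SetBBNext(Block* next)
    {
        bbNext = next;
        if (next != nullptr)
        {
            next->bbPrev = this; // keep the successor's back link consistent
        }
    }

    void SetBBPrev(Block* prev)
    {
        bbPrev = prev;
        if (prev != nullptr)
        {
            prev->bbNext = this; // keep the predecessor's forward link consistent
        }
    }
};

The setters deliberately do nothing extra when handed nullptr, which is why callers that truncate the list, such as fgUnlinkBlock and fgUnlinkRange below, still maintain fgFirstBB, fgLastBB, and the head block's null bbPrev by hand.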
diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 9853f3f47b26e0..920dc3fed37067 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -212,12 +212,12 @@ BasicBlock* Compiler::fgNewBasicBlock(BBjumpKinds jumpKind) if (fgFirstBB) { - fgLastBB->setNext(block); + fgLastBB->SetBBNext(block); } else { - fgFirstBB = block; - block->bbPrev = nullptr; + fgFirstBB = block; + block->SetBBPrev(nullptr); } fgLastBB = block; @@ -694,7 +694,7 @@ BasicBlock* Compiler::fgLookupBB(unsigned addr) while (dsc->bbFlags & BBF_INTERNAL) { - dsc = dsc->bbNext; + dsc = dsc->GetBBNext(); mid++; // We skipped over too many, Set hi back to the original mid - 1 @@ -2793,7 +2793,7 @@ void Compiler::fgLinkBasicBlocks() break; } - if (!curBBdesc->bbNext) + if (!curBBdesc->GetBBNext()) { BADCODE("Fall thru the end of a method"); } @@ -2803,7 +2803,7 @@ void Compiler::fgLinkBasicBlocks() FALLTHROUGH; case BBJ_NONE: - fgAddRefPred(curBBdesc->bbNext, curBBdesc, oldEdge); + fgAddRefPred(curBBdesc->GetBBNext(), curBBdesc, oldEdge); break; case BBJ_EHFILTERRET: @@ -2839,7 +2839,7 @@ void Compiler::fgLinkBasicBlocks() /* Default case of CEE_SWITCH (next block), is at end of jumpTab[] */ - noway_assert(*(jumpPtr - 1) == curBBdesc->bbNext); + noway_assert(*(jumpPtr - 1) == curBBdesc->GetBBNext()); break; } @@ -3664,7 +3664,7 @@ void Compiler::fgFindBasicBlocks() hndBegBB->bbCatchTyp = BBCT_FILTER_HANDLER; // Mark all BBs that belong to the filter with the XTnum of the corresponding handler - for (block = filtBB; /**/; block = block->bbNext) + for (block = filtBB; /**/; block = block->GetBBNext()) { if (block == nullptr) { @@ -3685,7 +3685,7 @@ void Compiler::fgFindBasicBlocks() } } - if (!block->bbNext || block->bbNext != hndBegBB) + if (!block->GetBBNext() || block->GetBBNext() != hndBegBB) { BADCODE3("Filter does not immediately precede handler for filter", " at offset %04X", filtBB->bbCodeOffs); @@ -3753,10 +3753,10 @@ void Compiler::fgFindBasicBlocks() HBtab->ebdHandlerType = ToEHHandlerType(clause.Flags); HBtab->ebdTryBeg = tryBegBB; - HBtab->ebdTryLast = (tryEndBB == nullptr) ? fgLastBB : tryEndBB->bbPrev; + HBtab->ebdTryLast = (tryEndBB == nullptr) ? fgLastBB : tryEndBB->GetBBPrev(); HBtab->ebdHndBeg = hndBegBB; - HBtab->ebdHndLast = (hndEndBB == nullptr) ? fgLastBB : hndEndBB->bbPrev; + HBtab->ebdHndLast = (hndEndBB == nullptr) ? fgLastBB : hndEndBB->GetBBPrev(); // // Assert that all of our try/hnd blocks are setup correctly. 
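The ebdTryLast/ebdHndLast updates above lean on the EH table's convention that "last" pointers are inclusive: an exclusive end block (tryEndBB, hndEndBB) maps to an inclusive last via GetBBPrev(), and region walks elsewhere in this commit bound themselves with ebdTryLast->GetBBNext(). A small illustrative walk under that convention; as the block.cpp comment earlier in this commit cautions, funclet creation can split a try body into non-contiguous runs, so treat this as a sketch of the pointer convention, not a safe general-purpose iteration.

// Illustrative only: visit the blocks of a try region assuming they form one
// contiguous run; the exclusive bound is one link past the inclusive
// ebdTryLast block.
void VisitTryRegion(EHblkDsc* HBtab)
{
    for (BasicBlock* blk = HBtab->ebdTryBeg; blk != HBtab->ebdTryLast->GetBBNext(); blk = blk->GetBBNext())
    {
        // ... per-block work for the protected region ...
    }
}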
@@ -3798,7 +3798,7 @@ void Compiler::fgFindBasicBlocks() BasicBlock* block; - for (block = hndBegBB; block && (block->bbCodeOffs < hndEndOff); block = block->bbNext) + for (block = hndBegBB; block && (block->bbCodeOffs < hndEndOff); block = block->GetBBNext()) { if (!block->hasHndIndex()) { @@ -3821,7 +3821,7 @@ void Compiler::fgFindBasicBlocks() /* Mark all blocks within the covered range of the try */ - for (block = tryBegBB; block && (block->bbCodeOffs < tryEndOff); block = block->bbNext) + for (block = tryBegBB; block && (block->bbCodeOffs < tryEndOff); block = block->GetBBNext()) { /* Mark this BB as belonging to a 'try' block */ @@ -4016,7 +4016,7 @@ void Compiler::fgFixEntryFlowForOSR() // fgEnsureFirstBBisScratch(); assert(fgFirstBB->KindIs(BBJ_NONE)); - fgRemoveRefPred(fgFirstBB->bbNext, fgFirstBB); + fgRemoveRefPred(fgFirstBB->GetBBNext(), fgFirstBB); fgFirstBB->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); fgFirstBB->bbJumpDest = fgOSREntryBB; FlowEdge* const edge = fgAddRefPred(fgOSREntryBB, fgFirstBB); @@ -4061,7 +4061,7 @@ void Compiler::fgCheckBasicBlockControlFlow() { case BBJ_NONE: // block flows into the next one (no jump) - fgControlFlowPermitted(blk, blk->bbNext); + fgControlFlowPermitted(blk, blk->GetBBNext()); break; @@ -4073,7 +4073,7 @@ void Compiler::fgCheckBasicBlockControlFlow() case BBJ_COND: // block conditionally jumps to the target - fgControlFlowPermitted(blk, blk->bbNext); + fgControlFlowPermitted(blk, blk->GetBBNext()); fgControlFlowPermitted(blk, blk->bbJumpDest); @@ -4855,7 +4855,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) assert(fgGetPredForBlock(succ, curr) != nullptr); BasicBlock* newBlock; - if (succ == curr->bbNext) + if (succ == curr->GetBBNext()) { // The successor is the fall-through path of a BBJ_COND, or // an immediately following block of a BBJ_SWITCH (which has @@ -4928,16 +4928,12 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) void Compiler::fgUnlinkBlock(BasicBlock* block) { - if (block->bbPrev) + if (block->GetBBPrev()) { - block->bbPrev->bbNext = block->bbNext; - if (block->bbNext) + block->GetBBPrev()->SetBBNext(block->GetBBNext()); + if (block == fgLastBB) { - block->bbNext->bbPrev = block->bbPrev; - } - else - { - fgLastBB = block->bbPrev; + fgLastBB = block->GetBBPrev(); } } else @@ -4946,8 +4942,8 @@ void Compiler::fgUnlinkBlock(BasicBlock* block) assert(block != fgLastBB); assert((fgFirstBBScratch == nullptr) || (fgFirstBBScratch == fgFirstBB)); - fgFirstBB = block->bbNext; - fgFirstBB->bbPrev = nullptr; + fgFirstBB = block->GetBBNext(); + fgFirstBB->SetBBPrev(nullptr); if (fgFirstBBScratch != nullptr) { @@ -4975,22 +4971,22 @@ void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd) assert(bBeg != nullptr); assert(bEnd != nullptr); - BasicBlock* bPrev = bBeg->bbPrev; + BasicBlock* bPrev = bBeg->GetBBPrev(); assert(bPrev != nullptr); // Can't unlink a range starting with the first block - bPrev->setNext(bEnd->bbNext); + bPrev->SetBBNext(bEnd->GetBBNext()); /* If we removed the last block in the method then update fgLastBB */ if (fgLastBB == bEnd) { fgLastBB = bPrev; - noway_assert(fgLastBB->bbNext == nullptr); + noway_assert(fgLastBB->GetBBNext() == nullptr); } // If bEnd was the first Cold basic block update fgFirstColdBlock if (fgFirstColdBlock == bEnd) { - fgFirstColdBlock = bPrev->bbNext; + fgFirstColdBlock = bPrev->GetBBNext(); } #if defined(FEATURE_EH_FUNCLETS) @@ -4999,7 +4995,7 @@ void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd) // can't cross the 
non-funclet/funclet region. And you can't unlink the first block // of the first funclet with this, either. (If that's necessary, it could be allowed // by updating fgFirstFuncletBB to bEnd->bbNext.) - for (BasicBlock* tempBB = bBeg; tempBB != bEnd->bbNext; tempBB = tempBB->bbNext) + for (BasicBlock* tempBB = bBeg; tempBB != bEnd->GetBBNext(); tempBB = tempBB->GetBBNext()) { assert(tempBB != fgFirstFuncletBB); } @@ -5018,7 +5014,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) PREFIX_ASSUME(block != nullptr); - BasicBlock* bPrev = block->bbPrev; + BasicBlock* bPrev = block->GetBBPrev(); JITDUMP("fgRemoveBlock " FMT_BB ", unreachable=%s\n", block->bbNum, dspBool(unreachable)); @@ -5029,7 +5025,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // the SwitchDescs might be removed. InvalidateUniqueSwitchSuccMap(); - noway_assert((block == fgFirstBB) || (bPrev && (bPrev->bbNext == block))); + noway_assert((block == fgFirstBB) || (bPrev && (bPrev->GetBBNext() == block))); noway_assert(!(block->bbFlags & BBF_DONT_REMOVE)); // Should never remove a genReturnBB, as we might have special hookups there. @@ -5050,7 +5046,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // If block was the fgFirstFuncletBB then set fgFirstFuncletBB to block->bbNext if (block == fgFirstFuncletBB) { - fgFirstFuncletBB = block->bbNext; + fgFirstFuncletBB = block->GetBBNext(); } #endif // FEATURE_EH_FUNCLETS @@ -5063,9 +5059,9 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) NO_WAY("No retless call finally blocks; need unwind target instead"); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } - else if (bPrev->KindIs(BBJ_ALWAYS) && bPrev->bbJumpDest == block->bbNext && + else if (bPrev->KindIs(BBJ_ALWAYS) && bPrev->bbJumpDest == block->GetBBNext() && !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) && - (block->bbNext != fgFirstColdBlock)) + (block->GetBBNext() != fgFirstColdBlock)) { // previous block is a BBJ_ALWAYS to the next block: change to BBJ_NONE. 
// Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS), @@ -5077,7 +5073,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // If this is the first Cold basic block update fgFirstColdBlock if (block == fgFirstColdBlock) { - fgFirstColdBlock = block->bbNext; + fgFirstColdBlock = block->GetBBNext(); } /* Unlink this block from the bbNext chain */ @@ -5091,7 +5087,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) */ if (block->isBBCallAlwaysPair()) { - BasicBlock* leaveBlk = block->bbNext; + BasicBlock* leaveBlk = block->GetBBNext(); noway_assert(leaveBlk->KindIs(BBJ_ALWAYS)); leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; @@ -5160,7 +5156,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) } else { - succBlock = block->bbNext; + succBlock = block->GetBBNext(); } bool skipUnmarkLoop = false; @@ -5175,14 +5171,14 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // If this is the first Cold basic block update fgFirstColdBlock if (block == fgFirstColdBlock) { - fgFirstColdBlock = block->bbNext; + fgFirstColdBlock = block->GetBBNext(); } #if defined(FEATURE_EH_FUNCLETS) // Update fgFirstFuncletBB if necessary if (block == fgFirstFuncletBB) { - fgFirstFuncletBB = block->bbNext; + fgFirstFuncletBB = block->GetBBNext(); } #endif // FEATURE_EH_FUNCLETS @@ -5273,7 +5269,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) } /* Check if both side of the BBJ_COND now jump to the same block */ - if (predBlock->bbNext == succBlock) + if (predBlock->GetBBNext() == succBlock) { // Make sure we are replacing "block" with "succBlock" in predBlock->bbJumpDest. noway_assert(predBlock->bbJumpDest == block); @@ -5327,7 +5323,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // the next block. This is the safest fix. We should remove all this BBJ_CALLFINALLY/BBJ_ALWAYS // pairing. 
- if ((bPrev->bbJumpDest == bPrev->bbNext) && + if ((bPrev->bbJumpDest == bPrev->GetBBNext()) && !fgInDifferentRegions(bPrev, bPrev->bbJumpDest)) // We don't remove a branch from Hot -> Cold { if ((bPrev == fgFirstBB) || !bPrev->isBBCallAlwaysPairTail()) @@ -5340,7 +5336,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) case BBJ_COND: /* Check for branch to next block */ - if (bPrev->bbJumpDest == bPrev->bbNext) + if (bPrev->bbJumpDest == bPrev->GetBBNext()) { fgRemoveConditionalJump(bPrev); } @@ -5376,7 +5372,7 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) { /* If bSrc falls through to a block that is not bDst, we will insert a jump to bDst */ - if (bSrc->bbFallsThrough() && (bSrc->bbNext != bDst)) + if (bSrc->bbFallsThrough() && (bSrc->GetBBNext() != bDst)) { switch (bSrc->GetBBJumpKind()) { @@ -5460,12 +5456,12 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) // then change it to a BBJ_NONE block // if (bSrc->KindIs(BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && - (bSrc->bbJumpDest == bSrc->bbNext)) + (bSrc->bbJumpDest == bSrc->GetBBNext())) { bSrc->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); JITDUMP("Changed an unconditional jump from " FMT_BB " to the next block " FMT_BB " into a BBJ_NONE block\n", - bSrc->bbNum, bSrc->bbNext->bbNum); + bSrc->bbNum, bSrc->GetBBNext()->bbNum); } } } @@ -5518,7 +5514,7 @@ bool Compiler::fgRenumberBlocks() block->bbNum = num; } - if (block->bbNext == nullptr) + if (block->GetBBNext() == nullptr) { fgLastBB = block; fgBBcount = num; @@ -5594,7 +5590,7 @@ bool Compiler::fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc /* = NULL * while (true) { - bTemp = bTemp->bbNext; + bTemp = bTemp->GetBBNext(); if (bTemp == nullptr) { @@ -5641,24 +5637,20 @@ void Compiler::fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBloc { printf("Relocated block%s [" FMT_BB ".." FMT_BB "] inserted after " FMT_BB "%s\n", (bStart == bEnd) ? "" : "s", bStart->bbNum, bEnd->bbNum, insertAfterBlk->bbNum, - (insertAfterBlk->bbNext == nullptr) ? " at the end of method" : ""); + (insertAfterBlk->GetBBNext() == nullptr) ? " at the end of method" : ""); } #endif // DEBUG /* relink [bStart .. 
bEnd] into the flow graph */ - bEnd->bbNext = insertAfterBlk->bbNext; - if (insertAfterBlk->bbNext) - { - insertAfterBlk->bbNext->bbPrev = bEnd; - } - insertAfterBlk->setNext(bStart); + bEnd->SetBBNext(insertAfterBlk->GetBBNext()); + insertAfterBlk->SetBBNext(bStart); /* If insertAfterBlk was fgLastBB then update fgLastBB */ if (insertAfterBlk == fgLastBB) { fgLastBB = bEnd; - noway_assert(fgLastBB->bbNext == nullptr); + noway_assert(fgLastBB->GetBBNext() == nullptr); } } @@ -5731,7 +5723,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r #if !defined(FEATURE_EH_FUNCLETS) // In the funclets case, we still need to set some information on the handler blocks - if (bLast->bbNext == NULL) + if (bLast->GetBBNext() == NULL) { INDEBUG(reason = "region is already at the end of the method";) goto FAILURE; @@ -5756,7 +5748,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r noway_assert(inTheRange == false); inTheRange = true; } - else if (block == bLast->bbNext) + else if (block == bLast->GetBBNext()) { noway_assert(inTheRange == true); inTheRange = false; @@ -5782,12 +5774,12 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r break; } - block = block->bbNext; + block = block->GetBBNext(); } // Ensure that bStart .. bLast defined a valid range noway_assert((validRange == true) && (inTheRange == false)); - bPrev = bStart->bbPrev; + bPrev = bStart->GetBBPrev(); noway_assert(bPrev != nullptr); // Can't move a range that includes the first block of the function. JITDUMP("Relocating %s range " FMT_BB ".." FMT_BB " (EH#%u) to end of BBlist\n", @@ -5824,7 +5816,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r #endif // FEATURE_EH_FUNCLETS BasicBlock* bNext; - bNext = bLast->bbNext; + bNext = bLast->GetBBNext(); /* Temporarily unlink [bStart .. bLast] from the flow graph */ fgUnlinkRange(bStart, bLast); @@ -5873,7 +5865,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r { // If we moved a set of blocks that were at the end of // a different try region then we may need to update ebdTryLast - for (block = HBtab->ebdTryBeg; block != nullptr; block = block->bbNext) + for (block = HBtab->ebdTryBeg; block != nullptr; block = block->GetBBNext()) { if (block == bPrev) { @@ -5882,7 +5874,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r fgSetTryEnd(HBtab, bPrev); break; } - else if (block == HBtab->ebdTryLast->bbNext) + else if (block == HBtab->ebdTryLast->GetBBNext()) { // bPrev does not come after the TryBeg, thus we are larger, and // it is moving with us. @@ -5894,14 +5886,14 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r { // If we moved a set of blocks that were at the end of // a different handler region then we must update ebdHndLast - for (block = HBtab->ebdHndBeg; block != nullptr; block = block->bbNext) + for (block = HBtab->ebdHndBeg; block != nullptr; block = block->GetBBNext()) { if (block == bPrev) { fgSetHndEnd(HBtab, bPrev); break; } - else if (block == HBtab->ebdHndLast->bbNext) + else if (block == HBtab->ebdHndLast->GetBBNext()) { // bPrev does not come after the HndBeg break; @@ -5921,7 +5913,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r else { assert(fgFirstFuncletBB != - insertAfterBlk->bbNext); // We insert at the end, not at the beginning, of the funclet region. 
+ insertAfterBlk->GetBBNext()); // We insert at the end, not at the beginning, of the funclet region. } // These asserts assume we aren't moving try regions (which we might need to do). Only @@ -5955,14 +5947,14 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r { // If we moved a set of blocks that were at the end of // a different try region then we may need to update ebdTryLast - for (block = HBtab->ebdTryBeg; block != NULL; block = block->bbNext) + for (block = HBtab->ebdTryBeg; block != NULL; block = block->GetBBNext()) { if (block == bPrev) { fgSetTryEnd(HBtab, bPrev); break; } - else if (block == HBtab->ebdTryLast->bbNext) + else if (block == HBtab->ebdTryLast->GetBBNext()) { // bPrev does not come after the TryBeg break; @@ -5973,14 +5965,14 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r { // If we moved a set of blocks that were at the end of // a different handler region then we must update ebdHndLast - for (block = HBtab->ebdHndBeg; block != NULL; block = block->bbNext) + for (block = HBtab->ebdHndBeg; block != NULL; block = block->GetBBNext()) { if (block == bPrev) { fgSetHndEnd(HBtab, bPrev); break; } - else if (block == HBtab->ebdHndLast->bbNext) + else if (block == HBtab->ebdHndLast->GetBBNext()) { // bPrev does not come after the HndBeg break; @@ -6180,16 +6172,16 @@ BasicBlock* Compiler::fgNewBBFromTreeAfter( */ void Compiler::fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk) { - if (insertBeforeBlk->bbPrev) + if (insertBeforeBlk->GetBBPrev()) { - fgInsertBBafter(insertBeforeBlk->bbPrev, newBlk); + fgInsertBBafter(insertBeforeBlk->GetBBPrev(), newBlk); } else { - newBlk->setNext(fgFirstBB); + newBlk->SetBBNext(fgFirstBB); - fgFirstBB = newBlk; - newBlk->bbPrev = nullptr; + fgFirstBB = newBlk; + newBlk->SetBBPrev(nullptr); } #if defined(FEATURE_EH_FUNCLETS) @@ -6212,20 +6204,13 @@ void Compiler::fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk) */ void Compiler::fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk) { - newBlk->bbNext = insertAfterBlk->bbNext; - - if (insertAfterBlk->bbNext) - { - insertAfterBlk->bbNext->bbPrev = newBlk; - } - - insertAfterBlk->bbNext = newBlk; - newBlk->bbPrev = insertAfterBlk; + newBlk->SetBBNext(insertAfterBlk->GetBBNext()); + insertAfterBlk->SetBBNext(newBlk); if (fgLastBB == insertAfterBlk) { fgLastBB = newBlk; - assert(fgLastBB->bbNext == nullptr); + assert(fgLastBB->GetBBNext() == nullptr); } } @@ -6256,7 +6241,7 @@ bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt) } // Currently bNext is the fall through for bCur - BasicBlock* bNext = bCur->bbNext; + BasicBlock* bNext = bCur->GetBBNext(); noway_assert(bNext != nullptr); // We will set result to true if bAlt is a better fall through than bCur @@ -6382,7 +6367,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, // Assert that startBlk precedes endBlk in the block list. // We don't want to use bbNum to assert this condition, as we cannot depend on the block numbers being // sequential at all times. - for (BasicBlock* b = startBlk; b != endBlk; b = b->bbNext) + for (BasicBlock* b = startBlk; b != endBlk; b = b->GetBBNext()) { assert(b != nullptr); // We reached the end of the block list, but never found endBlk. } @@ -6411,7 +6396,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, if (nearBlk != nullptr) { // Does the nearBlk precede the startBlk? 
- for (blk = nearBlk; blk != nullptr; blk = blk->bbNext) + for (blk = nearBlk; blk != nullptr; blk = blk->GetBBNext()) { if (blk == startBlk) { @@ -6425,7 +6410,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, } } - for (blk = startBlk; blk != endBlk; blk = blk->bbNext) + for (blk = startBlk; blk != endBlk; blk = blk->GetBBNext()) { // The only way (blk == nullptr) could be true is if the caller passed an endBlk that preceded startBlk in the // block list, or if endBlk isn't in the block list at all. In DEBUG, we'll instead hit the similar @@ -6464,7 +6449,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, // and be in the correct EH region. This must be guaranteed by the caller (as it is by // fgNewBBinRegion(), which passes the search range as an exact EH region block range). // Because of this assumption, we only check the EH information for blocks before the last block. - if (blk->bbNext != endBlk) + if (blk->GetBBNext() != endBlk) { // We are in the middle of the search range. We can't insert the new block in // an inner try or handler region. We can, however, set the insertion @@ -6598,10 +6583,10 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, // inserted block is marked as the entry block for the filter. Because this sort of split can be complex // (especially given that it must ensure that the liveness of the exception object is properly tracked), // we avoid this situation by never generating single-block filters on x86 (see impPushCatchArgOnStack). - if (insertingIntoFilter && (bestBlk == endBlk->bbPrev)) + if (insertingIntoFilter && (bestBlk == endBlk->GetBBPrev())) { assert(bestBlk != startBlk); - bestBlk = bestBlk->bbPrev; + bestBlk = bestBlk->GetBBPrev(); } #endif // defined(JIT32_GCENCODER) @@ -6756,7 +6741,7 @@ BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind, // We will put the newBB in the try region. EHblkDsc* ehDsc = ehGetDsc(tryIndex - 1); startBlk = ehDsc->ebdTryBeg; - endBlk = ehDsc->ebdTryLast->bbNext; + endBlk = ehDsc->ebdTryLast->GetBBNext(); regionIndex = tryIndex; } else if (putInFilter) @@ -6772,7 +6757,7 @@ BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind, // We will put the newBB in the handler region. EHblkDsc* ehDsc = ehGetDsc(hndIndex - 1); startBlk = ehDsc->ebdHndBeg; - endBlk = ehDsc->ebdHndLast->bbNext; + endBlk = ehDsc->ebdHndLast->GetBBNext(); regionIndex = hndIndex; } @@ -6872,7 +6857,7 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind, bool putInTryRegion) { /* Insert the new block */ - BasicBlock* afterBlkNext = afterBlk->bbNext; + BasicBlock* afterBlkNext = afterBlk->GetBBNext(); (void)afterBlkNext; // prevent "unused variable" error from GCC BasicBlock* newBlk = fgNewBBafter(jumpKind, afterBlk, false); @@ -6905,7 +6890,7 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind, // Is afterBlk at the end of a try region? if (HBtab->ebdTryLast == afterBlk) { - noway_assert(afterBlkNext == newBlk->bbNext); + noway_assert(afterBlkNext == newBlk->GetBBNext()); bool extendTryRegion = false; if (newBlk->hasTryIndex()) @@ -6944,7 +6929,7 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind, // Is afterBlk at the end of a handler region? if (HBtab->ebdHndLast == afterBlk) { - noway_assert(afterBlkNext == newBlk->bbNext); + noway_assert(afterBlkNext == newBlk->GetBBNext()); // Does newBlk extend this handler region?
bool extendHndRegion = false; @@ -6982,7 +6967,7 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind, } /* If afterBlk falls through, we insert a jump around newBlk */ - fgConnectFallThrough(afterBlk, newBlk->bbNext); + fgConnectFallThrough(afterBlk, newBlk->GetBBNext()); // If the loop table is valid, add this block to the appropriate loop. // Note we don't verify (via flow) that this block actually belongs @@ -6991,8 +6976,8 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind, // if (optLoopTableValid) { - BasicBlock* const bbPrev = newBlk->bbPrev; - BasicBlock* const bbNext = newBlk->bbNext; + BasicBlock* const bbPrev = newBlk->GetBBPrev(); + BasicBlock* const bbNext = newBlk->GetBBNext(); if ((bbPrev != nullptr) && (bbNext != nullptr)) { diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index 318e241d35ae09..16378e567ce488 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -82,7 +82,7 @@ void Compiler::fgDebugCheckUpdate() BasicBlock* prev; BasicBlock* block; - for (prev = nullptr, block = fgFirstBB; block != nullptr; prev = block, block = block->bbNext) + for (prev = nullptr, block = fgFirstBB; block != nullptr; prev = block, block = block->GetBBNext()) { /* no unreachable blocks */ @@ -168,7 +168,7 @@ void Compiler::fgDebugCheckUpdate() // We are allowed to have a branch from a hot 'block' to a cold 'bbNext' // - if ((block->bbNext != nullptr) && fgInDifferentRegions(block, block->bbNext)) + if ((block->GetBBNext() != nullptr) && fgInDifferentRegions(block, block->GetBBNext())) { doAssertOnJumpToNextBlock = false; } @@ -176,7 +176,7 @@ void Compiler::fgDebugCheckUpdate() if (doAssertOnJumpToNextBlock) { - if (block->bbJumpDest == block->bbNext) + if (block->bbJumpDest == block->GetBBNext()) { noway_assert(!"Unnecessary jump to the next block!"); } @@ -199,7 +199,7 @@ void Compiler::fgDebugCheckUpdate() /* no un-compacted blocks */ - if (fgCanCompactBlocks(block, block->bbNext)) + if (fgCanCompactBlocks(block, block->GetBBNext())) { noway_assert(!"Found un-compacted blocks!"); } @@ -889,7 +889,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) "ALWAYS", "LEAVE", "CALLFINALLY", "COND", "SWITCH"}; BasicBlock* block; - for (block = fgFirstBB, blockOrdinal = 1; block != nullptr; block = block->bbNext, blockOrdinal++) + for (block = fgFirstBB, blockOrdinal = 1; block != nullptr; block = block->GetBBNext(), blockOrdinal++) { if (createDotFile) { @@ -1091,7 +1091,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) { unsigned edgeNum = 1; BasicBlock* bTarget; - for (bTarget = fgFirstBB; bTarget != nullptr; bTarget = bTarget->bbNext) + for (bTarget = fgFirstBB; bTarget != nullptr; bTarget = bTarget->GetBBNext()) { double targetWeightDivisor; if (bTarget->bbWeight == BB_ZERO_WEIGHT) @@ -1214,10 +1214,10 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) { // Invisible edge for bbNext chain // - if (bSource->bbNext != nullptr) + if (bSource->GetBBNext() != nullptr) { fprintf(fgxFile, " " FMT_BB " -> " FMT_BB " [style=\"invis\", weight=25];\n", bSource->bbNum, - bSource->bbNext->bbNum); + bSource->GetBBNext()->bbNum); } } @@ -1641,7 +1641,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) bool needIndent = true; BasicBlock* bbCur = rgn->m_bbStart; - BasicBlock* bbEnd = rgn->m_bbEnd->bbNext; + BasicBlock* bbEnd = rgn->m_bbEnd->GetBBNext(); Region* child = rgn->m_rgnChild; BasicBlock* childCurBB = (child == nullptr) ? 
nullptr : child->m_bbStart; @@ -1660,7 +1660,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) { fprintf(file, "%*s" FMT_BB ";", needIndent ? indent : 0, "", bbCur->bbNum); needIndent = false; - bbCur = bbCur->bbNext; + bbCur = bbCur->GetBBNext(); } if (bbCur == bbEnd) @@ -1684,8 +1684,8 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) childCount++; - bbCur = child->m_bbEnd->bbNext; // Next, output blocks after this child. - child = child->m_rgnNext; // Move to the next child, if any. + bbCur = child->m_bbEnd->GetBBNext(); // Next, output blocks after this child. + child = child->m_rgnNext; // Move to the next child, if any. childCurBB = (child == nullptr) ? nullptr : child->m_bbStart; } } @@ -1745,7 +1745,8 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) if (ehDsc->HasFilter()) { sprintf_s(name, sizeof(name), "EH#%u filter", XTnum); - rgnGraph.Insert(name, RegionGraph::RegionType::EH, ehDsc->ebdFilter, ehDsc->ebdHndBeg->bbPrev); + rgnGraph.Insert(name, RegionGraph::RegionType::EH, ehDsc->ebdFilter, + ehDsc->ebdHndBeg->GetBBPrev()); } } } @@ -2200,7 +2201,7 @@ void Compiler::fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth /* = 0 * /* brace matching editor workaround to compensate for the following line: { */ printf("} "); } - if (HBtab->HasFilter() && block->bbNext == HBtab->ebdHndBeg) + if (HBtab->HasFilter() && block->GetBBNext() == HBtab->ebdHndBeg) { cnt += 2; /* brace matching editor workaround to compensate for the following line: { */ @@ -2256,7 +2257,7 @@ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, int ibcColWidth = 0; - for (BasicBlock* block = firstBlock; block != nullptr; block = block->bbNext) + for (BasicBlock* block = firstBlock; block != nullptr; block = block->GetBBNext()) { if (block->hasProfileWeight()) { @@ -2340,9 +2341,9 @@ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, for (BasicBlock* block : *fgBBOrder) { // First, do some checking on the bbPrev links - if (block->bbPrev) + if (block->GetBBPrev()) { - if (block->bbPrev->bbNext != block) + if (block->GetBBPrev()->GetBBNext() != block) { printf("bad prev link\n"); } @@ -2450,7 +2451,7 @@ void Compiler::fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock) { // Note that typically we have already called fgDispBasicBlocks() // so we don't need to print the preds and succs again here. - for (BasicBlock* block = firstBlock; block != nullptr; block = block->bbNext) + for (BasicBlock* block = firstBlock; block != nullptr; block = block->GetBBNext()) { fgDumpBlock(block); @@ -2605,7 +2606,7 @@ bool BBPredsChecker::CheckEhTryDsc(BasicBlock* block, BasicBlock* blockPred, EHb // is marked as "returning" to the BBJ_ALWAYS block following the BBJ_CALLFINALLY // block that does a local call to the finally. This BBJ_ALWAYS is within // the try region protected by the finally (for x86, ARM), but that's ok. 
- BasicBlock* prevBlock = block->bbPrev; + BasicBlock* prevBlock = block->GetBBPrev(); if (prevBlock->KindIs(BBJ_CALLFINALLY) && block->KindIs(BBJ_ALWAYS) && blockPred->KindIs(BBJ_EHFINALLYRET)) { return true; @@ -2662,11 +2663,11 @@ bool BBPredsChecker::CheckJump(BasicBlock* blockPred, BasicBlock* block) switch (blockPred->GetBBJumpKind()) { case BBJ_COND: - assert(blockPred->bbNext == block || blockPred->bbJumpDest == block); + assert(blockPred->GetBBNext() == block || blockPred->bbJumpDest == block); return true; case BBJ_NONE: - assert(blockPred->bbNext == block); + assert(blockPred->GetBBNext() == block); return true; case BBJ_CALLFINALLY: @@ -2731,14 +2732,14 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) BasicBlock* endBlk; comp->ehGetCallFinallyBlockRange(hndIndex, &begBlk, &endBlk); - for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) + for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->GetBBNext()) { if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { continue; } - if (block == bcall->bbNext) + if (block == bcall->GetBBNext()) { return true; } @@ -2760,7 +2761,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) continue; } - if (block != bcall->bbNext) + if (block != bcall->GetBBNext()) { continue; } @@ -2789,7 +2790,7 @@ void Compiler::fgDebugCheckBBNumIncreasing() { for (BasicBlock* const block : Blocks()) { - assert(block->bbNext == nullptr || (block->bbNum < block->bbNext->bbNum)); + assert(block->GetBBNext() == nullptr || (block->bbNum < block->GetBBNext()->bbNum)); } } @@ -2865,7 +2866,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef if (checkBBNum) { // Check that bbNum is sequential - assert(block->bbNext == nullptr || (block->bbNum + 1 == block->bbNext->bbNum)); + assert(block->GetBBNext() == nullptr || (block->bbNum + 1 == block->GetBBNext()->bbNum)); } // If the block is a BBJ_COND, a BBJ_SWITCH or a @@ -3703,22 +3704,22 @@ void Compiler::fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees) // ensure that bbNext and bbPrev are consistent void Compiler::fgDebugCheckBlockLinks() { - assert(fgFirstBB->bbPrev == nullptr); + assert(fgFirstBB->GetBBPrev() == nullptr); for (BasicBlock* const block : Blocks()) { - if (block->bbNext) + if (block->GetBBNext()) { - assert(block->bbNext->bbPrev == block); + assert(block->GetBBNext()->GetBBPrev() == block); } else { assert(block == fgLastBB); } - if (block->bbPrev) + if (block->GetBBPrev()) { - assert(block->bbPrev->bbNext == block); + assert(block->GetBBPrev()->GetBBNext() == block); } else { @@ -4798,7 +4799,7 @@ void Compiler::fgDebugCheckLoopTable() else { assert(h->KindIs(BBJ_NONE)); - assert(h->bbNext == e); + assert(h->GetBBNext() == e); assert(loop.lpTop == e); assert(loop.lpIsTopEntry()); } diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index e5fbe43e1590f3..a71770ab613184 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -140,7 +140,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() while (currentBlock != endCallFinallyRangeBlock) { - BasicBlock* nextBlock = currentBlock->bbNext; + BasicBlock* nextBlock = currentBlock->GetBBNext(); if (currentBlock->KindIs(BBJ_CALLFINALLY) && (currentBlock->bbJumpDest == firstBlock)) { @@ -151,7 +151,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() // the finally is empty. 
noway_assert(currentBlock->isBBCallAlwaysPair()); - BasicBlock* const leaveBlock = currentBlock->bbNext; + BasicBlock* const leaveBlock = currentBlock->GetBBNext(); BasicBlock* const postTryFinallyBlock = leaveBlock->bbJumpDest; JITDUMP("Modifying callfinally " FMT_BB " leave " FMT_BB " finally " FMT_BB " continuation " FMT_BB @@ -172,7 +172,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() // Delete the leave block, which should be marked as // keep always and have the sole finally block as a pred. assert((leaveBlock->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0); - nextBlock = leaveBlock->bbNext; + nextBlock = leaveBlock->GetBBNext(); fgRemoveRefPred(leaveBlock, firstBlock); leaveBlock->bbFlags &= ~BBF_KEEP_BBJ_ALWAYS; fgRemoveBlock(leaveBlock, /* unreachable */ true); @@ -398,7 +398,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() if (firstTryBlock != lastTryBlock) { JITDUMP("EH#%u first try block " FMT_BB " not only block in try; skipping.\n", XTnum, - firstTryBlock->bbNext->bbNum); + firstTryBlock->GetBBNext()->bbNum); XTnum++; continue; } @@ -417,9 +417,10 @@ PhaseStatus Compiler::fgRemoveEmptyTry() BasicBlock* const callFinally = firstTryBlock; // Try must be a callalways pair of blocks. - if (firstTryBlock->bbNext != lastTryBlock) + if (firstTryBlock->GetBBNext() != lastTryBlock) { - JITDUMP("EH#%u block " FMT_BB " not last block in try; skipping.\n", XTnum, firstTryBlock->bbNext->bbNum); + JITDUMP("EH#%u block " FMT_BB " not last block in try; skipping.\n", XTnum, + firstTryBlock->GetBBNext()->bbNum); XTnum++; continue; } @@ -435,7 +436,8 @@ PhaseStatus Compiler::fgRemoveEmptyTry() bool verifiedSingleCallfinally = true; ehGetCallFinallyBlockRange(XTnum, &firstCallFinallyRangeBlock, &endCallFinallyRangeBlock); - for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock; block = block->bbNext) + for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock; + block = block->GetBBNext()) { if (block->KindIs(BBJ_CALLFINALLY) && (block->bbJumpDest == firstHandlerBlock)) { @@ -448,7 +450,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() break; } - block = block->bbNext; + block = block->GetBBNext(); } } @@ -466,7 +468,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() callFinally->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // Identify the leave block and the continuation - BasicBlock* const leave = callFinally->bbNext; + BasicBlock* const leave = callFinally->GetBBNext(); BasicBlock* const continuation = leave->bbJumpDest; // (2) Cleanup the leave so it can be deleted by subsequent opts @@ -729,14 +731,14 @@ PhaseStatus Compiler::fgCloneFinally() BasicBlock* const lastBlock = HBtab->ebdHndLast; assert(firstBlock != nullptr); assert(lastBlock != nullptr); - BasicBlock* nextBlock = lastBlock->bbNext; + BasicBlock* nextBlock = lastBlock->GetBBNext(); unsigned regionBBCount = 0; unsigned regionStmtCount = 0; bool hasFinallyRet = false; bool isAllRare = true; bool hasSwitch = false; - for (const BasicBlock* block = firstBlock; block != nextBlock; block = block->bbNext) + for (const BasicBlock* block = firstBlock; block != nextBlock; block = block->GetBBNext()) { if (block->KindIs(BBJ_SWITCH)) { @@ -804,7 +806,7 @@ PhaseStatus Compiler::fgCloneFinally() BasicBlock* const lastTryBlock = HBtab->ebdTryLast; assert(firstTryBlock->getTryIndex() == XTnum); assert(bbInTryRegions(XTnum, lastTryBlock)); - BasicBlock* const beforeTryBlock = firstTryBlock->bbPrev; + BasicBlock* const beforeTryBlock = firstTryBlock->GetBBPrev(); BasicBlock* normalCallFinallyBlock = 
nullptr; BasicBlock* normalCallFinallyReturn = nullptr; @@ -813,7 +815,7 @@ PhaseStatus Compiler::fgCloneFinally() const bool usingProfileWeights = fgIsUsingProfileWeights(); weight_t currentWeight = BB_ZERO_WEIGHT; - for (BasicBlock* block = lastTryBlock; block != beforeTryBlock; block = block->bbPrev) + for (BasicBlock* block = lastTryBlock; block != beforeTryBlock; block = block->GetBBPrev()) { #if FEATURE_EH_CALLFINALLY_THUNKS // Blocks that transfer control to callfinallies are usually @@ -823,7 +825,7 @@ PhaseStatus Compiler::fgCloneFinally() if (block->KindIs(BBJ_NONE) && (block == lastTryBlock)) { - jumpDest = block->bbNext; + jumpDest = block->GetBBNext(); } else if (block->KindIs(BBJ_ALWAYS)) { @@ -853,7 +855,7 @@ PhaseStatus Compiler::fgCloneFinally() // Found a block that invokes the finally. // - BasicBlock* const finallyReturnBlock = jumpDest->bbNext; + BasicBlock* const finallyReturnBlock = jumpDest->GetBBNext(); BasicBlock* const postTryFinallyBlock = finallyReturnBlock->bbJumpDest; bool isUpdate = false; @@ -967,7 +969,7 @@ PhaseStatus Compiler::fgCloneFinally() BasicBlock* firstCallFinallyBlock = nullptr; for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock; - block = block->bbNext) + block = block->GetBBNext()) { if (block->isBBCallAlwaysPair()) { @@ -987,7 +989,7 @@ PhaseStatus Compiler::fgCloneFinally() // but only if it's targeted by the last block in the try range. if (firstCallFinallyBlock != normalCallFinallyBlock) { - BasicBlock* const placeToMoveAfter = firstCallFinallyBlock->bbPrev; + BasicBlock* const placeToMoveAfter = firstCallFinallyBlock->GetBBPrev(); if (placeToMoveAfter->KindIs(BBJ_ALWAYS) && (placeToMoveAfter->bbJumpDest == normalCallFinallyBlock)) { @@ -995,7 +997,7 @@ PhaseStatus Compiler::fgCloneFinally() normalCallFinallyBlock->bbNum, firstCallFinallyBlock->bbNum); BasicBlock* const firstToMove = normalCallFinallyBlock; - BasicBlock* const lastToMove = normalCallFinallyBlock->bbNext; + BasicBlock* const lastToMove = normalCallFinallyBlock->GetBBNext(); fgUnlinkRange(firstToMove, lastToMove); fgMoveBlocksAfter(firstToMove, lastToMove, placeToMoveAfter); @@ -1006,7 +1008,7 @@ PhaseStatus Compiler::fgCloneFinally() fgVerifyHandlerTab(); #endif // DEBUG - assert(nextBlock == lastBlock->bbNext); + assert(nextBlock == lastBlock->GetBBNext()); // Update where the callfinally range begins, since we might // have altered this with callfinally rearrangement, and/or @@ -1043,7 +1045,7 @@ PhaseStatus Compiler::fgCloneFinally() unsigned cloneBBCount = 0; weight_t const originalWeight = firstBlock->hasProfileWeight() ? firstBlock->bbWeight : BB_ZERO_WEIGHT; - for (BasicBlock* block = firstBlock; block != nextBlock; block = block->bbNext) + for (BasicBlock* block = firstBlock; block != nextBlock; block = block->GetBBNext()) { BasicBlock* newBlock; @@ -1062,9 +1064,9 @@ PhaseStatus Compiler::fgCloneFinally() // If the clone ends up just after the finally, adjust // the stopping point for finally traversal. - if (newBlock->bbNext == nextBlock) + if (newBlock->GetBBNext() == nextBlock) { - assert(newBlock->bbPrev == lastBlock); + assert(newBlock->GetBBPrev() == lastBlock); nextBlock = newBlock; } } @@ -1127,7 +1129,7 @@ PhaseStatus Compiler::fgCloneFinally() // Redirect any branches within the newly-cloned // finally, and any finally returns to jump to the return // point. 
- for (BasicBlock* block = firstBlock; block != nextBlock; block = block->bbNext) + for (BasicBlock* block = firstBlock; block != nextBlock; block = block->GetBBNext()) { BasicBlock* newBlock = blockMap[block]; @@ -1159,13 +1161,13 @@ PhaseStatus Compiler::fgCloneFinally() while (currentBlock != endCallFinallyRangeBlock) { - BasicBlock* nextBlockToScan = currentBlock->bbNext; + BasicBlock* nextBlockToScan = currentBlock->GetBBNext(); if (currentBlock->isBBCallAlwaysPair()) { if (currentBlock->bbJumpDest == firstBlock) { - BasicBlock* const leaveBlock = currentBlock->bbNext; + BasicBlock* const leaveBlock = currentBlock->GetBBNext(); BasicBlock* const postTryFinallyBlock = leaveBlock->bbJumpDest; // Note we must retarget all callfinallies that have this @@ -1189,7 +1191,7 @@ PhaseStatus Compiler::fgCloneFinally() // Delete the leave block, which should be marked as // keep always. assert((leaveBlock->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0); - nextBlock = leaveBlock->bbNext; + nextBlock = leaveBlock->GetBBNext(); // All preds should be BBJ_EHFINALLYRETs from the finally. for (BasicBlock* const leavePred : leaveBlock->PredBlocks()) @@ -1237,8 +1239,8 @@ PhaseStatus Compiler::fgCloneFinally() // Change all BBJ_EHFINALLYRET to BBJ_EHFAULTRET in the now-fault region. BasicBlock* const hndBegIter = HBtab->ebdHndBeg; - BasicBlock* const hndEndIter = HBtab->ebdHndLast->bbNext; - for (BasicBlock* block = hndBegIter; block != hndEndIter; block = block->bbNext) + BasicBlock* const hndEndIter = HBtab->ebdHndLast->GetBBNext(); + for (BasicBlock* block = hndBegIter; block != hndEndIter; block = block->GetBBNext()) { if (block->KindIs(BBJ_EHFINALLYRET)) { @@ -1469,7 +1471,7 @@ void Compiler::fgDebugCheckTryFinallyExits() { if (succBlock->isEmpty()) { - BasicBlock* const succSuccBlock = succBlock->bbNext; + BasicBlock* const succSuccBlock = succBlock->GetBBNext(); // case (d) if (succSuccBlock->bbFlags & BBF_CLONED_FINALLY_BEGIN) @@ -1620,7 +1622,7 @@ void Compiler::fgAddFinallyTargetFlags() { if (block->isBBCallAlwaysPair()) { - BasicBlock* const leave = block->bbNext; + BasicBlock* const leave = block->GetBBNext(); BasicBlock* const continuation = leave->bbJumpDest; if ((continuation->bbFlags & BBF_FINALLY_TARGET) == 0) @@ -1789,7 +1791,7 @@ PhaseStatus Compiler::fgMergeFinallyChains() BasicBlock* const beginHandlerBlock = HBtab->ebdHndBeg; for (BasicBlock* currentBlock = firstCallFinallyRangeBlock; currentBlock != endCallFinallyRangeBlock; - currentBlock = currentBlock->bbNext) + currentBlock = currentBlock->GetBBNext()) { // Ignore "retless" callfinallys (where the finally doesn't return). if (currentBlock->isBBCallAlwaysPair() && (currentBlock->bbJumpDest == beginHandlerBlock)) @@ -1803,7 +1805,7 @@ PhaseStatus Compiler::fgMergeFinallyChains() callFinallyCount++; // Locate the continuation - BasicBlock* const leaveBlock = currentBlock->bbNext; + BasicBlock* const leaveBlock = currentBlock->GetBBNext(); BasicBlock* const continuationBlock = leaveBlock->bbJumpDest; // If this is the first time we've seen this @@ -1836,7 +1838,7 @@ PhaseStatus Compiler::fgMergeFinallyChains() // sure they all jump to the appropriate canonical // callfinally. 
for (BasicBlock* currentBlock = firstCallFinallyRangeBlock; currentBlock != endCallFinallyRangeBlock; - currentBlock = currentBlock->bbNext) + currentBlock = currentBlock->GetBBNext()) { bool merged = fgRetargetBranchesToCanonicalCallFinally(currentBlock, beginHandlerBlock, continuationMap); didMerge = didMerge || merged; @@ -1921,7 +1923,7 @@ bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, // Ok, this is a callfinally that invokes the right handler. // Get its continuation. - BasicBlock* const leaveBlock = callFinally->bbNext; + BasicBlock* const leaveBlock = callFinally->GetBBNext(); BasicBlock* const continuationBlock = leaveBlock->bbJumpDest; // Find the canonical callfinally for that continuation. @@ -1956,7 +1958,7 @@ bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, canonicalCallFinally->setBBProfileWeight(newCanonicalWeight); - BasicBlock* const canonicalLeaveBlock = canonicalCallFinally->bbNext; + BasicBlock* const canonicalLeaveBlock = canonicalCallFinally->GetBBNext(); weight_t const canonicalLeaveWeight = canonicalLeaveBlock->hasProfileWeight() ? canonicalLeaveBlock->bbWeight : BB_ZERO_WEIGHT; @@ -2099,7 +2101,7 @@ PhaseStatus Compiler::fgTailMergeThrows() // Walk blocks from last to first so that any branches we // introduce to the canonical blocks end up lexically forward // and there is less jumbled flow to sort out later. - for (BasicBlock* block = fgLastBB; block != nullptr; block = block->bbPrev) + for (BasicBlock* block = fgLastBB; block != nullptr; block = block->GetBBPrev()) { // Workaround: don't consider try entry blocks as candidates // for merging; if the canonical throw is later in the same try, @@ -2213,7 +2215,7 @@ PhaseStatus Compiler::fgTailMergeThrows() case BBJ_COND: { // Flow to non canonical block could be via fall through or jump or both. 
- if (predBlock->bbNext == nonCanonicalBlock)
+ if (predBlock->GetBBNext() == nonCanonicalBlock)
{
fgTailMergeThrowsFallThroughHelper(predBlock, nonCanonicalBlock, canonicalBlock, predEdge);
}
@@ -2289,7 +2291,7 @@ void Compiler::fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock,
BasicBlock* canonicalBlock,
FlowEdge* predEdge)
{
- assert(predBlock->bbNext == nonCanonicalBlock);
+ assert(predBlock->GetBBNext() == nonCanonicalBlock);
BasicBlock* const newBlock = fgNewBBafter(BBJ_ALWAYS, predBlock, true);
@@ -2300,7 +2302,6 @@ void Compiler::fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock,
fgRemoveRefPred(nonCanonicalBlock, predBlock);
// Wire up the new flow
- predBlock->bbNext = newBlock;
fgAddRefPred(newBlock, predBlock, predEdge);
newBlock->bbJumpDest = canonicalBlock;
diff --git a/src/coreclr/jit/fgflow.cpp index d2669ccaca3823..c3dcac465426ba 100644
--- a/src/coreclr/jit/fgflow.cpp
+++ b/src/coreclr/jit/fgflow.cpp
@@ -351,7 +351,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
assert(block->isBBCallAlwaysPair());
/* The block after the BBJ_CALLFINALLY block is not reachable */
- bNext = block->bbNext;
+ bNext = block->GetBBNext();
/* bNext is an unreachable BBJ_ALWAYS block */
noway_assert(bNext->KindIs(BBJ_ALWAYS));
@@ -370,12 +370,12 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
break;
case BBJ_NONE:
- fgRemoveRefPred(block->bbNext, block);
+ fgRemoveRefPred(block->GetBBNext(), block);
break;
case BBJ_COND:
fgRemoveRefPred(block->bbJumpDest, block);
- fgRemoveRefPred(block->bbNext, block);
+ fgRemoveRefPred(block->GetBBNext(), block);
break;
case BBJ_EHFILTERRET:
@@ -401,7 +401,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
BasicBlock* finBeg = ehDsc->ebdHndBeg;
- for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext)
+ for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->GetBBNext())
{
if ((bcall->bbFlags & BBF_REMOVED) || !bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg)
{
continue;
}
assert(bcall->isBBCallAlwaysPair());
- fgRemoveRefPred(bcall->bbNext, block);
+ fgRemoveRefPred(bcall->GetBBNext(), block);
}
}
}
@@ -468,7 +468,7 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock*
BasicBlock* finBeg = ehDsc->ebdHndBeg;
- for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext)
+ for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->GetBBNext())
{
if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg)
{
@@ -479,7 +479,7 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock*
if (succNum == i)
{
- *bres = bcall->bbNext;
+ *bres = bcall->GetBBNext();
return;
}
succNum++;
diff --git a/src/coreclr/jit/fginline.cpp index a844199697a60c..6548769f0e7401 100644
--- a/src/coreclr/jit/fginline.cpp
+++ b/src/coreclr/jit/fginline.cpp
@@ -676,7 +676,7 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitor<SubstitutePlaceholdersAndDevirtualizeWalker>
if (!condTree->IsIntegralConst(0))
{
block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_compiler));
- m_compiler->fgRemoveRefPred(block->bbNext, block);
+ m_compiler->fgRemoveRefPred(block->GetBBNext(), block);
}
else
{
@@ -819,7 +819,7 @@ PhaseStatus Compiler::fgInline()
}
}
- block = block->bbNext;
+ block = block->GetBBNext();
}
while (block);
@@ -840,7 +840,7 @@ PhaseStatus Compiler::fgInline()
fgWalkTreePre(stmt->GetRootNodePointer(), fgDebugCheckInlineCandidates);
}
- block =
block->bbNext; + block = block->GetBBNext(); } while (block); @@ -1526,7 +1526,7 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) if (block->KindIs(BBJ_RETURN)) { noway_assert((block->bbFlags & BBF_HAS_JMP) == 0); - if (block->bbNext) + if (block->GetBBNext()) { JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n", block->bbNum, bottomBlock->bbNum); @@ -1548,10 +1548,10 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) InlineeCompiler->fgFirstBB->bbRefs--; // Insert inlinee's blocks into inliner's block list. - topBlock->setNext(InlineeCompiler->fgFirstBB); + topBlock->SetBBNext(InlineeCompiler->fgFirstBB); fgRemoveRefPred(bottomBlock, topBlock); fgAddRefPred(InlineeCompiler->fgFirstBB, topBlock); - InlineeCompiler->fgLastBB->setNext(bottomBlock); + InlineeCompiler->fgLastBB->SetBBNext(bottomBlock); // // Add inlinee's block count to inliner's. diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 18637ac7b49caa..7b788764275981 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -134,7 +134,7 @@ bool Compiler::fgReachable(BasicBlock* b1, BasicBlock* b2) { noway_assert(b1->KindIs(BBJ_NONE, BBJ_ALWAYS, BBJ_COND)); - if (b1->KindIs(BBJ_NONE, BBJ_COND) && fgReachable(b1->bbNext, b2)) + if (b1->KindIs(BBJ_NONE, BBJ_COND) && fgReachable(b1->GetBBNext(), b2)) { return true; } @@ -367,7 +367,7 @@ void Compiler::fgComputeEnterBlocksSet() assert(block->isBBCallAlwaysPair()); // Don't remove the BBJ_ALWAYS block that is only here for the unwinder. - BlockSetOps::AddElemD(this, fgAlwaysBlks, block->bbNext->bbNum); + BlockSetOps::AddElemD(this, fgAlwaysBlks, block->GetBBNext()->bbNum); } } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -474,8 +474,8 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) // the target node (of BBJ_ALWAYS) since BBJ_CALLFINALLY node is getting converted to a BBJ_THROW. if (bIsBBCallAlwaysPair) { - noway_assert(block->bbNext->KindIs(BBJ_ALWAYS)); - fgClearFinallyTargetBit(block->bbNext->bbJumpDest); + noway_assert(block->GetBBNext()->KindIs(BBJ_ALWAYS)); + fgClearFinallyTargetBit(block->GetBBNext()->bbJumpDest); } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } @@ -490,7 +490,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) if (hasUnreachableBlocks) { // Now remove the unreachable blocks - for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) + for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->GetBBNext()) { // If we marked a block with BBF_REMOVED then we need to call fgRemoveBlock() on it @@ -506,7 +506,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) // if (block->isBBCallAlwaysPair()) { - block = block->bbNext; + block = block->GetBBNext(); } } } @@ -643,7 +643,7 @@ bool Compiler::fgRemoveDeadBlocks() assert(block->isBBCallAlwaysPair()); // Don't remove the BBJ_ALWAYS block that is only here for the unwinder. 
- worklist.push_back(block->bbNext); + worklist.push_back(block->GetBBNext()); } } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -1007,7 +1007,7 @@ void Compiler::fgComputeDoms() BasicBlock* block = nullptr; - for (block = fgFirstBB->bbNext; block != nullptr; block = block->bbNext) + for (block = fgFirstBB->GetBBNext(); block != nullptr; block = block->GetBBNext()) { // If any basic block has no predecessors then we flag it as processed and temporarily // mark its predecessor list to be flRoot. This makes the flowgraph connected, @@ -1175,7 +1175,7 @@ DomTreeNode* Compiler::fgBuildDomTree() // Traverse the entire block list to build the dominator tree. Skip fgFirstBB // as it is always a root of the dominator forest. - for (BasicBlock* const block : Blocks(fgFirstBB->bbNext)) + for (BasicBlock* const block : Blocks(fgFirstBB->GetBBNext())) { BasicBlock* parent = block->bbIDom; @@ -1470,7 +1470,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() for (cur = fgFirstBB; cur != nullptr; cur = nxt) { // Get hold of the next block (in case we delete 'cur') - nxt = cur->bbNext; + nxt = cur->GetBBNext(); // Should this block be removed? if (!(cur->bbFlags & BBF_IMPORTED)) @@ -1571,10 +1571,10 @@ PhaseStatus Compiler::fgPostImportationCleanup() // Find the first unremoved block before the try entry block. // BasicBlock* const oldTryEntry = HBtab->ebdTryBeg; - BasicBlock* tryEntryPrev = oldTryEntry->bbPrev; + BasicBlock* tryEntryPrev = oldTryEntry->GetBBPrev(); while ((tryEntryPrev != nullptr) && ((tryEntryPrev->bbFlags & BBF_REMOVED) != 0)) { - tryEntryPrev = tryEntryPrev->bbPrev; + tryEntryPrev = tryEntryPrev->GetBBPrev(); } // Because we've added an unremovable scratch block as @@ -1585,7 +1585,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() // If there is a next block of this prev block, and that block is // contained in the current try, we'd like to make that block // the new start of the try, and keep the region. - BasicBlock* newTryEntry = tryEntryPrev->bbNext; + BasicBlock* newTryEntry = tryEntryPrev->GetBBNext(); bool updateTryEntry = false; if ((newTryEntry != nullptr) && bbInTryRegions(XTnum, newTryEntry)) @@ -1648,13 +1648,13 @@ PhaseStatus Compiler::fgPostImportationCleanup() // out of order handler, the next block may be a handler. So even though // this new try entry block is unreachable, we need to give it a // plausible flow target. Simplest is to just mark it as a throw. 
- if (bbIsHandlerBeg(newTryEntry->bbNext)) + if (bbIsHandlerBeg(newTryEntry->GetBBNext())) { newTryEntry->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); } else { - fgAddRefPred(newTryEntry->bbNext, newTryEntry); + fgAddRefPred(newTryEntry->GetBBNext(), newTryEntry); } JITDUMP("OSR: changing start of try region #%u from " FMT_BB " to new " FMT_BB "\n", @@ -1916,7 +1916,7 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext) return false; } - noway_assert(block->bbNext == bNext); + noway_assert(block->GetBBNext() == bNext); if (!block->KindIs(BBJ_NONE)) { @@ -2029,7 +2029,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) noway_assert((block->bbFlags & BBF_REMOVED) == 0); noway_assert(block->KindIs(BBJ_NONE)); - noway_assert(bNext == block->bbNext); + noway_assert(bNext == block->GetBBNext()); noway_assert(bNext != nullptr); noway_assert((bNext->bbFlags & BBF_REMOVED) == 0); noway_assert(bNext->countOfInEdges() == 1 || block->isEmpty()); @@ -2320,7 +2320,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) /* Unlink bNext and update all the marker pointers if necessary */ - fgUnlinkRange(block->bbNext, bNext); + fgUnlinkRange(block->GetBBNext(), bNext); // If bNext was the last block of a try or handler, update the EH table. @@ -2345,15 +2345,15 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) fgReplacePred(bNext->bbJumpDest, bNext, block); /* Update the predecessor list for 'bNext->bbNext' if it is different than 'bNext->bbJumpDest' */ - if (bNext->KindIs(BBJ_COND) && bNext->bbJumpDest != bNext->bbNext) + if (bNext->KindIs(BBJ_COND) && bNext->bbJumpDest != bNext->GetBBNext()) { - fgReplacePred(bNext->bbNext, bNext, block); + fgReplacePred(bNext->GetBBNext(), bNext, block); } break; case BBJ_NONE: /* Update the predecessor list for 'bNext->bbNext' */ - fgReplacePred(bNext->bbNext, bNext, block); + fgReplacePred(bNext->GetBBNext(), bNext, block); break; case BBJ_EHFILTERRET: @@ -2373,7 +2373,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) BasicBlock* finBeg = ehDsc->ebdHndBeg; - for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) + for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->GetBBNext()) { if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { @@ -2381,7 +2381,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) } noway_assert(bcall->isBBCallAlwaysPair()); - fgReplacePred(bcall->bbNext, bNext, block); + fgReplacePred(bcall->GetBBNext(), bNext, block); } } } @@ -2569,7 +2569,7 @@ void Compiler::fgUnreachableBlock(BasicBlock* block) } #endif // DEBUG - noway_assert(block->bbPrev != nullptr); // Can't use this function to remove the first block + noway_assert(block->GetBBPrev() != nullptr); // Can't use this function to remove the first block #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) assert(!block->isBBCallAlwaysPairTail()); // can't remove the BBJ_ALWAYS of a BBJ_CALLFINALLY / BBJ_ALWAYS pair @@ -2627,15 +2627,15 @@ void Compiler::fgUnreachableBlock(BasicBlock* block) // void Compiler::fgRemoveConditionalJump(BasicBlock* block) { - noway_assert(block->KindIs(BBJ_COND) && block->bbJumpDest == block->bbNext); + noway_assert(block->KindIs(BBJ_COND) && block->bbJumpDest == block->GetBBNext()); assert(compRationalIRForm == block->IsLIR()); - FlowEdge* flow = fgGetPredForBlock(block->bbNext, block); + FlowEdge* flow = fgGetPredForBlock(block->GetBBNext(), block); noway_assert(flow->getDupCount() == 2); 
// Change the BBJ_COND to BBJ_NONE, and adjust the refCount and dupCount. block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); - --block->bbNext->bbRefs; + --block->GetBBNext()->bbRefs; flow->decrementDupCount(); #ifdef DEBUG @@ -2644,7 +2644,7 @@ void Compiler::fgRemoveConditionalJump(BasicBlock* block) { printf("Block " FMT_BB " becoming a BBJ_NONE to " FMT_BB " (jump target is the same whether the condition" " is true or false)\n", - block->bbNum, block->bbNext->bbNum); + block->bbNum, block->GetBBNext()->bbNum); } #endif @@ -2884,7 +2884,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) assert(block->isEmpty()); bool madeChanges = false; - BasicBlock* bPrev = block->bbPrev; + BasicBlock* bPrev = block->GetBBPrev(); switch (block->GetBBJumpKind()) { @@ -2914,8 +2914,8 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) // A GOTO cannot be to the next block since that // should have been fixed by the optimization above // An exception is made for a jump from Hot to Cold - noway_assert(block->bbJumpDest != block->bbNext || block->isBBCallAlwaysPairTail() || - fgInDifferentRegions(block, block->bbNext)); + noway_assert(block->bbJumpDest != block->GetBBNext() || block->isBBCallAlwaysPairTail() || + fgInDifferentRegions(block, block->GetBBNext())); /* Cannot remove the first BB */ if (!bPrev) @@ -2936,7 +2936,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) } // can't allow fall through into cold code - if (block->bbNext == fgFirstColdBlock) + if (block->GetBBNext() == fgFirstColdBlock) { break; } @@ -2986,7 +2986,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) } else { - succBlock = block->bbNext; + succBlock = block->GetBBNext(); } if ((succBlock != nullptr) && !BasicBlock::sameEHRegion(block, succBlock)) @@ -3073,7 +3073,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) // Make sure we don't break that invariant. if (fgIsUsingProfileWeights() && block->hasProfileWeight() && (block->bbFlags & BBF_INTERNAL) == 0) { - BasicBlock* bNext = block->bbNext; + BasicBlock* bNext = block->GetBBNext(); // Check if the next block can't maintain the invariant. 
if ((bNext == nullptr) || ((bNext->bbFlags & BBF_INTERNAL) != 0) || !bNext->hasProfileWeight())
{
BasicBlock* curBB = bPrev;
while ((curBB != nullptr) && (curBB->bbFlags & BBF_INTERNAL) != 0)
{
- curBB = curBB->bbPrev;
+ curBB = curBB->GetBBPrev();
}
if (curBB == nullptr)
{
@@ -3323,7 +3323,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
return true;
}
- else if (block->bbJumpSwt->bbsCount == 2 && block->bbJumpSwt->bbsDstTab[1] == block->bbNext)
+ else if (block->bbJumpSwt->bbsCount == 2 && block->bbJumpSwt->bbsDstTab[1] == block->GetBBNext())
{
/* Use a BBJ_COND(switchVal==0) for a switch with only one significant clause besides the default clause, if the
@@ -3743,10 +3743,10 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock*
{
assert(target->KindIs(BBJ_COND));
- if ((target->bbNext->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0)
+ if ((target->GetBBNext()->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0)
{
JITDUMP("Deferring: " FMT_BB " --> " FMT_BB "; latter looks like loop top\n", target->bbNum,
- target->bbNext->bbNum);
+ target->GetBBNext()->bbNum);
return false;
}
@@ -3800,7 +3800,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock*
// The new block 'next' will inherit its weight from 'block'
//
next->inheritWeight(block);
- next->bbJumpDest = target->bbNext;
+ next->bbJumpDest = target->GetBBNext();
fgAddRefPred(next, block);
fgAddRefPred(next->bbJumpDest, next);
@@ -3826,8 +3826,8 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi
{
assert(block->KindIs(BBJ_COND, BBJ_ALWAYS));
assert(block->bbJumpDest == bNext);
- assert(block->bbNext == bNext);
- assert(block->bbPrev == bPrev);
+ assert(block->GetBBNext() == bNext);
+ assert(block->GetBBPrev() == bPrev);
if (block->KindIs(BBJ_ALWAYS))
{
@@ -4026,7 +4026,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
return false;
}
- if (bDest->bbJumpDest != bJump->bbNext)
+ if (bDest->bbJumpDest != bJump->GetBBNext())
{
return false;
}
@@ -4039,7 +4039,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
}
// do not jump into another try region
- BasicBlock* bDestNext = bDest->bbNext;
+ BasicBlock* bDestNext = bDest->GetBBNext();
if (bDestNext->hasTryIndex() && !BasicBlock::sameTryRegion(bJump, bDestNext))
{
return false;
}
@@ -4072,10 +4072,10 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
bool allProfileWeightsAreValid = false;
weight_t weightJump = bJump->bbWeight;
weight_t weightDest = bDest->bbWeight;
- weight_t weightNext = bJump->bbNext->bbWeight;
+ weight_t weightNext = bJump->GetBBNext()->bbWeight;
bool rareJump = bJump->isRunRarely();
bool rareDest = bDest->isRunRarely();
- bool rareNext = bJump->bbNext->isRunRarely();
+ bool rareNext = bJump->GetBBNext()->isRunRarely();
// If we have profile data then we calculate the number of times
// the loop will iterate into loopIterations
@@ -4086,7 +4086,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
//
if ((bJump->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)) &&
(bDest->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)) &&
- (bJump->bbNext->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)))
+ (bJump->GetBBNext()->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)))
{
allProfileWeightsAreValid = true;
@@ -4233,13 +4233,13 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
bJump->bbFlags |= bDest->bbFlags & BBF_COPY_PROPAGATE;
bJump->SetBBJumpKind(BBJ_COND DEBUG_ARG(this));
- bJump->bbJumpDest =
bDest->bbNext; + bJump->bbJumpDest = bDest->GetBBNext(); /* Update bbRefs and bbPreds */ // bJump now falls through into the next block // - fgAddRefPred(bJump->bbNext, bJump); + fgAddRefPred(bJump->GetBBNext(), bJump); // bJump no longer jumps to bDest // @@ -4247,7 +4247,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) // bJump now jumps to bDest->bbNext // - fgAddRefPred(bDest->bbNext, bJump); + fgAddRefPred(bDest->GetBBNext(), bJump); if (weightJump > 0) { @@ -4510,7 +4510,7 @@ bool Compiler::fgExpandRarelyRunBlocks() { // If we've got a BBJ_CALLFINALLY/BBJ_ALWAYS pair, treat the BBJ_CALLFINALLY as an // additional predecessor for the BBJ_ALWAYS block - tmpbb = bPrev->bbPrev; + tmpbb = bPrev->GetBBPrev(); noway_assert(tmpbb != nullptr); #if defined(FEATURE_EH_FUNCLETS) noway_assert(tmpbb->isBBCallAlwaysPair()); @@ -4542,7 +4542,7 @@ bool Compiler::fgExpandRarelyRunBlocks() // Walk the flow graph lexically forward from pred->getBlock() // if we find (block == bPrevPrev) then // pred->getBlock() is an earlier predecessor. - for (tmpbb = pred->getSourceBlock(); tmpbb != nullptr; tmpbb = tmpbb->bbNext) + for (tmpbb = pred->getSourceBlock(); tmpbb != nullptr; tmpbb = tmpbb->GetBBNext()) { if (tmpbb == bPrevPrev) { @@ -4570,7 +4570,7 @@ bool Compiler::fgExpandRarelyRunBlocks() // bPrevPrev is lexically after bPrev and we do not // want to select it as our new block - for (tmpbb = bPrevPrev; tmpbb != nullptr; tmpbb = tmpbb->bbNext) + for (tmpbb = bPrevPrev; tmpbb != nullptr; tmpbb = tmpbb->GetBBNext()) { if (tmpbb == bPrev) { @@ -4596,7 +4596,7 @@ bool Compiler::fgExpandRarelyRunBlocks() BasicBlock* block; BasicBlock* bPrev; - for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext) + for (bPrev = fgFirstBB, block = bPrev->GetBBNext(); block != nullptr; bPrev = block, block = block->GetBBNext()) { if (bPrev->isRunRarely()) { @@ -4678,7 +4678,7 @@ bool Compiler::fgExpandRarelyRunBlocks() // Now iterate over every block to see if we can prove that a block is rarely run // (i.e. when all predecessors to the block are rarely run) // - for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext) + for (bPrev = fgFirstBB, block = bPrev->GetBBNext(); block != nullptr; bPrev = block, block = block->GetBBNext()) { // If block is not run rarely, then check to make sure that it has // at least one non-rarely run block. @@ -4728,7 +4728,7 @@ bool Compiler::fgExpandRarelyRunBlocks() // if (block->isBBCallAlwaysPair()) { - BasicBlock* bNext = block->bbNext; + BasicBlock* bNext = block->GetBBNext(); PREFIX_ASSUME(bNext != nullptr); bNext->bbSetRunRarely(); #ifdef DEBUG @@ -4832,7 +4832,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) #endif // FEATURE_EH_FUNCLETS // We can't relocate anything if we only have one block - if (fgFirstBB->bbNext == nullptr) + if (fgFirstBB->GetBBNext() == nullptr) { return false; } @@ -4880,7 +4880,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) EHblkDsc* HBtab; // Iterate over every block, remembering our previous block in bPrev - for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext) + for (bPrev = fgFirstBB, block = bPrev->GetBBNext(); block != nullptr; bPrev = block, block = block->GetBBNext()) { // // Consider relocating the rarely run blocks such that they are at the end of the method. 
@@ -5128,7 +5128,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) weight_t highestWeight = 0; BasicBlock* candidateBlock = nullptr; BasicBlock* lastNonFallThroughBlock = bPrev; - BasicBlock* bTmp = bPrev->bbNext; + BasicBlock* bTmp = bPrev->GetBBNext(); while (bTmp != nullptr) { @@ -5137,7 +5137,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) if (bTmp->isBBCallAlwaysPair()) { // Move bTmp forward - bTmp = bTmp->bbNext; + bTmp = bTmp->GetBBNext(); } // @@ -5164,7 +5164,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // otherwise we have a new candidateBlock // highestWeight = bTmp->bbWeight; - candidateBlock = lastNonFallThroughBlock->bbNext; + candidateBlock = lastNonFallThroughBlock->GetBBNext(); } } @@ -5173,7 +5173,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) lastNonFallThroughBlock = bTmp; } - bTmp = bTmp->bbNext; + bTmp = bTmp->GetBBNext(); } // If we didn't find a suitable block then skip this @@ -5211,7 +5211,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) /* (bPrev is known to be a normal block at this point) */ if (!isRare) { - if ((bDest == block->bbNext) && block->KindIs(BBJ_RETURN) && bPrev->KindIs(BBJ_ALWAYS)) + if ((bDest == block->GetBBNext()) && block->KindIs(BBJ_RETURN) && bPrev->KindIs(BBJ_ALWAYS)) { // This is a common case with expressions like "return Expr1 && Expr2" -- move the return // to establish fall-through. @@ -5277,7 +5277,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) BasicBlock* bStart = block; BasicBlock* bEnd = bStart; - bNext = bEnd->bbNext; + bNext = bEnd->GetBBNext(); bool connected_bDest = false; if ((backwardBranch && !isRare) || @@ -5296,7 +5296,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) { // Move bEnd and bNext forward bEnd = bNext; - bNext = bNext->bbNext; + bNext = bNext->GetBBNext(); } // @@ -5309,7 +5309,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) #if defined(FEATURE_EH_FUNCLETS) // Check if we've reached the funclets region, at the end of the function - if (fgFirstFuncletBB == bEnd->bbNext) + if (fgFirstFuncletBB == bEnd->GetBBNext()) { break; } @@ -5356,7 +5356,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // Move bEnd and bNext forward bEnd = bNext; - bNext = bNext->bbNext; + bNext = bNext->GetBBNext(); } // Set connected_bDest to true if moving blocks [bStart .. 
bEnd] @@ -5396,12 +5396,12 @@ bool Compiler::fgReorderBlocks(bool useProfile) bPrev2 = block; while (bPrev2 != nullptr) { - if (bPrev2->bbNext == bDest) + if (bPrev2->GetBBNext() == bDest) { break; } - bPrev2 = bPrev2->bbNext; + bPrev2 = bPrev2->GetBBNext(); } if ((bPrev2 != nullptr) && fgEhAllowsMoveBlock(bPrev, bDest)) @@ -5414,7 +5414,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // bStart2 = bDest; bEnd2 = bStart2; - bNext = bEnd2->bbNext; + bNext = bEnd2->GetBBNext(); while (true) { @@ -5425,7 +5425,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) noway_assert(bNext->KindIs(BBJ_ALWAYS)); // Move bEnd2 and bNext forward bEnd2 = bNext; - bNext = bNext->bbNext; + bNext = bNext->GetBBNext(); } // Check for the Loop exit conditions @@ -5475,7 +5475,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // Move bEnd2 and bNext forward bEnd2 = bNext; - bNext = bNext->bbNext; + bNext = bNext->GetBBNext(); } } } @@ -5579,7 +5579,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) noway_assert(!bEnd->KindIs(BBJ_CALLFINALLY) || (bEnd->bbFlags & BBF_RETLESS_CALL)); // bStartPrev must be set to the block that precedes bStart - noway_assert(bStartPrev->bbNext == bStart); + noway_assert(bStartPrev->GetBBNext() == bStart); // Since we will be unlinking [bStart..bEnd], // we need to compute and remember if bStart is in each of @@ -5630,7 +5630,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) if (ehDsc != nullptr) { - endBlk = lastBlk->bbNext; + endBlk = lastBlk->GetBBNext(); /* Multiple (nested) try regions might start from the same BB. @@ -5650,7 +5650,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) */ while (!BasicBlock::sameTryRegion(startBlk, bStart) && (startBlk != endBlk)) { - startBlk = startBlk->bbNext; + startBlk = startBlk->GetBBNext(); } // startBlk cannot equal endBlk as it must come before endBlk @@ -5666,12 +5666,12 @@ bool Compiler::fgReorderBlocks(bool useProfile) // or if bEnd->bbNext is in a different try region // then we cannot move the blocks // - if ((bEnd->bbNext == nullptr) || !BasicBlock::sameTryRegion(startBlk, bEnd->bbNext)) + if ((bEnd->GetBBNext() == nullptr) || !BasicBlock::sameTryRegion(startBlk, bEnd->GetBBNext())) { goto CANNOT_MOVE; } - startBlk = bEnd->bbNext; + startBlk = bEnd->GetBBNext(); // Check that the new startBlk still comes before endBlk @@ -5684,7 +5684,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) BasicBlock* tmpBlk = startBlk; while ((tmpBlk != endBlk) && (tmpBlk != nullptr)) { - tmpBlk = tmpBlk->bbNext; + tmpBlk = tmpBlk->GetBBNext(); } // when tmpBlk is NULL that means startBlk is after endBlk @@ -5719,7 +5719,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) fgIsForwardBranch(bEnd, bPrev)) { // Set nearBlk to be the block in [startBlk..endBlk] - // such that nearBlk->bbNext == bEnd->JumpDest + // such that nearBlk->GetBBNext() == bEnd->JumpDest // if no such block exists then set nearBlk to NULL nearBlk = startBlk; jumpBlk = bEnd; @@ -5731,7 +5731,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) if (nearBlk != bPrev) { // Check if nearBlk satisfies our requirement - if (nearBlk->bbNext == bEnd->bbJumpDest) + if (nearBlk->GetBBNext() == bEnd->bbJumpDest) { break; } @@ -5745,7 +5745,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) } // advance nearBlk to the next block - nearBlk = nearBlk->bbNext; + nearBlk = nearBlk->GetBBNext(); } while (nearBlk != nullptr); } @@ -5783,10 +5783,10 @@ bool Compiler::fgReorderBlocks(bool useProfile) /* We couldn't move the blocks, so put everything back */ /* relink [bStart .. 
bEnd] into the flow graph */ - bPrev->setNext(bStart); - if (bEnd->bbNext) + bPrev->SetBBNext(bStart); + if (bEnd->GetBBNext()) { - bEnd->bbNext->bbPrev = bEnd; + bEnd->GetBBNext()->SetBBPrev(bEnd); } #ifdef DEBUG if (verbose) @@ -5880,7 +5880,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) else { noway_assert(insertAfterBlk == bPrev); - noway_assert(insertAfterBlk->bbNext == block); + noway_assert(insertAfterBlk->GetBBNext() == block); /* Set the new jump dest for bPrev to the rarely run or uncommon block(s) */ bPrev->bbJumpDest = block; @@ -5933,7 +5933,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) fgConnectFallThrough(bPrev, block); } - BasicBlock* bSkip = bEnd->bbNext; + BasicBlock* bSkip = bEnd->GetBBNext(); /* If bEnd falls through, we must insert a jump to bNext */ fgConnectFallThrough(bEnd, bNext); @@ -5968,7 +5968,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // Set our iteration point 'block' to be the new bPrev->bbNext // It will be used as the next bPrev - block = bPrev->bbNext; + block = bPrev->GetBBNext(); } // end of for loop(bPrev,block) @@ -6068,7 +6068,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) BasicBlock* bNext; // the successor of the current block BasicBlock* bDest; // the jump target of the current block - for (block = fgFirstBB; block != nullptr; block = block->bbNext) + for (block = fgFirstBB; block != nullptr; block = block->GetBBNext()) { /* Some blocks may be already marked removed by other optimizations * (e.g worthless loop removal), without being explicitly removed @@ -6079,14 +6079,14 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) { if (bPrev) { - bPrev->setNext(block->bbNext); + bPrev->SetBBNext(block->GetBBNext()); } else { /* WEIRD first basic block is removed - should have an assert here */ noway_assert(!"First basic block marked as BBF_REMOVED???"); - fgFirstBB = block->bbNext; + fgFirstBB = block->GetBBNext(); } continue; } @@ -6099,7 +6099,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) REPEAT:; - bNext = block->bbNext; + bNext = block->GetBBNext(); bDest = nullptr; if (block->KindIs(BBJ_ALWAYS)) @@ -6110,19 +6110,19 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) change = true; modified = true; bDest = block->bbJumpDest; - bNext = block->bbNext; + bNext = block->GetBBNext(); } } if (block->KindIs(BBJ_NONE)) { bDest = nullptr; - if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, block->bbNext)) + if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, block->GetBBNext())) { change = true; modified = true; bDest = block->bbJumpDest; - bNext = block->bbNext; + bNext = block->GetBBNext(); } } @@ -6176,7 +6176,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) { // case (a) // - const bool isJumpAroundEmpty = (bNext->bbNext == bDest); + const bool isJumpAroundEmpty = (bNext->GetBBNext() == bDest); // case (b) // @@ -6241,7 +6241,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) { // We don't expect bDest to already be right after bNext. // - assert(bDest != bNext->bbNext); + assert(bDest != bNext->GetBBNext()); JITDUMP("\nMoving " FMT_BB " after " FMT_BB " to enable reversal\n", bDest->bbNum, bNext->bbNum); @@ -6249,13 +6249,13 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // If bDest can fall through we'll need to create a jump // block after it too. Remember where to jump to. 
// - BasicBlock* const bDestNext = bDest->bbNext; + BasicBlock* const bDestNext = bDest->GetBBNext(); // Move bDest // if (ehIsBlockEHLast(bDest)) { - ehUpdateLastBlocks(bDest, bDest->bbPrev); + ehUpdateLastBlocks(bDest, bDest->GetBBPrev()); } fgUnlinkBlock(bDest); @@ -6333,7 +6333,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // If this is the first Cold basic block update fgFirstColdBlock if (bNext == fgFirstColdBlock) { - fgFirstColdBlock = bNext->bbNext; + fgFirstColdBlock = bNext->GetBBNext(); } // @@ -6483,7 +6483,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) if (block->isEmpty()) { - assert(bPrev == block->bbPrev); + assert(bPrev == block->GetBBPrev()); if (fgOptimizeEmptyBlock(block)) { change = true; @@ -7067,7 +7067,7 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early) // ternaries in C#). // The logic below could be generalized to BBJ_SWITCH, but this currently // has almost no CQ benefit but does have a TP impact. - if (!block->KindIs(BBJ_COND) || (block->bbNext == block->bbJumpDest)) + if (!block->KindIs(BBJ_COND) || (block->GetBBNext() == block->bbJumpDest)) { return false; } @@ -7116,7 +7116,7 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early) Statement* nextFirstStmt; Statement* destFirstStmt; - if (!getSuccCandidate(block->bbNext, &nextFirstStmt) || !getSuccCandidate(block->bbJumpDest, &destFirstStmt)) + if (!getSuccCandidate(block->GetBBNext(), &nextFirstStmt) || !getSuccCandidate(block->bbJumpDest, &destFirstStmt)) { return false; } @@ -7144,10 +7144,10 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early) JITDUMP("We can; moving statement\n"); - fgUnlinkStmt(block->bbNext, nextFirstStmt); + fgUnlinkStmt(block->GetBBNext(), nextFirstStmt); fgInsertStmtNearEnd(block, nextFirstStmt); fgUnlinkStmt(block->bbJumpDest, destFirstStmt); - block->bbFlags |= block->bbNext->bbFlags & BBF_COPY_PROPAGATE; + block->bbFlags |= block->GetBBNext()->bbFlags & BBF_COPY_PROPAGATE; return true; } diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index 6444e45085db7d..3665e499daad9c 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -530,7 +530,7 @@ void BlockCountInstrumentor::RelocateProbes() // Handle case where we had a fall through critical edge // - if (pred->bbNext == intermediary) + if (pred->GetBBNext() == intermediary) { m_comp->fgRemoveRefPred(pred, block); m_comp->fgAddRefPred(intermediary, block); @@ -963,7 +963,7 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor) { // This block should be the only pred of the continuation. // - BasicBlock* const target = block->bbNext; + BasicBlock* const target = block->GetBBNext(); assert(!BlockSetOps::IsMember(this, marked, target->bbNum)); visitor->VisitTreeEdge(block, target); stack.Push(target); @@ -3363,7 +3363,7 @@ void EfficientEdgeCountReconstructor::Solve() // The ideal solver order is likely reverse postorder over the depth-first spanning tree. // We approximate it here by running from last node to first. 
// - for (BasicBlock* block = m_comp->fgLastBB; (block != nullptr); block = block->bbPrev) + for (BasicBlock* block = m_comp->fgLastBB; (block != nullptr); block = block->GetBBPrev()) { BlockInfo* const info = BlockToInfo(block); @@ -4413,7 +4413,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) weight = 0; iterations++; - for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext) + for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->GetBBNext()) { if (!bDst->hasProfileWeight() && (bDst->bbPreds != nullptr)) { @@ -4431,7 +4431,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) // Does this block flow into only one other block if (bSrc->KindIs(BBJ_NONE)) { - bOnlyNext = bSrc->bbNext; + bOnlyNext = bSrc->GetBBNext(); } else if (bSrc->KindIs(BBJ_ALWAYS)) { @@ -4452,7 +4452,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) // Does this block flow into only one other block if (bDst->KindIs(BBJ_NONE)) { - bOnlyNext = bDst->bbNext; + bOnlyNext = bDst->GetBBNext(); } else if (bDst->KindIs(BBJ_ALWAYS)) { @@ -4582,7 +4582,7 @@ bool Compiler::fgComputeCalledCount(weight_t returnWeight) // while (firstILBlock->bbFlags & BBF_INTERNAL) { - firstILBlock = firstILBlock->bbNext; + firstILBlock = firstILBlock->GetBBNext(); } } @@ -4655,7 +4655,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() JITDUMP("Initial weight assignments\n\n"); // Now we will compute the initial m_edgeWeightMin and m_edgeWeightMax values - for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext) + for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->GetBBNext()) { weight_t bDstWeight = bDst->bbWeight; @@ -4746,7 +4746,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() hasIncompleteEdgeWeights = false; JITDUMP("\n -- step 1 --\n"); - for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext) + for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->GetBBNext()) { for (FlowEdge* const edge : bDst->PredEdges()) { @@ -4761,13 +4761,13 @@ PhaseStatus Compiler::fgComputeEdgeWeights() weight_t diff; FlowEdge* otherEdge; BasicBlock* otherDst; - if (bSrc->bbNext == bDst) + if (bSrc->GetBBNext() == bDst) { otherDst = bSrc->bbJumpDest; } else { - otherDst = bSrc->bbNext; + otherDst = bSrc->GetBBNext(); } otherEdge = fgGetPredForBlock(otherDst, bSrc); @@ -4842,7 +4842,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() JITDUMP("\n -- step 2 --\n"); - for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext) + for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->GetBBNext()) { weight_t bDstWeight = bDst->bbWeight; diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index 90d56a835ff10d..0977e202677d2e 100644 --- a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -290,7 +290,7 @@ bool ProfileSynthesis::IsLoopExitEdge(FlowEdge* edge) // void ProfileSynthesis::AssignLikelihoodNext(BasicBlock* block) { - FlowEdge* const edge = m_comp->fgGetPredForBlock(block->bbNext, block); + FlowEdge* const edge = m_comp->fgGetPredForBlock(block->GetBBNext(), block); edge->setLikelihood(1.0); } @@ -317,7 +317,7 @@ void ProfileSynthesis::AssignLikelihoodJump(BasicBlock* block) void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block) { BasicBlock* const jump = block->bbJumpDest; - BasicBlock* const next = block->bbNext; + BasicBlock* const next = block->GetBBNext(); // Watch for degenerate case // @@ -1221,7 +1221,7 @@ void ProfileSynthesis::ComputeCyclicProbabilities(SimpleLoop* loop) exitBlock->bbNum, 
diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp
index 90d56a835ff10d..0977e202677d2e 100644
--- a/src/coreclr/jit/fgprofilesynthesis.cpp
+++ b/src/coreclr/jit/fgprofilesynthesis.cpp
@@ -290,7 +290,7 @@ bool ProfileSynthesis::IsLoopExitEdge(FlowEdge* edge)
 //
 void ProfileSynthesis::AssignLikelihoodNext(BasicBlock* block)
 {
-    FlowEdge* const edge = m_comp->fgGetPredForBlock(block->bbNext, block);
+    FlowEdge* const edge = m_comp->fgGetPredForBlock(block->GetBBNext(), block);
     edge->setLikelihood(1.0);
 }
@@ -317,7 +317,7 @@ void ProfileSynthesis::AssignLikelihoodJump(BasicBlock* block)
 void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block)
 {
     BasicBlock* const jump = block->bbJumpDest;
-    BasicBlock* const next = block->bbNext;
+    BasicBlock* const next = block->GetBBNext();
 
     // Watch for degenerate case
     //
@@ -1221,7 +1221,7 @@ void ProfileSynthesis::ComputeCyclicProbabilities(SimpleLoop* loop)
                         exitBlock->bbNum,
                         exitEdge->getLikelihood());
 
                 BasicBlock* const jump = exitBlock->bbJumpDest;
-                BasicBlock* const next = exitBlock->bbNext;
+                BasicBlock* const next = exitBlock->GetBBNext();
                 FlowEdge* const   jumpEdge = m_comp->fgGetPredForBlock(jump, exitBlock);
                 FlowEdge* const   nextEdge = m_comp->fgGetPredForBlock(next, exitBlock);
                 weight_t const exitLikelihood = (missingExitWeight + currentExitWeight) / exitBlockWeight;
diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp
index 78dc4571352aa5..574bd8800d0214 100644
--- a/src/coreclr/jit/flowgraph.cpp
+++ b/src/coreclr/jit/flowgraph.cpp
@@ -80,7 +80,7 @@ PhaseStatus Compiler::fgInsertGCPolls()
 
     // Walk through the blocks and hunt for a block that needs a GC Poll
     //
-    for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+    for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->GetBBNext())
     {
         compCurBB = block;
@@ -256,7 +256,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block)
 
         if (top->KindIs(BBJ_COND))
         {
-            topFallThrough     = top->bbNext;
+            topFallThrough     = top->GetBBNext();
             lpIndexFallThrough = topFallThrough->bbNatLoopNum;
         }
@@ -384,7 +384,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block)
     switch (oldJumpKind)
     {
         case BBJ_NONE:
-            fgReplacePred(bottom->bbNext, top, bottom);
+            fgReplacePred(bottom->GetBBNext(), top, bottom);
             break;
         case BBJ_RETURN:
         case BBJ_THROW:
@@ -392,8 +392,8 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block)
             break;
         case BBJ_COND:
             // replace predecessor in the fall through block.
-            noway_assert(bottom->bbNext);
-            fgReplacePred(bottom->bbNext, top, bottom);
+            noway_assert(bottom->GetBBNext());
+            fgReplacePred(bottom->GetBBNext(), top, bottom);
 
             // fall through for the jump target
             FALLTHROUGH;
@@ -1562,7 +1562,7 @@ void Compiler::fgAddSyncMethodEnterExit()
     // Create a block for the start of the try region, where the monitor enter call
     // will go.
     BasicBlock* const tryBegBB  = fgSplitBlockAtEnd(fgFirstBB);
-    BasicBlock* const tryNextBB = tryBegBB->bbNext;
+    BasicBlock* const tryNextBB = tryBegBB->GetBBNext();
     BasicBlock* const tryLastBB = fgLastBB;
 
     // If we have profile data the new block will inherit the next block's weight
@@ -1577,8 +1577,8 @@ void Compiler::fgAddSyncMethodEnterExit()
     assert(!tryLastBB->bbFallsThrough());
     BasicBlock* faultBB = fgNewBBafter(BBJ_EHFAULTRET, tryLastBB, false);
 
-    assert(tryLastBB->bbNext == faultBB);
-    assert(faultBB->bbNext == nullptr);
+    assert(tryLastBB->GetBBNext() == faultBB);
+    assert(faultBB->GetBBNext() == nullptr);
     assert(faultBB == fgLastBB);
 
     faultBB->bbRefs = 1;
@@ -1633,7 +1633,7 @@ void Compiler::fgAddSyncMethodEnterExit()
     // to point to the new try handler.
 
     BasicBlock* tmpBB;
-    for (tmpBB = tryBegBB->bbNext; tmpBB != faultBB; tmpBB = tmpBB->bbNext)
+    for (tmpBB = tryBegBB->GetBBNext(); tmpBB != faultBB; tmpBB = tmpBB->GetBBNext())
     {
         if (!tmpBB->hasTryIndex())
         {
@@ -2154,7 +2154,7 @@ class MergedReturns
         BasicBlock* newReturnBB = comp->fgNewBBinRegion(BBJ_RETURN);
         comp->fgReturnCount++;
 
-        noway_assert(newReturnBB->bbNext == nullptr);
+        noway_assert(newReturnBB->GetBBNext() == nullptr);
 
         JITDUMP("\n newReturnBB [" FMT_BB "] created\n", newReturnBB->bbNum);
@@ -2594,7 +2594,7 @@ PhaseStatus Compiler::fgAddInternal()
 
     // Visit the BBJ_RETURN blocks and merge as necessary.
 
-    for (BasicBlock* block = fgFirstBB; block != lastBlockBeforeGenReturns->bbNext; block = block->bbNext)
+    for (BasicBlock* block = fgFirstBB; block != lastBlockBeforeGenReturns->GetBBNext(); block = block->GetBBNext())
     {
         if (block->KindIs(BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0))
         {
@@ -3004,12 +3004,12 @@ BasicBlock* Compiler::fgLastBBInMainFunction()
 
     if (fgFirstFuncletBB != nullptr)
     {
-        return fgFirstFuncletBB->bbPrev;
+        return fgFirstFuncletBB->GetBBPrev();
     }
 
 #endif // FEATURE_EH_FUNCLETS
 
-    assert(fgLastBB->bbNext == nullptr);
+    assert(fgLastBB->GetBBNext() == nullptr);
 
     return fgLastBB;
 }
@@ -3062,7 +3062,7 @@ BasicBlock* Compiler::fgGetDomSpeculatively(const BasicBlock* block)
 /*****************************************************************************************************
  *
  * Function to return the first basic block after the main part of the function. With funclets, it is
- * the block of the first funclet. Otherwise it is NULL if there are no funclets (fgLastBB->bbNext).
+ * the block of the first funclet. Otherwise it is NULL if there are no funclets (fgLastBB->GetBBNext()).
 * This is equivalent to fgLastBBInMainFunction()->bbNext
 * An exclusive end of the main method.
 */
@@ -3078,7 +3078,7 @@
 
 #endif // FEATURE_EH_FUNCLETS
 
-    assert(fgLastBB->bbNext == nullptr);
+    assert(fgLastBB->GetBBNext() == nullptr);
 
     return nullptr;
 }
@@ -3302,7 +3302,7 @@ PhaseStatus Compiler::fgCreateFunclets()
 //
 bool Compiler::fgFuncletsAreCold()
 {
-    for (BasicBlock* block = fgFirstFuncletBB; block != nullptr; block = block->bbNext)
+    for (BasicBlock* block = fgFirstFuncletBB; block != nullptr; block = block->GetBBNext())
     {
         if (!block->isRunRarely())
         {
@@ -3365,7 +3365,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock()
 
     if (forceSplit)
     {
-        firstColdBlock       = fgFirstBB->bbNext;
+        firstColdBlock       = fgFirstBB->GetBBNext();
         prevToFirstColdBlock = fgFirstBB;
         JITDUMP("JitStressProcedureSplitting is enabled: Splitting after the first basic block\n");
     }
     else
     {
         bool inFuncletSection = false;
 
-        for (lblk = nullptr, block = fgFirstBB; block != nullptr; lblk = block, block = block->bbNext)
+        for (lblk = nullptr, block = fgFirstBB; block != nullptr; lblk = block, block = block->GetBBNext())
         {
             bool blockMustBeInHotSection = false;
@@ -3413,7 +3413,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock()
                 if (fgFuncletsAreCold())
                 {
                     firstColdBlock       = fgFirstFuncletBB;
-                    prevToFirstColdBlock = fgFirstFuncletBB->bbPrev;
+                    prevToFirstColdBlock = fgFirstFuncletBB->GetBBPrev();
                 }
 
                 break;
@@ -3486,7 +3486,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock()
         // Cold section is 5 bytes in size.
        // Ignore if stress-splitting.
         //
-        if (!forceSplit && firstColdBlock->bbNext == nullptr)
+        if (!forceSplit && firstColdBlock->GetBBNext() == nullptr)
        {
             // If the size of the cold block is 7 or less
             // then we will keep it in the Hot section.
@@ -3515,7 +3515,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock()
                 //
                 assert(prevToFirstColdBlock->isBBCallAlwaysPair());
                 firstColdBlock =
-                    firstColdBlock->bbNext; // Note that this assignment could make firstColdBlock == nullptr
+                    firstColdBlock->GetBBNext(); // Note that this assignment could make firstColdBlock == nullptr
                 break;
 
             case BBJ_COND:
@@ -3526,7 +3526,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock()
                 if (firstColdBlock->isEmpty() && firstColdBlock->KindIs(BBJ_ALWAYS))
                 {
                     // We can just use this block as the transitionBlock
-                    firstColdBlock = firstColdBlock->bbNext;
+                    firstColdBlock = firstColdBlock->GetBBNext();
                     // Note that this assignment could make firstColdBlock == NULL
                 }
                 else
@@ -3554,7 +3554,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock()
         }
     }
 
-    for (block = firstColdBlock; block != nullptr; block = block->bbNext)
+    for (block = firstColdBlock; block != nullptr; block = block->GetBBNext())
     {
         block->bbFlags |= BBF_COLD;
         block->unmarkLoopAlign(this DEBUG_ARG("Loop alignment disabled for cold blocks"));
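The `DEBUG_ARG(...)` wrapper used by `unmarkLoopAlign` above, and by the `SetBBJumpKind` calls throughout this series, passes an extra argument only in DEBUG builds. A sketch of the macro pair, assuming the usual JIT definition (the real one lives in the compiler headers, outside this excerpt):

// Hedged reconstruction: DEBUG_ARG appends ", x" to an argument list in DEBUG
// builds and expands to nothing in release builds, so a debug-only parameter
// such as the Compiler* threaded into SetBBJumpKind costs nothing when shipped.
#ifdef DEBUG
#define DEBUG_ARG(x) , x
#else
#define DEBUG_ARG(x)
#endif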
diff --git a/src/coreclr/jit/helperexpansion.cpp b/src/coreclr/jit/helperexpansion.cpp
index 529bbfdb244924..baccc44b0a2803 100644
--- a/src/coreclr/jit/helperexpansion.cpp
+++ b/src/coreclr/jit/helperexpansion.cpp
@@ -824,7 +824,7 @@ template
-    for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+    for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->GetBBNext())
     {
         if (skipRarelyRunBlocks && block->isRunRarely())
         {
diff --git a/src/coreclr/jit/ifconversion.cpp b/src/coreclr/jit/ifconversion.cpp
index 8489917bb7b542..7d84bad99d7610 100644
--- a/src/coreclr/jit/ifconversion.cpp
+++ b/src/coreclr/jit/ifconversion.cpp
@@ -122,7 +122,7 @@ bool OptIfConversionDsc::IfConvertCheckInnerBlockFlow(BasicBlock* block)
 bool OptIfConversionDsc::IfConvertCheckThenFlow()
 {
     m_flowFound = false;
-    BasicBlock* thenBlock = m_startBlock->bbNext;
+    BasicBlock* thenBlock = m_startBlock->GetBBNext();
 
     for (int thenLimit = 0; thenLimit < m_checkLimit; thenLimit++)
     {
@@ -385,7 +385,7 @@ void OptIfConversionDsc::IfConvertDump()
 {
     assert(m_startBlock != nullptr);
     m_comp->fgDumpBlock(m_startBlock);
-    for (BasicBlock* dumpBlock = m_startBlock->bbNext; dumpBlock != m_finalBlock;
+    for (BasicBlock* dumpBlock = m_startBlock->GetBBNext(); dumpBlock != m_finalBlock;
          dumpBlock = dumpBlock->GetUniqueSucc())
     {
         m_comp->fgDumpBlock(dumpBlock);
@@ -575,7 +575,7 @@ bool OptIfConversionDsc::optIfConvert()
     }
 
     // Check the Then and Else blocks have a single operation each.
-    if (!IfConvertCheckStmts(m_startBlock->bbNext, &m_thenOperation))
+    if (!IfConvertCheckStmts(m_startBlock->GetBBNext(), &m_thenOperation))
     {
         return false;
     }
@@ -742,7 +742,7 @@ bool OptIfConversionDsc::optIfConvert()
     }
 
     // Update the flow from the original block.
-    m_comp->fgRemoveAllRefPreds(m_startBlock->bbNext, m_startBlock);
+    m_comp->fgRemoveAllRefPreds(m_startBlock->GetBBNext(), m_startBlock);
     m_startBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp));
 
 #ifdef DEBUG
@@ -789,7 +789,7 @@ PhaseStatus Compiler::optIfConversion()
         {
             OptIfConversionDsc optIfConversionDsc(this, block);
             madeChanges |= optIfConversionDsc.optIfConvert();
-            block = block->bbPrev;
+            block = block->GetBBPrev();
         }
 #endif
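A recurring sequence in the importer hunks that follow: when a conditional branch is folded away, the edge to the abandoned successor is removed before the jump kind changes. A sketch of that idiom; the helper is illustrative, not part of the patch:

// Illustrative only: folding a BBJ_COND whose branch target equals its
// fall-through block. The ref count of the no-longer-taken jump target is
// dropped first, then the block is rewritten as BBJ_NONE, exactly the order
// the impImportBlockCode hunks below use.
void FoldAlwaysFallThrough(Compiler* comp, BasicBlock* block)
{
    assert(block->KindIs(BBJ_COND));
    comp->fgRemoveRefPred(block->bbJumpDest, block);
    block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp));
}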
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 91dbf2e5e76874..b4f3d385455b4d 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -1953,7 +1953,7 @@ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_H
 
         impPushOnStack(tree, typeInfo(clsHnd));
 
-        return hndBlk->bbNext;
+        return hndBlk->GetBBNext();
     }
 }
@@ -7298,14 +7298,14 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                     BADCODE("invalid type for brtrue/brfalse");
                 }
 
-                if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext))
+                if (opts.OptimizationEnabled() && (block->bbJumpDest == block->GetBBNext()))
                 {
                     // We may have already modified `block`'s jump kind, if this is a re-importation.
                     //
                     if (block->KindIs(BBJ_COND))
                     {
                         JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n",
-                                block->bbNum, block->bbNext->bbNum);
+                                block->bbNum, block->GetBBNext()->bbNum);
                         fgRemoveRefPred(block->bbJumpDest, block);
                         block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
                     }
@@ -7371,14 +7371,14 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                     {
                         if (foldedJumpKind == BBJ_NONE)
                         {
-                            JITDUMP("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum);
+                            JITDUMP("\nThe block falls through into the next " FMT_BB "\n", block->GetBBNext()->bbNum);
                             fgRemoveRefPred(block->bbJumpDest, block);
                         }
                         else
                         {
                             JITDUMP("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n",
                                     block->bbJumpDest->bbNum);
-                            fgRemoveRefPred(block->bbNext, block);
+                            fgRemoveRefPred(block->GetBBNext(), block);
                         }
                         block->SetBBJumpKind(foldedJumpKind DEBUG_ARG(this));
                     }
@@ -7544,14 +7544,14 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                 assertImp((genActualType(op1) == genActualType(op2)) || (varTypeIsI(op1) && varTypeIsI(op2)) ||
                           (varTypeIsFloating(op1) && varTypeIsFloating(op2)));
 
-                if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext))
+                if (opts.OptimizationEnabled() && (block->bbJumpDest == block->GetBBNext()))
                 {
                     // We may have already modified `block`'s jump kind, if this is a re-importation.
                     //
                     if (block->KindIs(BBJ_COND))
                     {
                         JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n",
-                                block->bbNum, block->bbNext->bbNum);
+                                block->bbNum, block->GetBBNext()->bbNum);
                         fgRemoveRefPred(block->bbJumpDest, block);
                         block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
                     }
@@ -7630,7 +7630,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
 
                     if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1)))
                     {
-                        if (curJump != block->bbNext)
+                        if (curJump != block->GetBBNext())
                         {
                             // transform the basic block into a BBJ_ALWAYS
                             block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
@@ -11135,7 +11135,7 @@ void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
 
             // push catch arg the stack, spill to a temp if necessary
             // Note: can update HBtab->ebdFilter!
-            const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
+            const bool isSingleBlockFilter = (filterBB->GetBBNext() == hndBegBB);
             filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
 
             impImportBlockPending(filterBB);
@@ -11289,12 +11289,12 @@ void Compiler::impImportBlock(BasicBlock* block)
 
                 /* Note if the next block has more than one ancestor */
 
-                multRef |= block->bbNext->bbRefs;
+                multRef |= block->GetBBNext()->bbRefs;
 
                 /* Does the next block have temps assigned? */
 
-                baseTmp  = block->bbNext->bbStkTempsIn;
-                tgtBlock = block->bbNext;
+                baseTmp  = block->GetBBNext()->bbStkTempsIn;
+                tgtBlock = block->GetBBNext();
 
                 if (baseTmp != NO_BASE_TMP)
                 {
@@ -11315,9 +11315,9 @@ void Compiler::impImportBlock(BasicBlock* block)
                 break;
 
             case BBJ_NONE:
-                multRef |= block->bbNext->bbRefs;
-                baseTmp  = block->bbNext->bbStkTempsIn;
-                tgtBlock = block->bbNext;
+                multRef |= block->GetBBNext()->bbRefs;
+                baseTmp  = block->GetBBNext()->bbStkTempsIn;
+                tgtBlock = block->GetBBNext();
                 break;
 
             case BBJ_SWITCH:
@@ -12119,7 +12119,7 @@ void Compiler::impImport()
 
         if (entryBlock->KindIs(BBJ_NONE))
         {
-            entryBlock = entryBlock->bbNext;
+            entryBlock = entryBlock->GetBBNext();
         }
         else if (opts.IsOSR() && entryBlock->KindIs(BBJ_ALWAYS))
         {
@@ -12253,7 +12253,7 @@ void Compiler::impFixPredLists()
                     continue;
                 }
 
-                BasicBlock* const continuation = predBlock->bbNext;
+                BasicBlock* const continuation = predBlock->GetBBNext();
                 fgAddRefPred(continuation, finallyBlock);
 
                 if (!added)
diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp
index da1fb1933b2397..d77d9052748103 100644
--- a/src/coreclr/jit/indirectcalltransformer.cpp
+++ b/src/coreclr/jit/indirectcalltransformer.cpp
@@ -1071,7 +1071,7 @@ class IndirectCallTransformer
             // Find the hot/cold predecessors. (Consider: just record these when
             // we did the scouting).
             //
-            BasicBlock* const coldBlock = checkBlock->bbPrev;
+            BasicBlock* const coldBlock = checkBlock->GetBBPrev();
 
             if (!coldBlock->KindIs(BBJ_NONE))
             {
@@ -1079,7 +1079,7 @@ class IndirectCallTransformer
                 return;
             }
 
-            BasicBlock* const hotBlock = coldBlock->bbPrev;
+            BasicBlock* const hotBlock = coldBlock->GetBBPrev();
 
             if (!hotBlock->KindIs(BBJ_ALWAYS) || (hotBlock->bbJumpDest != checkBlock))
             {
diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp
index 9de15947cfdd66..7e8664ee7ac595 100644
--- a/src/coreclr/jit/jiteh.cpp
+++ b/src/coreclr/jit/jiteh.cpp
@@ -32,7 +32,7 @@ BasicBlock* EHblkDsc::BBFilterLast()
     noway_assert(ebdHndBeg != nullptr);
 
     // The last block of the filter is the block immediately preceding the first block of the handler.
-    return ebdHndBeg->bbPrev;
+    return ebdHndBeg->GetBBPrev();
 }
 
 BasicBlock* EHblkDsc::ExFlowBlock()
@@ -107,7 +107,7 @@ bool EHblkDsc::HasFinallyOrFaultHandler()
 
 bool EHblkDsc::InBBRange(BasicBlock* pBlk, BasicBlock* pStart, BasicBlock* pEnd)
 {
-    for (BasicBlock* pWalk = pStart; pWalk != pEnd; pWalk = pWalk->bbNext)
+    for (BasicBlock* pWalk = pStart; pWalk != pEnd; pWalk = pWalk->GetBBNext())
     {
         if (pWalk == pBlk)
         {
@@ -119,7 +119,7 @@ bool EHblkDsc::InBBRange(BasicBlock* pBlk, BasicBlock* pStart, BasicBlock* pEnd)
 
 bool EHblkDsc::InTryRegionBBRange(BasicBlock* pBlk)
 {
-    return InBBRange(pBlk, ebdTryBeg, ebdTryLast->bbNext);
+    return InBBRange(pBlk, ebdTryBeg, ebdTryLast->GetBBNext());
 }
 
 bool EHblkDsc::InFilterRegionBBRange(BasicBlock* pBlk)
@@ -129,7 +129,7 @@ bool EHblkDsc::InFilterRegionBBRange(BasicBlock* pBlk)
 
 bool EHblkDsc::InHndRegionBBRange(BasicBlock* pBlk)
 {
-    return InBBRange(pBlk, ebdHndBeg, ebdHndLast->bbNext);
+    return InBBRange(pBlk, ebdHndBeg, ebdHndLast->GetBBNext());
 }
 
 unsigned EHblkDsc::ebdGetEnclosingRegionIndex(bool* inTryRegion)
@@ -836,7 +836,7 @@ void Compiler::ehUpdateForDeletedBlock(BasicBlock* block)
         return;
     }
 
-    BasicBlock* bPrev = block->bbPrev;
+    BasicBlock* bPrev = block->GetBBPrev();
     assert(bPrev != nullptr);
 
     ehUpdateLastBlocks(block, bPrev);
@@ -865,7 +865,7 @@ bool Compiler::ehCanDeleteEmptyBlock(BasicBlock* block)
 
     if (ehIsBlockEHLast(block))
     {
-        BasicBlock* bPrev = block->bbPrev;
+        BasicBlock* bPrev = block->GetBBPrev();
         if ((bPrev != nullptr) && ehIsBlockEHLast(bPrev))
         {
             return false;
@@ -941,18 +941,18 @@ void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** be
     if (inTryRegion)
     {
         *begBlk = ehDsc->ebdTryBeg;
-        *endBlk = ehDsc->ebdTryLast->bbNext;
+        *endBlk = ehDsc->ebdTryLast->GetBBNext();
     }
     else
     {
         *begBlk = ehDsc->ebdHndBeg;
-        *endBlk = ehDsc->ebdHndLast->bbNext;
+        *endBlk = ehDsc->ebdHndLast->GetBBNext();
     }
 }
 #else // !FEATURE_EH_CALLFINALLY_THUNKS
     EHblkDsc* ehDsc = ehGetDsc(finallyIndex);
     *begBlk = ehDsc->ebdTryBeg;
-    *endBlk = ehDsc->ebdTryLast->bbNext;
+    *endBlk = ehDsc->ebdTryLast->GetBBNext();
 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
 }
@@ -1320,10 +1320,10 @@ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab)
     bLast = nullptr;
 
     // Find the first non-removed block after the 'try' region to end our iteration.
-    bEnd = handlerTab->ebdTryLast->bbNext;
+    bEnd = handlerTab->ebdTryLast->GetBBNext();
     while ((bEnd != nullptr) && (bEnd->bbFlags & BBF_REMOVED))
     {
-        bEnd = bEnd->bbNext;
+        bEnd = bEnd->GetBBNext();
     }
 
     // Update bLast to account for any removed blocks
@@ -1335,7 +1335,7 @@ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab)
             bLast = block;
         }
 
-        block = block->bbNext;
+        block = block->GetBBNext();
         if (block == bEnd)
         {
@@ -1349,10 +1349,10 @@ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab)
     bLast = nullptr;
 
     // Find the first non-removed block after the handler region to end our iteration.
-    bEnd = handlerTab->ebdHndLast->bbNext;
+    bEnd = handlerTab->ebdHndLast->GetBBNext();
     while ((bEnd != nullptr) && (bEnd->bbFlags & BBF_REMOVED))
     {
-        bEnd = bEnd->bbNext;
+        bEnd = bEnd->GetBBNext();
     }
 
     // Update bLast to account for any removed blocks
@@ -1364,7 +1364,7 @@ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab)
             bLast = block;
         }
 
-        block = block->bbNext;
+        block = block->GetBBNext();
         if (block == bEnd)
         {
             break;
@@ -2281,7 +2281,7 @@ bool Compiler::fgNormalizeEHCase2()
                         fgReplaceJumpTarget(predBlock, newTryStart, insertBeforeBlk);
                     }
 
-                    if ((predBlock->bbNext == newTryStart) && predBlock->bbFallsThrough())
+                    if ((predBlock->GetBBNext() == newTryStart) && predBlock->bbFallsThrough())
                     {
                         fgRemoveRefPred(insertBeforeBlk, predBlock);
                         fgAddRefPred(newTryStart, predBlock);
@@ -2295,7 +2295,7 @@ bool Compiler::fgNormalizeEHCase2()
                 // outwards in enclosing try index order, and we'll get to them later.
 
                 // Move the insert block backwards, to the one we just inserted.
-                insertBeforeBlk = insertBeforeBlk->bbPrev;
+                insertBeforeBlk = insertBeforeBlk->GetBBPrev();
                 assert(insertBeforeBlk == newTryStart);
 
                 modified = true;
@@ -3428,7 +3428,8 @@ void Compiler::fgVerifyHandlerTab()
     {
         BasicBlock* blockEnd;
 
-        for (block = HBtab->ebdTryBeg, blockEnd = HBtab->ebdTryLast->bbNext; block != blockEnd; block = block->bbNext)
+        for (block = HBtab->ebdTryBeg, blockEnd = HBtab->ebdTryLast->GetBBNext(); block != blockEnd;
+             block = block->GetBBNext())
         {
             if (blockTryIndex[block->bbNum] == 0)
             {
@@ -3436,8 +3437,9 @@ void Compiler::fgVerifyHandlerTab()
             }
         }
 
-        for (block = (HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg), blockEnd = HBtab->ebdHndLast->bbNext;
-             block != blockEnd; block = block->bbNext)
+        for (block = (HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg),
+            blockEnd = HBtab->ebdHndLast->GetBBNext();
+             block != blockEnd; block = block->GetBBNext())
         {
             if (blockHndIndex[block->bbNum] == 0)
             {
@@ -3465,8 +3467,8 @@ void Compiler::fgVerifyHandlerTab()
             BasicBlock* blockEnd;
 
             for (block = (HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg),
-                blockEnd = HBtab->ebdHndLast->bbNext;
-                 block != blockEnd; block = block->bbNext)
+                blockEnd = HBtab->ebdHndLast->GetBBNext();
+                 block != blockEnd; block = block->GetBBNext())
             {
                 if (blockTryIndex[block->bbNum] == 0)
                 {
@@ -4058,7 +4060,7 @@ void Compiler::fgClearFinallyTargetBit(BasicBlock* block)
     {
         if (predBlock->KindIs(BBJ_ALWAYS) && predBlock->bbJumpDest == block)
         {
-            BasicBlock* pPrev = predBlock->bbPrev;
+            BasicBlock* pPrev = predBlock->GetBBPrev();
             if (pPrev != nullptr)
             {
                 if (pPrev->KindIs(BBJ_CALLFINALLY))
@@ -4110,7 +4112,7 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block)
     if (xtab->HasFinallyHandler())
     {
         assert((xtab->ebdHndBeg == block) || // The normal case
-               ((xtab->ebdHndBeg->bbNext == block) &&
+               ((xtab->ebdHndBeg->GetBBNext() == block) &&
                 (xtab->ebdHndBeg->bbFlags & BBF_INTERNAL))); // After we've already inserted a header block, and we're
         // trying to decide how to split up the predecessor edges.
         if (predBlock->KindIs(BBJ_CALLFINALLY))
@@ -4347,9 +4349,9 @@ bool Compiler::fgRelocateEHRegions()
  */
 void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
 {
-    assert(block->bbPrev != nullptr);
+    assert(block->GetBBPrev() != nullptr);
 
-    BasicBlock* bPrev = block->bbPrev;
+    BasicBlock* bPrev = block->GetBBPrev();
 
     bPrev->copyEHRegion(block);
@@ -4466,7 +4468,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
 
 void Compiler::fgExtendEHRegionAfter(BasicBlock* block)
 {
-    BasicBlock* newBlk = block->bbNext;
+    BasicBlock* newBlk = block->GetBBNext();
     assert(newBlk != nullptr);
 
     newBlk->copyEHRegion(block);
diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp
index d32854e4224c71..787312f519e593 100644
--- a/src/coreclr/jit/liveness.cpp
+++ b/src/coreclr/jit/liveness.cpp
@@ -365,7 +365,7 @@ void Compiler::fgPerBlockLocalVarLiveness()
         }
     }
 
-    for (block = fgFirstBB; block; block = block->bbNext)
+    for (block = fgFirstBB; block; block = block->GetBBNext())
     {
         // Strictly speaking, the assignments for the "Def" cases aren't necessary here.
         // The empty set would do as well. Use means "use-before-def", so as long as that's
@@ -407,7 +407,7 @@ void Compiler::fgPerBlockLocalVarLiveness()
     // memory that is not a GC Heap def.
     byrefStatesMatchGcHeapStates = true;
 
-    for (block = fgFirstBB; block; block = block->bbNext)
+    for (block = fgFirstBB; block; block = block->GetBBNext())
     {
         VarSetOps::ClearD(this, fgCurUseSet);
         VarSetOps::ClearD(this, fgCurDefSet);
@@ -889,8 +889,8 @@ void Compiler::fgExtendDbgLifetimes()
             switch (block->GetBBJumpKind())
             {
                 case BBJ_NONE:
-                    PREFIX_ASSUME(block->bbNext != nullptr);
-                    VarSetOps::UnionD(this, initVars, block->bbNext->bbScope);
+                    PREFIX_ASSUME(block->GetBBNext() != nullptr);
+                    VarSetOps::UnionD(this, initVars, block->GetBBNext()->bbScope);
                     break;
 
                 case BBJ_ALWAYS:
@@ -903,15 +903,15 @@ void Compiler::fgExtendDbgLifetimes()
                     if (!(block->bbFlags & BBF_RETLESS_CALL))
                     {
                         assert(block->isBBCallAlwaysPair());
-                        PREFIX_ASSUME(block->bbNext != nullptr);
-                        VarSetOps::UnionD(this, initVars, block->bbNext->bbScope);
+                        PREFIX_ASSUME(block->GetBBNext() != nullptr);
+                        VarSetOps::UnionD(this, initVars, block->GetBBNext()->bbScope);
                     }
                     VarSetOps::UnionD(this, initVars, block->bbJumpDest->bbScope);
                     break;
 
                 case BBJ_COND:
-                    PREFIX_ASSUME(block->bbNext != nullptr);
-                    VarSetOps::UnionD(this, initVars, block->bbNext->bbScope);
+                    PREFIX_ASSUME(block->GetBBNext() != nullptr);
+                    VarSetOps::UnionD(this, initVars, block->GetBBNext()->bbScope);
                     VarSetOps::UnionD(this, initVars, block->bbJumpDest->bbScope);
                     break;
@@ -1305,11 +1305,11 @@ class LiveVarAnalysis
         m_memoryLiveIn  = emptyMemoryKindSet;
         m_memoryLiveOut = emptyMemoryKindSet;
 
-        for (BasicBlock* block = m_compiler->fgLastBB; block; block = block->bbPrev)
+        for (BasicBlock* block = m_compiler->fgLastBB; block; block = block->GetBBPrev())
         {
             // sometimes block numbers are not monotonically increasing which
            // would cause us not to identify backedges
-            if (block->bbNext && block->bbNext->bbNum <= block->bbNum)
+            if (block->GetBBNext() && block->GetBBNext()->bbNum <= block->bbNum)
             {
                 m_hasPossibleBackEdge = true;
             }
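The liveness hunks above detect possible backedges purely from block-number order: if a block's layout successor has a smaller or equal bbNum, numbering is non-monotonic and a backedge may exist. A sketch of that heuristic, pulled out as a hypothetical helper:

// Illustrative: bbNum ordering as a cheap backedge test, as used by
// LiveVarAnalysis here and by PromotionLiveness further below. A false
// positive only costs an extra fixed-point iteration; missing a backedge
// would be a correctness bug, hence the conservative "<=".
bool MayHaveBackEdge(BasicBlock* block)
{
    BasicBlock* const next = block->GetBBNext();
    return (next != nullptr) && (next->bbNum <= block->bbNum);
}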
diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp
index f976f1d46adf65..a64b7e3609e80c 100644
--- a/src/coreclr/jit/loopcloning.cpp
+++ b/src/coreclr/jit/loopcloning.cpp
@@ -1800,7 +1800,7 @@ bool Compiler::optIsLoopClonable(unsigned loopInd)
     // that block; this is one of those cases. This could be fixed fairly easily; for example,
     // we could add a dummy nop block after the (cloned) loop bottom, in the same handler scope as the
     // loop. This is just a corner to cut to get this working faster.
-    BasicBlock* bbAfterLoop = loop.lpBottom->bbNext;
+    BasicBlock* bbAfterLoop = loop.lpBottom->GetBBNext();
     if (bbAfterLoop != nullptr && bbIsHandlerBeg(bbAfterLoop))
     {
         JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Next block after bottom is a handler start.\n", loopInd);
@@ -2074,7 +2074,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
     {
         assert(b->KindIs(BBJ_COND));
 
-        BasicBlock* x = b->bbNext;
+        BasicBlock* x = b->GetBBNext();
         if (x != nullptr)
         {
             JITDUMP("Create branch around cloned loop\n");
@@ -2188,7 +2188,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
         switch (newblk->GetBBJumpKind())
         {
             case BBJ_NONE:
-                fgAddRefPred(newblk->bbNext, newblk);
+                fgAddRefPred(newblk->GetBBNext(), newblk);
                 break;
 
             case BBJ_ALWAYS:
@@ -2197,7 +2197,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
                 break;
 
             case BBJ_COND:
-                fgAddRefPred(newblk->bbNext, newblk);
+                fgAddRefPred(newblk->GetBBNext(), newblk);
                 fgAddRefPred(newblk->bbJumpDest, newblk);
                 break;
@@ -2245,14 +2245,14 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
         assert(context->HasBlockConditions(loopInd));
         assert(h->KindIs(BBJ_NONE));
-        assert(h->bbNext == h2);
+        assert(h->GetBBNext() == h2);
 
         // If any condition is false, go to slowHead (which branches or falls through to e2).
         BasicBlock* e2      = nullptr;
         bool        foundIt = blockMap->Lookup(loop.lpEntry, &e2);
         assert(foundIt && e2 != nullptr);
 
-        if (slowHead->bbNext != e2)
+        if (slowHead->GetBBNext() != e2)
         {
             // We can't just fall through to the slow path entry, so make it an unconditional branch.
             assert(slowHead->KindIs(BBJ_NONE)); // This is how we created it above.
@@ -2268,8 +2268,8 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
     // Add the fall-through path pred (either to T/E for fall-through from conditions to fast path,
     // or H2 if branch to E of fast path).
     assert(condLast->KindIs(BBJ_COND));
-    JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", condLast->bbNum, condLast->bbNext->bbNum);
-    fgAddRefPred(condLast->bbNext, condLast);
+    JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", condLast->bbNum, condLast->GetBBNext()->bbNum);
+    fgAddRefPred(condLast->GetBBNext(), condLast);
 
     // Don't unroll loops that we've cloned -- the unroller expects any loop it should unroll to
     // initialize the loop counter immediately before entering the loop, but we've left a shared
@@ -2921,8 +2921,8 @@ bool Compiler::optCheckLoopCloningGDVTestProfitable(GenTreeOp* guard, LoopCloneV
 
     // Check for (4)
     //
-    BasicBlock* const hotSuccessor  = guard->OperIs(GT_EQ) ? typeTestBlock->bbJumpDest : typeTestBlock->bbNext;
-    BasicBlock* const coldSuccessor = guard->OperIs(GT_EQ) ? typeTestBlock->bbNext : typeTestBlock->bbJumpDest;
+    BasicBlock* const hotSuccessor  = guard->OperIs(GT_EQ) ? typeTestBlock->bbJumpDest : typeTestBlock->GetBBNext();
+    BasicBlock* const coldSuccessor = guard->OperIs(GT_EQ) ? typeTestBlock->GetBBNext() : typeTestBlock->bbJumpDest;
 
     if (!hotSuccessor->hasProfileWeight() || !coldSuccessor->hasProfileWeight())
     {
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 07f1f95ff4ebd5..82cddeab266984 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -799,7 +799,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
     {
         JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum);
         noway_assert(comp->opts.OptimizationDisabled());
-        if (originalSwitchBB->bbNext == jumpTab[0])
+        if (originalSwitchBB->GetBBNext() == jumpTab[0])
         {
             originalSwitchBB->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp));
             originalSwitchBB->bbJumpDest = nullptr;
@@ -847,7 +847,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
     var_types tempLclType = temp->TypeGet();
 
     BasicBlock* defaultBB   = jumpTab[jumpCnt - 1];
-    BasicBlock* followingBB = originalSwitchBB->bbNext;
+    BasicBlock* followingBB = originalSwitchBB->GetBBNext();
 
     /* Is the number of cases right for a test and jump switch? */
 
     const bool fFirstCaseFollows = (followingBB == jumpTab[0]);
@@ -892,7 +892,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
     // originalSwitchBB is now a BBJ_NONE, and there is a predecessor edge in afterDefaultCondBlock
     // representing the fall-through flow from originalSwitchBB.
     assert(originalSwitchBB->KindIs(BBJ_NONE));
-    assert(originalSwitchBB->bbNext == afterDefaultCondBlock);
+    assert(originalSwitchBB->GetBBNext() == afterDefaultCondBlock);
     assert(afterDefaultCondBlock->KindIs(BBJ_SWITCH));
     assert(afterDefaultCondBlock->bbJumpSwt->bbsHasDefault);
     assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet.
@@ -955,7 +955,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
             assert(jumpTab[i] == uniqueSucc);
             (void)comp->fgRemoveRefPred(uniqueSucc, afterDefaultCondBlock);
         }
-        if (afterDefaultCondBlock->bbNext == uniqueSucc)
+        if (afterDefaultCondBlock->GetBBNext() == uniqueSucc)
         {
             afterDefaultCondBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp));
             afterDefaultCondBlock->bbJumpDest = nullptr;
@@ -1064,7 +1064,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
             // There is a fall-through to the following block. In the loop
             // above, we deleted all the predecessor edges from the switch.
             // In this case, we need to add one back.
-            comp->fgAddRefPred(currentBlock->bbNext, currentBlock);
+            comp->fgAddRefPred(currentBlock->GetBBNext(), currentBlock);
         }
 
         if (!fUsedAfterDefaultCondBlock)
@@ -1221,7 +1221,7 @@ bool Lowering::TryLowerSwitchToBitTest(
     // impacts register allocation.
     //
-    if ((bbSwitch->bbNext != bbCase0) && (bbSwitch->bbNext != bbCase1))
+    if ((bbSwitch->GetBBNext() != bbCase0) && (bbSwitch->GetBBNext() != bbCase1))
     {
         return false;
     }
@@ -1252,7 +1252,7 @@ bool Lowering::TryLowerSwitchToBitTest(
     comp->fgRemoveAllRefPreds(bbCase1, bbSwitch);
     comp->fgRemoveAllRefPreds(bbCase0, bbSwitch);
 
-    if (bbSwitch->bbNext == bbCase0)
+    if (bbSwitch->GetBBNext() == bbCase0)
     {
         // GenCondition::C generates JC so we jump to bbCase1 when the bit is set
         bbSwitchCondition = GenCondition::C;
@@ -1263,7 +1263,7 @@ bool Lowering::TryLowerSwitchToBitTest(
     }
     else
     {
-        assert(bbSwitch->bbNext == bbCase1);
+        assert(bbSwitch->GetBBNext() == bbCase1);
 
         // GenCondition::NC generates JNC so we jump to bbCase0 when the bit is not set
         bbSwitchCondition = GenCondition::NC;
@@ -1288,7 +1288,7 @@ bool Lowering::TryLowerSwitchToBitTest(
         //
         // Fallback to AND(RSZ(bitTable, switchValue), 1)
         //
-        GenTree* tstCns = comp->gtNewIconNode(bbSwitch->bbNext != bbCase0 ? 0 : 1, bitTableType);
+        GenTree* tstCns = comp->gtNewIconNode(bbSwitch->GetBBNext() != bbCase0 ? 0 : 1, bitTableType);
         GenTree* shift  = comp->gtNewOperNode(GT_RSZ, bitTableType, bitTableIcon, switchValue);
         GenTree* one    = comp->gtNewIconNode(1, bitTableType);
         GenTree* andOp  = comp->gtNewOperNode(GT_AND, bitTableType, shift, one);
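TryLowerSwitchToBitTest (above) replaces a small jump table with a bit table: one bit per case records which of the two successors that case reaches, and the switch value indexes into that integer. A standalone sketch of the underlying arithmetic, matching the AND(RSZ(bitTable, switchValue), 1) fallback form in the hunk:

#include <cassert>
#include <cstdint>

// Standalone illustration of the bit-table trick: bit i of bitTable encodes
// the successor for case i, and ((bitTable >> value) & 1) tests it. Which
// polarity means which successor depends on block layout, as the GenCondition
// logic above shows, so the return value here is just "bit set or not".
int SwitchViaBitTable(uint64_t bitTable, unsigned value, unsigned caseCount)
{
    assert(value < caseCount);
    assert(caseCount <= 64); // the table must fit in a register for this to apply
    return static_cast<int>((bitTable >> value) & 1);
}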
diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp
index 88af18d880898e..820cd08a00c25e 100644
--- a/src/coreclr/jit/lsra.cpp
+++ b/src/coreclr/jit/lsra.cpp
@@ -1028,7 +1028,7 @@ void LinearScan::setBlockSequence()
             // For layout order, simply use bbNext
             if (isTraversalLayoutOrder())
             {
-                nextBlock = block->bbNext;
+                nextBlock = block->GetBBNext();
                 continue;
             }
@@ -1483,15 +1483,15 @@ void LinearScan::recordVarLocationsAtStartOfBB(BasicBlock* bb)
                 varDsc->SetRegNum(newRegNum);
                 count++;
 
-                BasicBlock* prevReportedBlock = bb->bbPrev;
-                if (bb->bbPrev != nullptr && bb->bbPrev->isBBCallAlwaysPairTail())
+                BasicBlock* prevReportedBlock = bb->GetBBPrev();
+                if (bb->GetBBPrev() != nullptr && bb->GetBBPrev()->isBBCallAlwaysPairTail())
                 {
                     // For callf+always pair we generate the code for the always
                     // block in genCallFinally and skip it, so we don't report
                     // anything for it (it has only trivial instructions, so that
                     // does not matter much). So whether we need to rehome or not
                     // depends on what we reported at the end of the callf block.
-                    prevReportedBlock = bb->bbPrev->bbPrev;
+                    prevReportedBlock = bb->GetBBPrev()->GetBBPrev();
                 }
 
                 if (prevReportedBlock != nullptr && VarSetOps::IsMember(compiler, prevReportedBlock->bbLiveOut, varIndex))
@@ -2547,7 +2547,8 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block,
             if (predBlock->KindIs(BBJ_COND))
             {
                 // Special handling to improve matching on backedges.
-                BasicBlock* otherBlock = (block == predBlock->bbNext) ? predBlock->bbJumpDest : predBlock->bbNext;
+                BasicBlock* otherBlock =
+                    (block == predBlock->GetBBNext()) ? predBlock->bbJumpDest : predBlock->GetBBNext();
                 noway_assert(otherBlock != nullptr);
                 if (isBlockVisited(otherBlock) && !blockInfo[otherBlock->bbNum].hasEHBoundaryIn)
                 {
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index 31166db9d122c6..6833ee79a09172 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -7482,7 +7482,7 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa
         // block removal on it.
         fgEnsureFirstBBisScratch();
         fgFirstBB->bbFlags |= BBF_DONT_REMOVE;
-        block->bbJumpDest = fgFirstBB->bbNext;
+        block->bbJumpDest = fgFirstBB->GetBBNext();
     }
 
     // Finish hooking things up.
@@ -13154,7 +13154,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
          * Remove the conditional statement */
 
         noway_assert(cond->gtOper == GT_CNS_INT);
-        noway_assert((block->bbNext->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0));
+        noway_assert((block->GetBBNext()->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0));
 
         if (condTree != cond)
        {
@@ -13181,7 +13181,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
             /* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */
             block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
             bTaken    = block->bbJumpDest;
-            bNotTaken = block->bbNext;
+            bNotTaken = block->GetBBNext();
         }
         else
         {
@@ -13196,7 +13196,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
 
             /* JTRUE 0 - transform the basic block into a BBJ_NONE */
             block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
-            bTaken    = block->bbNext;
+            bTaken    = block->GetBBNext();
             bNotTaken = block->bbJumpDest;
         }
@@ -13253,24 +13253,24 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
             switch (bUpdated->GetBBJumpKind())
             {
                 case BBJ_NONE:
-                    edge         = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
+                    edge         = fgGetPredForBlock(bUpdated->GetBBNext(), bUpdated);
                     newMaxWeight = bUpdated->bbWeight;
                     newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
-                    edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
+                    edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->GetBBNext());
                     break;
 
                 case BBJ_COND:
-                    edge         = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
+                    edge         = fgGetPredForBlock(bUpdated->GetBBNext(), bUpdated);
                     newMaxWeight = bUpdated->bbWeight;
                     newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
-                    edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
+                    edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->GetBBNext());
                     FALLTHROUGH;
 
                 case BBJ_ALWAYS:
                     edge         = fgGetPredForBlock(bUpdated->bbJumpDest, bUpdated);
                     newMaxWeight = bUpdated->bbWeight;
                     newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
-                    edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
+                    edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->GetBBNext());
                     break;
 
                 default:
@@ -13421,7 +13421,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
 
             if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1)))
             {
-                if (curJump != block->bbNext)
+                if (curJump != block->GetBBNext())
                 {
                     // transform the basic block into a BBJ_ALWAYS
                     block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
@@ -13925,7 +13925,7 @@ void Compiler::fgMorphBlocks()
             }
         }
 
-        block = block->bbNext;
+        block = block->GetBBNext();
     } while (block != nullptr);
 
     // We are done with the global morphing phase
diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp
index 82d2430b914454..28319dccbad4ae 100644
--- a/src/coreclr/jit/optimizebools.cpp
+++ b/src/coreclr/jit/optimizebools.cpp
@@ -106,7 +106,7 @@ class OptBoolsDsc
 //      B3: GT_RETURN (BBJ_RETURN)
 //      B4: GT_RETURN (BBJ_RETURN)
 //
-// Case 2: if B1.bbJumpDest == B2->bbNext, it transforms
+// Case 2: if B1.bbJumpDest == B2->GetBBNext(), it transforms
 //      B1 : brtrue(t1, B3)
 //      B2 : brtrue(t2, Bx)
 //      B3 :
@@ -136,7 +136,7 @@ bool OptBoolsDsc::optOptimizeBoolsCondBlock()
         m_sameTarget = true;
     }
-    else if (m_b1->bbJumpDest == m_b2->bbNext)
+    else if (m_b1->bbJumpDest == m_b2->GetBBNext())
     {
         // Given the following sequence of blocks :
         //        B1: brtrue(t1, B3)
@@ -480,13 +480,13 @@ bool OptBoolsDsc::optOptimizeCompareChainCondBlock()
     m_t3 = nullptr;
 
     bool foundEndOfOrConditions = false;
-    if ((m_b1->bbNext == m_b2) && (m_b1->bbJumpDest == m_b2->bbNext))
+    if ((m_b1->GetBBNext() == m_b2) && (m_b1->bbJumpDest == m_b2->GetBBNext()))
     {
         // Found the end of two (or more) conditions being ORed together.
         // The final condition has been inverted.
         foundEndOfOrConditions = true;
     }
-    else if ((m_b1->bbNext == m_b2) && (m_b1->bbJumpDest == m_b2->bbJumpDest))
+    else if ((m_b1->GetBBNext() == m_b2) && (m_b1->bbJumpDest == m_b2->bbJumpDest))
     {
         // Found two conditions connected together.
     }
@@ -848,7 +848,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
     }
     else
     {
-        edge2 = m_comp->fgGetPredForBlock(m_b2->bbNext, m_b2);
+        edge2 = m_comp->fgGetPredForBlock(m_b2->GetBBNext(), m_b2);
 
         m_comp->fgRemoveRefPred(m_b1->bbJumpDest, m_b1);
@@ -882,7 +882,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
         m_b1->bbJumpSwt = m_b2->bbJumpSwt;
 #endif
         assert(m_b2->KindIs(BBJ_RETURN));
-        assert(m_b1->bbNext == m_b2);
+        assert(m_b1->GetBBNext() == m_b2);
         assert(m_b3 != nullptr);
     }
     else
@@ -890,8 +890,8 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
         assert(m_b1->KindIs(BBJ_COND));
         assert(m_b2->KindIs(BBJ_COND));
         assert(m_b1->bbJumpDest == m_b2->bbJumpDest);
-        assert(m_b1->bbNext == m_b2);
-        assert(m_b2->bbNext != nullptr);
+        assert(m_b1->GetBBNext() == m_b2);
+        assert(m_b2->GetBBNext() != nullptr);
     }
 
     if (!optReturnBlock)
     {
@@ -900,7 +900,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
         //
         // Replace pred 'm_b2' for 'm_b2->bbNext' with 'm_b1'
         // Remove pred 'm_b2' for 'm_b2->bbJumpDest'
-        m_comp->fgReplacePred(m_b2->bbNext, m_b2, m_b1);
+        m_comp->fgReplacePred(m_b2->GetBBNext(), m_b2, m_b1);
         m_comp->fgRemoveRefPred(m_b2->bbJumpDest, m_b2);
     }
@@ -1463,7 +1463,7 @@ PhaseStatus Compiler::optOptimizeBools()
         numPasses++;
         change = false;
 
-        for (BasicBlock* b1 = fgFirstBB; b1 != nullptr; b1 = retry ? b1 : b1->bbNext)
+        for (BasicBlock* b1 = fgFirstBB; b1 != nullptr; b1 = retry ? b1 : b1->GetBBNext())
         {
             retry = false;
@@ -1476,7 +1476,7 @@ PhaseStatus Compiler::optOptimizeBools()
 
             // If there is no next block, we're done
 
-            BasicBlock* b2 = b1->bbNext;
+            BasicBlock* b2 = b1->GetBBNext();
             if (b2 == nullptr)
             {
                 break;
@@ -1494,7 +1494,7 @@ PhaseStatus Compiler::optOptimizeBools()
 
             if (b2->KindIs(BBJ_COND))
             {
-                if ((b1->bbJumpDest != b2->bbJumpDest) && (b1->bbJumpDest != b2->bbNext))
+                if ((b1->bbJumpDest != b2->bbJumpDest) && (b1->bbJumpDest != b2->GetBBNext()))
                 {
                     continue;
                 }
diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp
index 75f4c7ed4cb83e..91433579efd5e6 100644
--- a/src/coreclr/jit/optimizer.cpp
+++ b/src/coreclr/jit/optimizer.cpp
@@ -490,7 +490,7 @@ void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmar
     {
         reportBefore();
         /* The loop has a new head - Just update the loop table */
-        loop.lpHead = block->bbPrev;
+        loop.lpHead = block->GetBBPrev();
     }
 
     reportAfter();
@@ -741,9 +741,9 @@ bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenT
     bool initBlockOk = (predBlock == initBlock);
     if (!initBlockOk)
     {
-        if (predBlock->KindIs(BBJ_NONE) && (predBlock->bbNext == optLoopTable[loopInd].lpEntry) &&
+        if (predBlock->KindIs(BBJ_NONE) && (predBlock->GetBBNext() == optLoopTable[loopInd].lpEntry) &&
             (predBlock->countOfInEdges() == 1) && (predBlock->firstStmt() == nullptr) &&
-            (predBlock->bbPrev != nullptr) && predBlock->bbPrev->bbFallsThrough())
+            (predBlock->GetBBPrev() != nullptr) && predBlock->GetBBPrev()->bbFallsThrough())
         {
             initBlockOk = true;
         }
@@ -1150,10 +1150,10 @@ bool Compiler::optExtractInitTestIncr(
         // If we are rebuilding the loop table, we would already have the pre-header block introduced
         // the first time, which might be empty if no hoisting has yet occurred. In this case, look a
        // little harder for the possible loop initialization statement.
-        if (initBlock->KindIs(BBJ_NONE) && (initBlock->bbNext == top) && (initBlock->countOfInEdges() == 1) &&
-            (initBlock->bbPrev != nullptr) && initBlock->bbPrev->bbFallsThrough())
+        if (initBlock->KindIs(BBJ_NONE) && (initBlock->GetBBNext() == top) && (initBlock->countOfInEdges() == 1) &&
+            (initBlock->GetBBPrev() != nullptr) && initBlock->GetBBPrev()->bbFallsThrough())
         {
-            initBlock = initBlock->bbPrev;
+            initBlock = initBlock->GetBBPrev();
             phdrStmt  = initBlock->firstStmt();
         }
     }
@@ -1377,7 +1377,7 @@ void Compiler::optCheckPreds()
         {
             // make sure this pred is part of the BB list
             BasicBlock* bb;
-            for (bb = fgFirstBB; bb; bb = bb->bbNext)
+            for (bb = fgFirstBB; bb; bb = bb->GetBBNext())
             {
                 if (bb == predBlock)
                 {
@@ -1394,7 +1394,7 @@ void Compiler::optCheckPreds()
                     }
                     FALLTHROUGH;
                 case BBJ_NONE:
-                    noway_assert(bb->bbNext == block);
+                    noway_assert(bb->GetBBNext() == block);
                     break;
                 case BBJ_EHFILTERRET:
                 case BBJ_ALWAYS:
@@ -1888,7 +1888,8 @@ class LoopSearch
             // otherwise the loop is still valid and this may be a (flow-wise) back-edge
            // of an outer loop. For the dominance test, if `predBlock` is a new block, use
             // its unique predecessor since the dominator tree has info for that.
-            BasicBlock* effectivePred = (predBlock->bbNum > oldBlockMaxNum ? predBlock->bbPrev : predBlock);
+            BasicBlock* effectivePred =
+                (predBlock->bbNum > oldBlockMaxNum ? predBlock->GetBBPrev() : predBlock);
             if (comp->fgDominate(entry, effectivePred))
             {
                 // Outer loop back-edge
@@ -1923,14 +1924,14 @@ class LoopSearch
                     isFirstVisit = true;
                 }
 
-                if (isFirstVisit && (predBlock->bbNext != nullptr) &&
-                    (PositionNum(predBlock->bbNext) == predBlock->bbNum))
+                if (isFirstVisit && (predBlock->GetBBNext() != nullptr) &&
+                    (PositionNum(predBlock->GetBBNext()) == predBlock->bbNum))
                 {
                     // We've created a new block immediately after `predBlock` to
                     // reconnect what was fall-through. Mark it as in-loop also;
                     // it needs to stay with `prev` and if it exits the loop we'd
                     // just need to re-create it if we tried to move it out.
-                    loopBlocks.Insert(predBlock->bbNext->bbNum);
+                    loopBlocks.Insert(predBlock->GetBBNext()->bbNum);
                 }
             }
         }
@@ -1960,9 +1961,9 @@ class LoopSearch
             // This must be a block we inserted to connect fall-through after moving blocks.
             // To determine if it's in the loop or not, use the number of its unique predecessor
             // block.
-            assert(block->bbPreds->getSourceBlock() == block->bbPrev);
+            assert(block->bbPreds->getSourceBlock() == block->GetBBPrev());
             assert(block->bbPreds->getNextPredEdge() == nullptr);
-            return block->bbPrev->bbNum;
+            return block->GetBBPrev()->bbNum;
         }
         return block->bbNum;
     }
@@ -1982,9 +1983,9 @@ class LoopSearch
         // Compaction (if it needs to happen) will require an insertion point.
         BasicBlock* moveAfter = nullptr;
 
-        for (BasicBlock* previous = top->bbPrev; previous != bottom;)
+        for (BasicBlock* previous = top->GetBBPrev(); previous != bottom;)
         {
-            BasicBlock* block = previous->bbNext;
+            BasicBlock* block = previous->GetBBNext();
             if (loopBlocks.IsMember(block->bbNum))
             {
@@ -2008,11 +2009,11 @@ class LoopSearch
             // If so, give up on recognition of this loop.
             //
             BasicBlock* lastNonLoopBlock = block;
-            BasicBlock* nextLoopBlock    = block->bbNext;
+            BasicBlock* nextLoopBlock    = block->GetBBNext();
             while ((nextLoopBlock != nullptr) && !loopBlocks.IsMember(nextLoopBlock->bbNum))
             {
                 lastNonLoopBlock = nextLoopBlock;
-                nextLoopBlock    = nextLoopBlock->bbNext;
+                nextLoopBlock    = nextLoopBlock->GetBBNext();
             }
 
             if (nextLoopBlock == nullptr)
@@ -2048,7 +2049,7 @@ class LoopSearch
             }
 
             // Now physically move the blocks.
-            BasicBlock* moveBefore = moveAfter->bbNext;
+            BasicBlock* moveBefore = moveAfter->GetBBNext();
 
             comp->fgUnlinkRange(block, lastNonLoopBlock);
             comp->fgMoveBlocksAfter(block, lastNonLoopBlock, moveAfter);
@@ -2135,7 +2136,7 @@ class LoopSearch
     //
     BasicBlock* TryAdvanceInsertionPoint(BasicBlock* oldMoveAfter)
     {
-        BasicBlock* newMoveAfter = oldMoveAfter->bbNext;
+        BasicBlock* newMoveAfter = oldMoveAfter->GetBBNext();
 
         if (!BasicBlock::sameEHRegion(oldMoveAfter, newMoveAfter))
         {
@@ -2324,7 +2325,7 @@ class LoopSearch
         else if (block->KindIs(BBJ_ALWAYS) && (block->bbJumpDest == newNext))
         {
             // We've made `block`'s jump target its bbNext, so remove the jump.
-            if (!comp->fgOptimizeBranchToNext(block, newNext, block->bbPrev))
+            if (!comp->fgOptimizeBranchToNext(block, newNext, block->GetBBPrev()))
             {
                 // If optimizing away the goto-next failed for some reason, mark it KEEP_BBJ_ALWAYS to
                 // prevent assertions from complaining about it.
@@ -2463,7 +2464,7 @@ class LoopSearch
                 break;
             }
 
-            if (block->bbFallsThrough() && !loopBlocks.IsMember(block->bbNext->bbNum))
+            if (block->bbFallsThrough() && !loopBlocks.IsMember(block->GetBBNext()->bbNum))
             {
                 // Found a fall-through exit.
                 lastExit = block;
@@ -2502,9 +2503,9 @@ void Compiler::optFindNaturalLoops()
 
     LoopSearch search(this);
 
-    for (BasicBlock* head = fgFirstBB; head->bbNext != nullptr; head = head->bbNext)
+    for (BasicBlock* head = fgFirstBB; head->GetBBNext() != nullptr; head = head->GetBBNext())
     {
-        BasicBlock* top = head->bbNext;
+        BasicBlock* top = head->GetBBNext();
 
         // Blocks that are rarely run have a zero bbWeight and should never be optimized here
         if (top->bbWeight == BB_ZERO_WEIGHT)
         {
@@ -2733,7 +2734,7 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R
 
     if (addPreds && blk->bbFallsThrough())
     {
-        fgAddRefPred(blk->bbNext, blk);
+        fgAddRefPred(blk->GetBBNext(), blk);
     }
 
     BasicBlock* newJumpDest = nullptr;
@@ -3031,7 +3032,7 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd)
         //
         BasicBlock* const t = optLoopTable[loopInd].lpTop;
         assert(siblingB->KindIs(BBJ_COND));
-        assert(siblingB->bbNext == t);
+        assert(siblingB->GetBBNext() == t);
 
         JITDUMP(FMT_LP " head " FMT_BB " is also " FMT_LP " bottom\n", loopInd, h->bbNum, sibling);
@@ -3205,7 +3206,7 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati
     // Because of this, introducing a block before t automatically gives us
     // the right flow out of h.
     //
-    assert(h->bbNext == t);
+    assert(h->GetBBNext() == t);
     assert(h->bbFallsThrough());
     assert(h->KindIs(BBJ_NONE, BBJ_COND));
     if (h->KindIs(BBJ_COND))
@@ -3329,8 +3330,8 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati
         }
     }
 
-    assert(h->bbNext == newT);
-    assert(newT->bbNext == t);
+    assert(h->GetBBNext() == newT);
+    assert(newT->GetBBNext() == t);
 
     // With the Option::Current we are changing which block is loop top.
     // Make suitable updates.
@@ -3360,7 +3361,7 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati
          childLoop = optLoopTable[childLoop].lpSibling)
     {
         if ((optLoopTable[childLoop].lpEntry == origE) && (optLoopTable[childLoop].lpHead == h) &&
-            newT->KindIs(BBJ_NONE) && (newT->bbNext == origE))
+            newT->KindIs(BBJ_NONE) && (newT->GetBBNext() == origE))
         {
             optUpdateLoopHead(childLoop, h, newT);
@@ -3434,7 +3435,7 @@ BasicBlock* Compiler::optLoopEntry(BasicBlock* preHeader)
 
     if (preHeader->KindIs(BBJ_NONE))
     {
-        return preHeader->bbNext;
+        return preHeader->GetBBNext();
     }
     else
     {
@@ -4347,7 +4348,7 @@ PhaseStatus Compiler::optUnrollLoops()
         BlockToBlockMap blockMap(getAllocator(CMK_LoopOpt));
         BasicBlock* insertAfter = bottom;
-        BasicBlock* const tail = bottom->bbNext;
+        BasicBlock* const tail = bottom->GetBBNext();
         BasicBlock::loopNumber newLoopNum = loop.lpParent;
         bool anyNestedLoopsUnrolledThisLoop = false;
         int lval;
@@ -4358,7 +4359,7 @@ PhaseStatus Compiler::optUnrollLoops()
         // Note: we can't use the loop.LoopBlocks() iterator, as it captures loop.lpBottom->bbNext at the
         // beginning of iteration, and we insert blocks before that. So we need to evaluate lpBottom->bbNext
         // every iteration.
-        for (BasicBlock* block = loop.lpTop; block != loop.lpBottom->bbNext; block = block->bbNext)
+        for (BasicBlock* block = loop.lpTop; block != loop.lpBottom->GetBBNext(); block = block->GetBBNext())
         {
             BasicBlock* newBlock = insertAfter =
                 fgNewBBafter(block->GetBBJumpKind(), insertAfter, /*extendRegion*/ true);
@@ -4370,8 +4371,7 @@ PhaseStatus Compiler::optUnrollLoops()
                 // to clone a block in the loop, splice out and forget all the blocks we cloned so far:
                 // put the loop blocks back to how they were before we started cloning blocks,
                 // and abort unrolling the loop.
-                bottom->bbNext = tail;
-                tail->bbPrev   = bottom;
+                bottom->SetBBNext(tail);
                 loop.lpFlags |= LPFLG_DONT_UNROLL; // Mark it so we don't try to unroll it again.
                 INDEBUG(++unrollFailures);
                 JITDUMP("Failed to unroll loop " FMT_LP ": block cloning failed on " FMT_BB "\n", lnum,
@@ -4422,7 +4422,7 @@ PhaseStatus Compiler::optUnrollLoops()
             // Now redirect any branches within the newly-cloned iteration.
            // Don't include `bottom` in the iteration, since we've already changed the
             // newBlock->bbJumpKind, above.
-            for (BasicBlock* block = loop.lpTop; block != loop.lpBottom; block = block->bbNext)
+            for (BasicBlock* block = loop.lpTop; block != loop.lpBottom; block = block->GetBBNext())
             {
                 BasicBlock* newBlock = blockMap[block];
                 optCopyBlkDest(block, newBlock);
@@ -4434,7 +4434,7 @@ PhaseStatus Compiler::optUnrollLoops()
             // After doing this, all the newly cloned blocks now have proper flow and pred lists.
             //
             BasicBlock* const clonedTop = blockMap[loop.lpTop];
-            fgAddRefPred(clonedTop, clonedTop->bbPrev);
+            fgAddRefPred(clonedTop, clonedTop->GetBBPrev());
 
             /* update the new value for the unrolled iterator */
@@ -4478,7 +4478,7 @@ PhaseStatus Compiler::optUnrollLoops()
                 //
                 for (BasicBlock* succ : block->Succs(this))
                 {
-                    if ((block == bottom) && (succ == bottom->bbNext))
+                    if ((block == bottom) && (succ == bottom->GetBBNext()))
                     {
                         continue;
                     }
@@ -4735,7 +4735,7 @@ bool Compiler::optReachWithoutCall(BasicBlock* topBB, BasicBlock* botBB)
             }
         }
 
-        curBB = curBB->bbNext;
+        curBB = curBB->GetBBNext();
     }
 
     // If we didn't find any blocks that contained a gc safe point and
@@ -4860,14 +4860,14 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
     //
     BasicBlock* const bTop = bTest->bbJumpDest;
 
-    if (bTop != block->bbNext)
+    if (bTop != block->GetBBNext())
     {
         return false;
     }
 
     // Since bTest is a BBJ_COND it will have a bbNext
     //
-    BasicBlock* const bJoin = bTest->bbNext;
+    BasicBlock* const bJoin = bTest->GetBBNext();
     noway_assert(bJoin != nullptr);
 
     // 'block' must be in the same try region as the condition, since we're going to insert a duplicated condition
@@ -4879,7 +4879,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
         return false;
     }
 
-    // The duplicated condition block will branch to bTest->bbNext, so that also better be in the
+    // The duplicated condition block will branch to bTest->GetBBNext(), so that also better be in the
     // same try region (or no try region) to avoid generating illegal flow.
     if (bJoin->hasTryIndex() && !BasicBlock::sameTryRegion(block, bJoin))
     {
@@ -5216,15 +5216,15 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
         weight_t const testToAfterWeight = weightTop * testToAfterLikelihood;
 
         FlowEdge* const edgeTestToNext  = fgGetPredForBlock(bTop, bTest);
-        FlowEdge* const edgeTestToAfter = fgGetPredForBlock(bTest->bbNext, bTest);
+        FlowEdge* const edgeTestToAfter = fgGetPredForBlock(bTest->GetBBNext(), bTest);
 
         JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (iterate loop)\n", bTest->bbNum,
                 bTop->bbNum, testToNextWeight);
         JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (exit loop)\n", bTest->bbNum,
-                bTest->bbNext->bbNum, testToAfterWeight);
+                bTest->GetBBNext()->bbNum, testToAfterWeight);
 
         edgeTestToNext->setEdgeWeights(testToNextWeight, testToNextWeight, bTop);
-        edgeTestToAfter->setEdgeWeights(testToAfterWeight, testToAfterWeight, bTest->bbNext);
+        edgeTestToAfter->setEdgeWeights(testToAfterWeight, testToAfterWeight, bTest->GetBBNext());
 
         // Adjust edges out of block, using the same distribution.
         //
@@ -5236,15 +5236,15 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
         weight_t const blockToNextWeight  = weightBlock * blockToNextLikelihood;
         weight_t const blockToAfterWeight = weightBlock * blockToAfterLikelihood;
 
-        FlowEdge* const edgeBlockToNext  = fgGetPredForBlock(bNewCond->bbNext, bNewCond);
+        FlowEdge* const edgeBlockToNext  = fgGetPredForBlock(bNewCond->GetBBNext(), bNewCond);
         FlowEdge* const edgeBlockToAfter = fgGetPredForBlock(bNewCond->bbJumpDest, bNewCond);
 
         JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (enter loop)\n", bNewCond->bbNum,
-                bNewCond->bbNext->bbNum, blockToNextWeight);
+                bNewCond->GetBBNext()->bbNum, blockToNextWeight);
         JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (avoid loop)\n", bNewCond->bbNum,
                 bNewCond->bbJumpDest->bbNum, blockToAfterWeight);
 
-        edgeBlockToNext->setEdgeWeights(blockToNextWeight, blockToNextWeight, bNewCond->bbNext);
+        edgeBlockToNext->setEdgeWeights(blockToNextWeight, blockToNextWeight, bNewCond->GetBBNext());
         edgeBlockToAfter->setEdgeWeights(blockToAfterWeight, blockToAfterWeight, bNewCond->bbJumpDest);
 
 #ifdef DEBUG
@@ -5253,7 +5253,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
         if ((activePhaseChecks & PhaseChecks::CHECK_PROFILE) == PhaseChecks::CHECK_PROFILE)
         {
             const ProfileChecks checks        = (ProfileChecks)JitConfig.JitProfileChecks();
-            const bool          nextProfileOk = fgDebugCheckIncomingProfileData(bNewCond->bbNext, checks);
+            const bool          nextProfileOk = fgDebugCheckIncomingProfileData(bNewCond->GetBBNext(), checks);
             const bool          jumpProfileOk = fgDebugCheckIncomingProfileData(bNewCond->bbJumpDest, checks);
 
             if (hasFlag(checks, ProfileChecks::RAISE_ASSERT))
@@ -5269,7 +5269,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
     if (verbose)
     {
         printf("\nDuplicated loop exit block at " FMT_BB " for loop (" FMT_BB " - " FMT_BB ")\n", bNewCond->bbNum,
-               bNewCond->bbNext->bbNum, bTest->bbNum);
+               bNewCond->GetBBNext()->bbNum, bTest->bbNum);
         printf("Estimated code size expansion is %d\n", estDupCostSz);
 
         fgDumpBlock(bNewCond);
@@ -6215,7 +6215,7 @@ bool Compiler::optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip,
             break;
         }
 
-        beg = beg->bbNext;
+        beg = beg->GetBBNext();
     }
 
     return false;
@@ -6278,7 +6278,7 @@ bool Compiler::optIsVarAssgLoop(unsigned lnum, unsigned var)
             return true;
         }
 
-        return optIsVarAssigned(optLoopTable[lnum].lpHead->bbNext, optLoopTable[lnum].lpBottom, nullptr, var);
+        return optIsVarAssigned(optLoopTable[lnum].lpHead->GetBBNext(), optLoopTable[lnum].lpBottom, nullptr, var);
     }
 }
@@ -7987,7 +7987,7 @@ bool Compiler::optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* loopVnInv
 //
 void Compiler::fgSetEHRegionForNewLoopHead(BasicBlock* newHead, BasicBlock* top)
 {
-    assert(newHead->bbNext == top);
+    assert(newHead->GetBBNext() == top);
     assert(!fgIsFirstBlockOfFilterOrHandler(top));
 
     if ((top->bbFlags & BBF_TRY_BEG) != 0)
@@ -8200,13 +8200,13 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum)
     {
         // Allow for either the fall-through or branch to target 'entry'.
         BasicBlock* skipLoopBlock;
-        if (head->bbNext == entry)
+        if (head->GetBBNext() == entry)
         {
             skipLoopBlock = head->bbJumpDest;
         }
         else
         {
-            skipLoopBlock = head->bbNext;
+            skipLoopBlock = head->GetBBNext();
         }
         assert(skipLoopBlock != entry);
@@ -8302,7 +8302,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum)
             case BBJ_NONE:
                 // This 'entry' predecessor that isn't dominated by 'entry' must be outside the loop,
                 // meaning it must be fall-through to 'entry', and we must have a top-entry loop.
-                noway_assert((entry == top) && (predBlock == head) && (predBlock->bbNext == preHead));
+                noway_assert((entry == top) && (predBlock == head) && (predBlock->GetBBNext() == preHead));
                 fgRemoveRefPred(entry, predBlock);
                 fgAddRefPred(preHead, predBlock);
                 break;
@@ -8311,11 +8311,11 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum)
                 if (predBlock->bbJumpDest == entry)
                 {
                     predBlock->bbJumpDest = preHead;
-                    noway_assert(predBlock->bbNext != preHead);
+                    noway_assert(predBlock->GetBBNext() != preHead);
                 }
                 else
                 {
-                    noway_assert((entry == top) && (predBlock == head) && (predBlock->bbNext == preHead));
+                    noway_assert((entry == top) && (predBlock == head) && (predBlock->GetBBNext() == preHead));
                 }
                 fgRemoveRefPred(entry, predBlock);
                 fgAddRefPred(preHead, predBlock);
diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp
index 017509086d208a..d870740f2d367f 100644
--- a/src/coreclr/jit/patchpoint.cpp
+++ b/src/coreclr/jit/patchpoint.cpp
@@ -52,7 +52,7 @@ class PatchpointTransformer
         }
 
         int count = 0;
-        for (BasicBlock* const block : compiler->Blocks(compiler->fgFirstBB->bbNext))
+        for (BasicBlock* const block : compiler->Blocks(compiler->fgFirstBB->GetBBNext()))
         {
             if (block->bbFlags & BBF_PATCHPOINT)
             {
diff --git a/src/coreclr/jit/promotionliveness.cpp b/src/coreclr/jit/promotionliveness.cpp
index 77078bddb4c297..c8e004c88f4b55 100644
--- a/src/coreclr/jit/promotionliveness.cpp
+++ b/src/coreclr/jit/promotionliveness.cpp
@@ -299,9 +299,9 @@ void PromotionLiveness::InterBlockLiveness()
     {
         changed = false;
 
-        for (BasicBlock* block = m_compiler->fgLastBB; block != nullptr; block = block->bbPrev)
+        for (BasicBlock* block = m_compiler->fgLastBB; block != nullptr; block = block->GetBBPrev())
         {
-            m_hasPossibleBackEdge |= block->bbNext && (block->bbNext->bbNum <= block->bbNum);
+            m_hasPossibleBackEdge |= block->GetBBNext() && (block->GetBBNext()->bbNum <= block->bbNum);
             changed |= PerBlockLiveness(block);
         }
diff --git a/src/coreclr/jit/rangecheck.cpp b/src/coreclr/jit/rangecheck.cpp
index faf0641451d4fd..38f632dffa1682 100644
--- a/src/coreclr/jit/rangecheck.cpp
+++ b/src/coreclr/jit/rangecheck.cpp
@@ -935,7 +935,7 @@ void RangeCheck::MergeAssertion(BasicBlock* block, GenTree* op, Range* pRange DE
     {
         GenTreePhiArg* arg  = (GenTreePhiArg*)op;
         BasicBlock*    pred = arg->gtPredBB;
-        if (pred->bbFallsThrough() && pred->bbNext == block)
+        if (pred->bbFallsThrough() && pred->GetBBNext() == block)
         {
             assertions = pred->bbAssertionOut;
             JITDUMP("Merge assertions from pred " FMT_BB " edge: ", pred->bbNum);
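The redundantbranchopts hunks that follow work off a dominating BBJ_COND's two successors, bbJumpDest (true side) and the fall-through block (false side): if only one of them can reach the dominated block, the relop's value there is known. A hedged sketch of that core inference; the reachability flags stand in for the real flow queries:

// Illustrative only: the decision at the heart of optRedundantBranch. The
// names trueReaches/falseReaches are hypothetical placeholders for the
// dominator/flow tracing the real code performs.
int InferRelopValue(bool trueReaches, bool falseReaches)
{
    if (trueReaches && !falseReaches)
    {
        return 1; // relop must be true on every path into the block
    }
    if (!trueReaches && falseReaches)
    {
        return 0; // relop must be false on every path into the block
    }
    return -1; // both successors reach: nothing can be inferred
}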
// @@ -613,7 +613,7 @@ bool Compiler::optRedundantBranch(BasicBlock* const block) // const bool relopIsFalse = rii.reverseSense ^ (domIsSameRelop | domIsInferredRelop); JITDUMP("Fall through successor " FMT_BB " of " FMT_BB " reaches, relop [%06u] must be %s\n", - domBlock->bbNext->bbNum, domBlock->bbNum, dspTreeID(tree), + domBlock->GetBBNext()->bbNum, domBlock->bbNum, dspTreeID(tree), relopIsFalse ? "false" : "true"); relopValue = relopIsFalse ? 0 : 1; break; @@ -711,7 +711,7 @@ struct JumpThreadInfo JumpThreadInfo(Compiler* comp, BasicBlock* block) : m_block(block) , m_trueTarget(block->bbJumpDest) - , m_falseTarget(block->bbNext) + , m_falseTarget(block->GetBBNext()) , m_fallThroughPred(nullptr) , m_ambiguousVNBlock(nullptr) , m_truePreds(BlockSetOps::MakeEmpty(comp)) @@ -1072,8 +1072,8 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl // latter should prove useful in subsequent work, where we aim to enable jump // threading in cases where block has side effects. // - BasicBlock* const domTrueSuccessor = domIsSameRelop ? domBlock->bbJumpDest : domBlock->bbNext; - BasicBlock* const domFalseSuccessor = domIsSameRelop ? domBlock->bbNext : domBlock->bbJumpDest; + BasicBlock* const domTrueSuccessor = domIsSameRelop ? domBlock->bbJumpDest : domBlock->GetBBNext(); + BasicBlock* const domFalseSuccessor = domIsSameRelop ? domBlock->GetBBNext() : domBlock->bbJumpDest; JumpThreadInfo jti(this, block); for (BasicBlock* const predBlock : block->PredBlocks()) @@ -1143,7 +1143,7 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl // Note if the true or false pred is the fall through pred. // - if (predBlock->bbNext == block) + if (predBlock->GetBBNext() == block) { JITDUMP(FMT_BB " is the fall-through pred\n", predBlock->bbNum); assert(jti.m_fallThroughPred == nullptr); @@ -1403,7 +1403,7 @@ bool Compiler::optJumpThreadPhi(BasicBlock* block, GenTree* tree, ValueNum treeN // Note if the true or false pred is the fall through pred. // - if (predBlock->bbNext == block) + if (predBlock->GetBBNext() == block) { JITDUMP(FMT_BB " is the fall-through pred\n", predBlock->bbNum); assert(jti.m_fallThroughPred == nullptr); diff --git a/src/coreclr/jit/switchrecognition.cpp b/src/coreclr/jit/switchrecognition.cpp index 125c2cf2fbebee..956db8444ff8e9 100644 --- a/src/coreclr/jit/switchrecognition.cpp +++ b/src/coreclr/jit/switchrecognition.cpp @@ -26,7 +26,7 @@ PhaseStatus Compiler::optSwitchRecognition() // a series of ccmp instruction (see ifConvert phase). #ifdef TARGET_XARCH bool modified = false; - for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) + for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->GetBBNext()) { // block->KindIs(BBJ_COND) check is for better throughput. if (block->KindIs(BBJ_COND) && !block->isRunRarely() && optSwitchDetectAndConvert(block)) @@ -95,10 +95,10 @@ bool IsConstantTestCondBlock(const BasicBlock* block, } *isReversed = rootNode->gtGetOp1()->OperIs(GT_NE); - *blockIfTrue = *isReversed ? block->bbNext : block->bbJumpDest; - *blockIfFalse = *isReversed ? block->bbJumpDest : block->bbNext; + *blockIfTrue = *isReversed ? block->GetBBNext() : block->bbJumpDest; + *blockIfFalse = *isReversed ? 
block->bbJumpDest : block->GetBBNext(); - if ((block->bbNext == block->bbJumpDest) || (block->bbJumpDest == block)) + if ((block->GetBBNext() == block->bbJumpDest) || (block->bbJumpDest == block)) { // Ignoring weird cases like a condition jumping to itself return false; @@ -166,7 +166,7 @@ bool Compiler::optSwitchDetectAndConvert(BasicBlock* firstBlock) const BasicBlock* prevBlock = firstBlock; // Now walk the next blocks and see if they are basically the same type of test - for (const BasicBlock* currBb = firstBlock->bbNext; currBb != nullptr; currBb = currBb->bbNext) + for (const BasicBlock* currBb = firstBlock->GetBBNext(); currBb != nullptr; currBb = currBb->GetBBNext()) { GenTree* currVariableNode = nullptr; ssize_t currCns = 0; @@ -309,7 +309,7 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* const BasicBlock* lastBlock = firstBlock; for (int i = 0; i < testsCount - 1; i++) { - lastBlock = lastBlock->bbNext; + lastBlock = lastBlock->GetBBNext(); } BasicBlock* blockIfTrue = nullptr; @@ -338,11 +338,11 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* gtUpdateStmtSideEffects(firstBlock->lastStmt()); // Unlink and remove the whole chain of conditional blocks - BasicBlock* blockToRemove = firstBlock->bbNext; + BasicBlock* blockToRemove = firstBlock->GetBBNext(); fgRemoveRefPred(blockToRemove, firstBlock); - while (blockToRemove != lastBlock->bbNext) + while (blockToRemove != lastBlock->GetBBNext()) { - BasicBlock* nextBlock = blockToRemove->bbNext; + BasicBlock* nextBlock = blockToRemove->GetBBNext(); fgRemoveBlock(blockToRemove, true); blockToRemove = nextBlock; } @@ -351,12 +351,12 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* assert((jumpCount > 0) && (jumpCount <= SWITCH_MAX_DISTANCE + 1)); const auto jmpTab = new (this, CMK_BasicBlock) BasicBlock*[jumpCount + 1 /*default case*/]; + fgHasSwitch = true; firstBlock->bbJumpSwt = new (this, CMK_BasicBlock) BBswtDesc; firstBlock->bbJumpSwt->bbsCount = jumpCount + 1; firstBlock->bbJumpSwt->bbsHasDefault = true; firstBlock->bbJumpSwt->bbsDstTab = jmpTab; - firstBlock->bbNext = isReversed ? blockIfTrue : blockIfFalse; - fgHasSwitch = true; + firstBlock->SetBBNext(isReversed ? blockIfTrue : blockIfFalse); // Splitting doesn't work well with jump-tables currently opts.compProcedureSplitting = false; diff --git a/src/coreclr/jit/unwind.cpp b/src/coreclr/jit/unwind.cpp index 27047d50a19bee..db8e66ed9cf893 100644 --- a/src/coreclr/jit/unwind.cpp +++ b/src/coreclr/jit/unwind.cpp @@ -127,9 +127,9 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, { assert(func->funKind == FUNC_HANDLER); *ppStartLoc = new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndBeg)); - *ppEndLoc = (HBtab->ebdHndLast->bbNext == nullptr) + *ppEndLoc = (HBtab->ebdHndLast->GetBBNext() == nullptr) ? 
nullptr - : new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndLast->bbNext)); + : new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndLast->GetBBNext())); } } } From 254c3621db2e9d10e15befbc6b0cf7690a5d941e Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Thu, 5 Oct 2023 12:10:59 -0400 Subject: [PATCH 07/14] Add IsFirst() and IsLast() --- src/coreclr/jit/block.cpp | 2 +- src/coreclr/jit/block.h | 14 ++++++++++++-- src/coreclr/jit/codegenarm.cpp | 2 +- src/coreclr/jit/codegencommon.cpp | 8 ++++---- src/coreclr/jit/codegenlinear.cpp | 18 +++++++++--------- src/coreclr/jit/compiler.cpp | 2 +- src/coreclr/jit/fgbasic.cpp | 12 ++++++------ src/coreclr/jit/fgdiagnostic.cpp | 10 +++++----- src/coreclr/jit/fgopt.cpp | 6 +++--- src/coreclr/jit/flowgraph.cpp | 10 +++++----- src/coreclr/jit/jiteh.cpp | 2 +- src/coreclr/jit/liveness.cpp | 6 +++--- src/coreclr/jit/lsra.cpp | 2 +- src/coreclr/jit/optimizebools.cpp | 2 +- src/coreclr/jit/optimizer.cpp | 8 ++++---- src/coreclr/jit/unwind.cpp | 2 +- 16 files changed, 58 insertions(+), 48 deletions(-) diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 2afe829d20fb9f..78df6e9b961bc8 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -1509,7 +1509,7 @@ bool BasicBlock::isBBCallAlwaysPair() const assert(!(this->bbFlags & BBF_RETLESS_CALL)); #endif // Some asserts that the next block is a BBJ_ALWAYS of the proper form. - assert(this->GetBBNext() != nullptr); + assert(!this->IsLast()); assert(this->GetBBNext()->KindIs(BBJ_ALWAYS)); assert(this->GetBBNext()->bbFlags & BBF_KEEP_BBJ_ALWAYS); assert(this->GetBBNext()->isEmpty()); diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 9d44238b309106..0812aeb56e2c53 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -559,6 +559,16 @@ struct BasicBlock : private LIR::Range } } + bool IsFirst() const + { + return (bbPrev == nullptr); + } + + bool IsLast() const + { + return (bbNext == nullptr); + } + /* The following union describes the jump target(s) of this block */ union { unsigned bbJumpOffs; // PC offset (temporary only) @@ -1454,8 +1464,8 @@ class BasicBlockIterator { assert(m_block != nullptr); // Check that we haven't been spliced out of the list. - assert((m_block->GetBBNext() == nullptr) || (m_block->GetBBNext()->GetBBPrev() == m_block)); - assert((m_block->GetBBPrev() == nullptr) || (m_block->GetBBPrev()->GetBBNext() == m_block)); + assert((m_block->IsLast()) || (m_block->GetBBNext()->GetBBPrev() == m_block)); + assert((m_block->IsFirst()) || (m_block->GetBBPrev()->GetBBNext() == m_block)); m_block = m_block->GetBBNext(); return *this; diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp index cd05af9a6f04ab..01459878ab1e45 100644 --- a/src/coreclr/jit/codegenarm.cpp +++ b/src/coreclr/jit/codegenarm.cpp @@ -123,7 +123,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) // we would have otherwise created retless calls. 
assert(block->isBBCallAlwaysPair()); - assert(block->GetBBNext() != NULL); + assert(!block->IsLast()); assert(block->GetBBNext()->KindIs(BBJ_ALWAYS)); assert(block->GetBBNext()->bbJumpDest != NULL); assert(block->GetBBNext()->bbJumpDest->bbFlags & BBF_FINALLY_TARGET); diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index d631263e4a71e8..f245509c535748 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -446,13 +446,13 @@ void CodeGen::genMarkLabelsForCodegen() JITDUMP(" " FMT_BB " : try begin\n", HBtab->ebdTryBeg->bbNum); JITDUMP(" " FMT_BB " : hnd begin\n", HBtab->ebdHndBeg->bbNum); - if (HBtab->ebdTryLast->GetBBNext() != nullptr) + if (!HBtab->ebdTryLast->IsLast()) { HBtab->ebdTryLast->GetBBNext()->bbFlags |= BBF_HAS_LABEL; JITDUMP(" " FMT_BB " : try end\n", HBtab->ebdTryLast->GetBBNext()->bbNum); } - if (HBtab->ebdHndLast->GetBBNext() != nullptr) + if (!HBtab->ebdHndLast->IsLast()) { HBtab->ebdHndLast->GetBBNext()->bbFlags |= BBF_HAS_LABEL; JITDUMP(" " FMT_BB " : hnd end\n", HBtab->ebdHndLast->GetBBNext()->bbNum); @@ -5210,7 +5210,7 @@ void CodeGen::genReserveEpilog(BasicBlock* block) assert(block != nullptr); const VARSET_TP& gcrefVarsArg(GetEmitter()->emitThisGCrefVars); - bool last = (block->GetBBNext() == nullptr); + bool last = (block->IsLast()); GetEmitter()->emitCreatePlaceholderIG(IGPT_EPILOG, block, gcrefVarsArg, gcrefRegsArg, byrefRegsArg, last); } @@ -5257,7 +5257,7 @@ void CodeGen::genReserveFuncletEpilog(BasicBlock* block) JITDUMP("Reserving funclet epilog IG for block " FMT_BB "\n", block->bbNum); - bool last = (block->GetBBNext() == nullptr); + bool last = (block->IsLast()); GetEmitter()->emitCreatePlaceholderIG(IGPT_FUNCLET_EPILOG, block, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur, last); } diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index c2b8258c4b74b4..5615e9941c1db8 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -330,7 +330,7 @@ void CodeGen::genCodeForBBlist() // // Note: We need to have set compCurBB before calling emitAddLabel // - if ((block->GetBBPrev() != nullptr) && block->GetBBPrev()->KindIs(BBJ_COND) && + if (!block->IsFirst() && block->GetBBPrev()->KindIs(BBJ_COND) && (block->bbWeight != block->GetBBPrev()->bbWeight)) { JITDUMP("Adding label due to BB weight difference: BBJ_COND " FMT_BB " with weight " FMT_WT @@ -519,7 +519,7 @@ void CodeGen::genCodeForBBlist() #endif // DEBUG #if defined(DEBUG) - if (block->GetBBNext() == nullptr) + if (block->IsLast()) { // Unit testing of the emitter: generate a bunch of instructions into the last block // (it's as good as any, but better than the prologue, which can only be a single instruction @@ -547,10 +547,10 @@ void CodeGen::genCodeForBBlist() /* Is this the last block, and are there any open scopes left ? */ - bool isLastBlockProcessed = (block->GetBBNext() == nullptr); + bool isLastBlockProcessed = (block->IsLast()); if (block->isBBCallAlwaysPair()) { - isLastBlockProcessed = (block->GetBBNext()->GetBBNext() == nullptr); + isLastBlockProcessed = (block->GetBBNext()->IsLast()); } if (compiler->opts.compDbgInfo && isLastBlockProcessed) @@ -615,7 +615,7 @@ void CodeGen::genCodeForBBlist() // Note: we may be generating a few too many NOPs for the case of call preceding an epilog. 
Technically, // if the next block is a BBJ_RETURN, an epilog will be generated, but there may be some instructions // generated before the OS epilog starts, such as a GS cookie check. - if ((block->GetBBNext() == nullptr) || !BasicBlock::sameEHRegion(block, block->GetBBNext())) + if ((block->IsLast()) || !BasicBlock::sameEHRegion(block, block->GetBBNext())) { // We only need the NOP if we're not going to generate any more code as part of the block end. @@ -636,7 +636,7 @@ void CodeGen::genCodeForBBlist() break; case BBJ_NONE: - if (block->GetBBNext() == nullptr) + if (block->IsLast()) { // Call immediately before the end of the code; we should never get here . instGen(INS_BREAKPOINT); // This should never get executed @@ -679,7 +679,7 @@ void CodeGen::genCodeForBBlist() // 2. If this is this is the last block of the hot section. // 3. If the subsequent block is a special throw block. // 4. On AMD64, if the next block is in a different EH region. - if ((block->GetBBNext() == nullptr) || (block->GetBBNext()->bbFlags & BBF_FUNCLET_BEG) || + if ((block->IsLast()) || (block->GetBBNext()->bbFlags & BBF_FUNCLET_BEG) || !BasicBlock::sameEHRegion(block, block->GetBBNext()) || (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->GetBBNext())) || block->GetBBNext() == compiler->fgFirstColdBlock) @@ -783,7 +783,7 @@ void CodeGen::genCodeForBBlist() { GetEmitter()->emitSetLoopBackEdge(block->bbJumpDest); - if (block->GetBBNext() != nullptr) + if (!block->IsLast()) { JITDUMP("Mark " FMT_BB " as label: alignment end-of-loop\n", block->GetBBNext()->bbNum); block->GetBBNext()->bbFlags |= BBF_HAS_LABEL; @@ -818,7 +818,7 @@ void CodeGen::genCodeForBBlist() GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->KindIs(BBJ_ALWAYS))); } - if ((block->GetBBNext() != nullptr) && (block->GetBBNext()->isLoopAlign())) + if (!block->IsLast() && (block->GetBBNext()->isLoopAlign())) { if (compiler->opts.compJitHideAlignBehindJmp) { diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index 8dd31ea5576636..68f2a6f016c845 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -5291,7 +5291,7 @@ PhaseStatus Compiler::placeLoopAlignInstructions() } } - if ((block->GetBBNext() != nullptr) && (block->GetBBNext()->isLoopAlign())) + if (!block->IsLast() && (block->GetBBNext()->isLoopAlign())) { // Loop alignment is disabled for cold blocks assert((block->bbFlags & BBF_COLD) == 0); diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 920dc3fed37067..05223cbeb8704b 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -4980,7 +4980,7 @@ void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd) if (fgLastBB == bEnd) { fgLastBB = bPrev; - noway_assert(fgLastBB->GetBBNext() == nullptr); + noway_assert(fgLastBB->IsLast()); } // If bEnd was the first Cold basic block update fgFirstColdBlock @@ -5514,7 +5514,7 @@ bool Compiler::fgRenumberBlocks() block->bbNum = num; } - if (block->GetBBNext() == nullptr) + if (block->IsLast()) { fgLastBB = block; fgBBcount = num; @@ -5637,7 +5637,7 @@ void Compiler::fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBloc { printf("Relocated block%s [" FMT_BB ".." FMT_BB "] inserted after " FMT_BB "%s\n", (bStart == bEnd) ? "" : "s", bStart->bbNum, bEnd->bbNum, insertAfterBlk->bbNum, - (insertAfterBlk->GetBBNext() == nullptr) ? " at the end of method" : ""); + (insertAfterBlk->IsLast()) ? 
" at the end of method" : ""); } #endif // DEBUG @@ -5650,7 +5650,7 @@ void Compiler::fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBloc if (insertAfterBlk == fgLastBB) { fgLastBB = bEnd; - noway_assert(fgLastBB->GetBBNext() == nullptr); + noway_assert(fgLastBB->IsLast()); } } @@ -5723,7 +5723,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r #if !defined(FEATURE_EH_FUNCLETS) // In the funclets case, we still need to set some information on the handler blocks - if (bLast->GetBBNext() == NULL) + if (bLast->IsLast()) { INDEBUG(reason = "region is already at the end of the method";) goto FAILURE; @@ -6210,7 +6210,7 @@ void Compiler::fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk) if (fgLastBB == insertAfterBlk) { fgLastBB = newBlk; - assert(fgLastBB->GetBBNext() == nullptr); + assert(fgLastBB->IsLast()); } } diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index 16378e567ce488..857b17b6049b70 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -168,7 +168,7 @@ void Compiler::fgDebugCheckUpdate() // We are allowed to have a branch from a hot 'block' to a cold 'bbNext' // - if ((block->GetBBNext() != nullptr) && fgInDifferentRegions(block, block->GetBBNext())) + if (!block->IsLast() && fgInDifferentRegions(block, block->GetBBNext())) { doAssertOnJumpToNextBlock = false; } @@ -1214,7 +1214,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) { // Invisible edge for bbNext chain // - if (bSource->GetBBNext() != nullptr) + if (!bSource->IsLast()) { fprintf(fgxFile, " " FMT_BB " -> " FMT_BB " [style=\"invis\", weight=25];\n", bSource->bbNum, bSource->GetBBNext()->bbNum); @@ -2790,7 +2790,7 @@ void Compiler::fgDebugCheckBBNumIncreasing() { for (BasicBlock* const block : Blocks()) { - assert(block->GetBBNext() == nullptr || (block->bbNum < block->GetBBNext()->bbNum)); + assert(block->IsLast() || (block->bbNum < block->GetBBNext()->bbNum)); } } @@ -2866,7 +2866,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef if (checkBBNum) { // Check that bbNum is sequential - assert(block->GetBBNext() == nullptr || (block->bbNum + 1 == block->GetBBNext()->bbNum)); + assert(block->IsLast() || (block->bbNum + 1 == block->GetBBNext()->bbNum)); } // If the block is a BBJ_COND, a BBJ_SWITCH or a @@ -3704,7 +3704,7 @@ void Compiler::fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees) // ensure that bbNext and bbPrev are consistent void Compiler::fgDebugCheckBlockLinks() { - assert(fgFirstBB->GetBBPrev() == nullptr); + assert(fgFirstBB->IsFirst()); for (BasicBlock* const block : Blocks()) { diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 7b788764275981..0fd3738f9192dd 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -2569,7 +2569,7 @@ void Compiler::fgUnreachableBlock(BasicBlock* block) } #endif // DEBUG - noway_assert(block->GetBBPrev() != nullptr); // Can't use this function to remove the first block + noway_assert(!block->IsFirst()); // Can't use this function to remove the first block #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) assert(!block->isBBCallAlwaysPairTail()); // can't remove the BBJ_ALWAYS of a BBJ_CALLFINALLY / BBJ_ALWAYS pair @@ -4832,7 +4832,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) #endif // FEATURE_EH_FUNCLETS // We can't relocate anything if we only have one block - if (fgFirstBB->GetBBNext() == nullptr) + if (fgFirstBB->IsLast()) { return 
false; } @@ -5666,7 +5666,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // or if bEnd->bbNext is in a different try region // then we cannot move the blocks // - if ((bEnd->GetBBNext() == nullptr) || !BasicBlock::sameTryRegion(startBlk, bEnd->GetBBNext())) + if ((bEnd->IsLast()) || !BasicBlock::sameTryRegion(startBlk, bEnd->GetBBNext())) { goto CANNOT_MOVE; } diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 574bd8800d0214..00166b1f72984a 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -1578,7 +1578,7 @@ void Compiler::fgAddSyncMethodEnterExit() BasicBlock* faultBB = fgNewBBafter(BBJ_EHFAULTRET, tryLastBB, false); assert(tryLastBB->GetBBNext() == faultBB); - assert(faultBB->GetBBNext() == nullptr); + assert(faultBB->IsLast()); assert(faultBB == fgLastBB); faultBB->bbRefs = 1; @@ -2154,7 +2154,7 @@ class MergedReturns BasicBlock* newReturnBB = comp->fgNewBBinRegion(BBJ_RETURN); comp->fgReturnCount++; - noway_assert(newReturnBB->GetBBNext() == nullptr); + noway_assert(newReturnBB->IsLast()); JITDUMP("\n newReturnBB [" FMT_BB "] created\n", newReturnBB->bbNum); @@ -3009,7 +3009,7 @@ BasicBlock* Compiler::fgLastBBInMainFunction() #endif // FEATURE_EH_FUNCLETS - assert(fgLastBB->GetBBNext() == nullptr); + assert(fgLastBB->IsLast()); return fgLastBB; } @@ -3078,7 +3078,7 @@ BasicBlock* Compiler::fgEndBBAfterMainFunction() #endif // FEATURE_EH_FUNCLETS - assert(fgLastBB->GetBBNext() == nullptr); + assert(fgLastBB->IsLast()); return nullptr; } @@ -3486,7 +3486,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // Cold section is 5 bytes in size. // Ignore if stress-splitting. // - if (!forceSplit && firstColdBlock->GetBBNext() == nullptr) + if (!forceSplit && firstColdBlock->IsLast()) { // If the size of the cold block is 7 or less // then we will keep it in the Hot section. 
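Note on the pattern in this patch: the IsFirst()/IsLast() conversions are mechanical null-check replacements on the doubly-linked block list. A minimal standalone sketch of the idiom — using a hypothetical Node type, not the JIT's real BasicBlock — compiles and runs as-is:

    #include <cassert>

    // Hypothetical doubly-linked node standing in for BasicBlock.
    struct Node
    {
        Node* prev = nullptr;
        Node* next = nullptr;

        bool IsFirst() const { return prev == nullptr; } // replaces (bbPrev == nullptr)
        bool IsLast() const  { return next == nullptr; } // replaces (bbNext == nullptr)
    };

    int main()
    {
        Node a, b;
        a.next = &b;
        b.prev = &a;
        assert(a.IsFirst() && !a.IsLast());
        assert(b.IsLast() && !b.IsFirst());
        return 0;
    }

The accessor form states the block's position in the list rather than exposing the link fields directly, in keeping with the series' goal of making BasicBlock's layout private.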
diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index 7e8664ee7ac595..984a3ad08aafbf 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -4349,7 +4349,7 @@ bool Compiler::fgRelocateEHRegions() */ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) { - assert(block->GetBBPrev() != nullptr); + assert(!block->IsFirst()); BasicBlock* bPrev = block->GetBBPrev(); diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index 787312f519e593..073323ecb737e7 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -889,7 +889,7 @@ void Compiler::fgExtendDbgLifetimes() switch (block->GetBBJumpKind()) { case BBJ_NONE: - PREFIX_ASSUME(block->GetBBNext() != nullptr); + PREFIX_ASSUME(!block->IsLast()); VarSetOps::UnionD(this, initVars, block->GetBBNext()->bbScope); break; @@ -903,14 +903,14 @@ void Compiler::fgExtendDbgLifetimes() if (!(block->bbFlags & BBF_RETLESS_CALL)) { assert(block->isBBCallAlwaysPair()); - PREFIX_ASSUME(block->GetBBNext() != nullptr); + PREFIX_ASSUME(!block->IsLast()); VarSetOps::UnionD(this, initVars, block->GetBBNext()->bbScope); } VarSetOps::UnionD(this, initVars, block->bbJumpDest->bbScope); break; case BBJ_COND: - PREFIX_ASSUME(block->GetBBNext() != nullptr); + PREFIX_ASSUME(!block->IsLast()); VarSetOps::UnionD(this, initVars, block->GetBBNext()->bbScope); VarSetOps::UnionD(this, initVars, block->bbJumpDest->bbScope); break; diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index 820cd08a00c25e..f51ffeec2e7819 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -1484,7 +1484,7 @@ void LinearScan::recordVarLocationsAtStartOfBB(BasicBlock* bb) count++; BasicBlock* prevReportedBlock = bb->GetBBPrev(); - if (bb->GetBBPrev() != nullptr && bb->GetBBPrev()->isBBCallAlwaysPairTail()) + if (!bb->IsFirst() && bb->GetBBPrev()->isBBCallAlwaysPairTail()) { // For callf+always pair we generate the code for the always // block in genCallFinally and skip it, so we don't report diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index 28319dccbad4ae..76cf3ab3c89a3d 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -891,7 +891,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() assert(m_b2->KindIs(BBJ_COND)); assert(m_b1->bbJumpDest == m_b2->bbJumpDest); assert(m_b1->GetBBNext() == m_b2); - assert(m_b2->GetBBNext() != nullptr); + assert(!m_b2->IsLast()); } if (!optReturnBlock) diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 91433579efd5e6..1bf9b8f393afbf 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -743,7 +743,7 @@ bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenT { if (predBlock->KindIs(BBJ_NONE) && (predBlock->GetBBNext() == optLoopTable[loopInd].lpEntry) && (predBlock->countOfInEdges() == 1) && (predBlock->firstStmt() == nullptr) && - (predBlock->GetBBPrev() != nullptr) && predBlock->GetBBPrev()->bbFallsThrough()) + !predBlock->IsFirst() && predBlock->GetBBPrev()->bbFallsThrough()) { initBlockOk = true; } @@ -1151,7 +1151,7 @@ bool Compiler::optExtractInitTestIncr( // the first time, which might be empty if no hoisting has yet occurred. In this case, look a // little harder for the possible loop initialization statement. 
if (initBlock->KindIs(BBJ_NONE) && (initBlock->GetBBNext() == top) && (initBlock->countOfInEdges() == 1) && - (initBlock->GetBBPrev() != nullptr) && initBlock->GetBBPrev()->bbFallsThrough()) + !initBlock->IsFirst() && initBlock->GetBBPrev()->bbFallsThrough()) { initBlock = initBlock->GetBBPrev(); phdrStmt = initBlock->firstStmt(); @@ -1924,7 +1924,7 @@ class LoopSearch isFirstVisit = true; } - if (isFirstVisit && (predBlock->GetBBNext() != nullptr) && + if (isFirstVisit && !predBlock->IsLast() && (PositionNum(predBlock->GetBBNext()) == predBlock->bbNum)) { // We've created a new block immediately after `predBlock` to @@ -2503,7 +2503,7 @@ void Compiler::optFindNaturalLoops() LoopSearch search(this); - for (BasicBlock* head = fgFirstBB; head->GetBBNext() != nullptr; head = head->GetBBNext()) + for (BasicBlock* head = fgFirstBB; !head->IsLast(); head = head->GetBBNext()) { BasicBlock* top = head->GetBBNext(); diff --git a/src/coreclr/jit/unwind.cpp b/src/coreclr/jit/unwind.cpp index db8e66ed9cf893..7348fa2ee53f6e 100644 --- a/src/coreclr/jit/unwind.cpp +++ b/src/coreclr/jit/unwind.cpp @@ -127,7 +127,7 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, { assert(func->funKind == FUNC_HANDLER); *ppStartLoc = new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndBeg)); - *ppEndLoc = (HBtab->ebdHndLast->GetBBNext() == nullptr) + *ppEndLoc = HBtab->ebdHndLast->IsLast() ? nullptr : new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndLast->GetBBNext())); } From f6dfb21ec67755b08b6e11889360175ef152af2c Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Thu, 5 Oct 2023 12:25:58 -0400 Subject: [PATCH 08/14] Add BasicBlock::IsLastHotBlock() --- src/coreclr/jit/block.cpp | 15 +++++++++++++++ src/coreclr/jit/block.h | 8 +++++--- src/coreclr/jit/codegenlinear.cpp | 2 +- src/coreclr/jit/fgbasic.cpp | 2 +- src/coreclr/jit/fgopt.cpp | 2 +- 5 files changed, 23 insertions(+), 6 deletions(-) diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 78df6e9b961bc8..42aa90ffef4c94 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -186,6 +186,21 @@ FlowEdge* Compiler::BlockPredsWithEH(BasicBlock* blk) return res; } +//------------------------------------------------------------------------ +// IsLastHotBlock: see if this is the last block before the cold section +// +// Arguments: +// compiler - current compiler instance +// +// Returns: +// true if the next block is fgFirstColdBlock +// (if fgFirstColdBlock is null, this call is equivalent to IsLast()) +// +bool BasicBlock::IsLastHotBlock(Compiler* compiler) const +{ + return (bbNext == compiler->fgFirstColdBlock); +} + //------------------------------------------------------------------------ // checkPredListOrder: see if pred list is properly ordered // diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 0812aeb56e2c53..822530bce8a16b 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -520,13 +520,13 @@ struct BasicBlock : private LIR::Range return bbJumpKind; } - void SetBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* comp)) + void SetBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* compiler)) { #ifdef DEBUG // BBJ_NONE should only be assigned when optimizing jumps in Compiler::optOptimizeLayout - // TODO: Change assert to check if comp is in appropriate optimization phase to use BBJ_NONE + // TODO: Change assert to check if compiler is in appropriate optimization phase to use BBJ_NONE // (right now, this assertion does the null check to avoid unused 
variable warnings) - assert((kind != BBJ_NONE) || (comp != nullptr)); + assert((kind != BBJ_NONE) || (compiler != nullptr)); #endif // DEBUG bbJumpKind = kind; } @@ -569,6 +569,8 @@ struct BasicBlock : private LIR::Range return (bbNext == nullptr); } + bool IsLastHotBlock(Compiler* compiler) const; + /* The following union describes the jump target(s) of this block */ union { unsigned bbJumpOffs; // PC offset (temporary only) diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index 5615e9941c1db8..e94da2bdfd2304 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -682,7 +682,7 @@ void CodeGen::genCodeForBBlist() if ((block->IsLast()) || (block->GetBBNext()->bbFlags & BBF_FUNCLET_BEG) || !BasicBlock::sameEHRegion(block, block->GetBBNext()) || (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->GetBBNext())) || - block->GetBBNext() == compiler->fgFirstColdBlock) + block->IsLastHotBlock(compiler)) { instGen(INS_BREAKPOINT); // This should never get executed } diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 05223cbeb8704b..3c3739f845a126 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -5061,7 +5061,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) } else if (bPrev->KindIs(BBJ_ALWAYS) && bPrev->bbJumpDest == block->GetBBNext() && !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) && - (block->GetBBNext() != fgFirstColdBlock)) + !block->IsLastHotBlock(this)) { // previous block is a BBJ_ALWAYS to the next block: change to BBJ_NONE. // Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS), diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 0fd3738f9192dd..81ff5938be9434 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -2936,7 +2936,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) } // can't allow fall through into cold code - if (block->GetBBNext() == fgFirstColdBlock) + if (block->IsLastHotBlock(this)) { break; } From 52370b01e652f254a3c25a25c2ff159e08615bd5 Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Thu, 5 Oct 2023 13:30:45 -0400 Subject: [PATCH 09/14] Add NextIs() and PrevIs() --- src/coreclr/jit/assertionprop.cpp | 2 +- src/coreclr/jit/block.h | 16 ++++-- src/coreclr/jit/codegenarm64.cpp | 2 +- src/coreclr/jit/codegenloongarch64.cpp | 2 +- src/coreclr/jit/codegenxarch.cpp | 2 +- src/coreclr/jit/compiler.h | 2 +- src/coreclr/jit/fgbasic.cpp | 66 ++++++++++++------------- src/coreclr/jit/fgdiagnostic.cpp | 30 +++++------ src/coreclr/jit/fgehopt.cpp | 12 ++--- src/coreclr/jit/fginline.cpp | 12 ++--- src/coreclr/jit/fgopt.cpp | 40 +++++++-------- src/coreclr/jit/fgprofile.cpp | 4 +- src/coreclr/jit/flowgraph.cpp | 6 +-- src/coreclr/jit/importer.cpp | 8 +-- src/coreclr/jit/jiteh.cpp | 4 +- src/coreclr/jit/liveness.cpp | 2 +- src/coreclr/jit/loopcloning.cpp | 4 +- src/coreclr/jit/lower.cpp | 14 +++--- src/coreclr/jit/lsra.cpp | 2 +- src/coreclr/jit/morph.cpp | 2 +- src/coreclr/jit/optimizebools.cpp | 14 +++--- src/coreclr/jit/optimizer.cpp | 34 ++++++------- src/coreclr/jit/promotionliveness.cpp | 2 +- src/coreclr/jit/rangecheck.cpp | 2 +- src/coreclr/jit/redundantbranchopts.cpp | 4 +- src/coreclr/jit/switchrecognition.cpp | 4 +- 26 files changed, 151 insertions(+), 141 deletions(-) diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp index e124236a3c189f..755724e74f3ce7 100644 --- 
a/src/coreclr/jit/assertionprop.cpp +++ b/src/coreclr/jit/assertionprop.cpp @@ -5268,7 +5268,7 @@ class AssertionPropFlowCallback { // Scenario where next block and conditional block, both point to the same block. // In such case, intersect the assertions present on both the out edges of predBlock. - assert(predBlock->GetBBNext() == block); + assert(predBlock->NextIs(block)); BitVecOps::IntersectionD(apTraits, pAssertionOut, predBlock->bbAssertionOut); if (VerboseDataflow()) diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 822530bce8a16b..875327d349c2ba 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -569,6 +569,16 @@ struct BasicBlock : private LIR::Range return (bbNext == nullptr); } + bool PrevIs(BasicBlock* block) const + { + return (bbPrev == block); + } + + bool NextIs(BasicBlock* block) const + { + return (bbNext == block); + } + bool IsLastHotBlock(Compiler* compiler) const; /* The following union describes the jump target(s) of this block */ @@ -1466,8 +1476,8 @@ class BasicBlockIterator { assert(m_block != nullptr); // Check that we haven't been spliced out of the list. - assert((m_block->IsLast()) || (m_block->GetBBNext()->GetBBPrev() == m_block)); - assert((m_block->IsFirst()) || (m_block->GetBBPrev()->GetBBNext() == m_block)); + assert((m_block->IsLast()) || m_block->GetBBNext()->PrevIs(m_block)); + assert((m_block->IsFirst()) || m_block->GetBBPrev()->NextIs(m_block)); m_block = m_block->GetBBNext(); return *this; @@ -1638,7 +1648,7 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) // If both fall-through and branch successors are identical, then only include // them once in the iteration (this is the same behavior as NumSucc()/GetSucc()). - if (block->bbJumpDest == block->GetBBNext()) + if (block->NextIs(block->bbJumpDest)) { m_end = &m_succs[1]; } diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index 4b12859a9316db..e014a108d571a1 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -2184,7 +2184,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) BasicBlock* const jumpDest = nextBlock->bbJumpDest; // Now go to where the finally funclet needs to return to. - if ((jumpDest == nextBlock->GetBBNext()) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) + if (nextBlock->NextIs(jumpDest) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) { // Fall-through. // TODO-ARM64-CQ: Can we get rid of this instruction, and just have the call return directly diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index 48ad9679720ba4..2eacd544971fa5 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -1544,7 +1544,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) BasicBlock* const jumpDest = nextBlock->bbJumpDest; // Now go to where the finally funclet needs to return to. - if ((jumpDest == nextBlock->GetBBNext()) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) + if (nextBlock->NextIs(jumpDest) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) { // Fall-through. 
// TODO-LOONGARCH64-CQ: Can we get rid of this instruction, and just have the call return directly diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 78580fe5e28c13..ae57da88dd86ad 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -256,7 +256,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) BasicBlock* const jumpDest = nextBlock->bbJumpDest; // Now go to where the finally funclet needs to return to. - if ((jumpDest == nextBlock->GetBBNext()) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) + if (nextBlock->NextIs(jumpDest) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) { // Fall-through. // TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index e94b76b0a1f7fe..76081ebf8b09c2 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -6482,7 +6482,7 @@ class Compiler // Returns "true" iff this is a "top entry" loop. bool lpIsTopEntry() const { - if (lpHead->GetBBNext() == lpEntry) + if (lpHead->NextIs(lpEntry)) { assert(lpHead->bbFallsThrough()); assert(lpTop == lpEntry); diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 3c3739f845a126..8214b4552a9ef9 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -2793,7 +2793,7 @@ void Compiler::fgLinkBasicBlocks() break; } - if (!curBBdesc->GetBBNext()) + if (curBBdesc->IsLast()) { BADCODE("Fall thru the end of a method"); } @@ -2839,7 +2839,7 @@ void Compiler::fgLinkBasicBlocks() /* Default case of CEE_SWITCH (next block), is at end of jumpTab[] */ - noway_assert(*(jumpPtr - 1) == curBBdesc->GetBBNext()); + noway_assert(curBBdesc->NextIs(*(jumpPtr - 1))); break; } @@ -3685,7 +3685,7 @@ void Compiler::fgFindBasicBlocks() } } - if (!block->GetBBNext() || block->GetBBNext() != hndBegBB) + if (block->IsLast() || !block->NextIs(hndBegBB)) { BADCODE3("Filter does not immediately precede handler for filter", " at offset %04X", filtBB->bbCodeOffs); @@ -4855,7 +4855,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) assert(fgGetPredForBlock(succ, curr) != nullptr); BasicBlock* newBlock; - if (succ == curr->GetBBNext()) + if (curr->NextIs(succ)) { // The successor is the fall-through path of a BBJ_COND, or // an immediately following block of a BBJ_SWITCH (which has @@ -4928,15 +4928,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) void Compiler::fgUnlinkBlock(BasicBlock* block) { - if (block->GetBBPrev()) - { - block->GetBBPrev()->SetBBNext(block->GetBBNext()); - if (block == fgLastBB) - { - fgLastBB = block->GetBBPrev(); - } - } - else + if (block->IsFirst()) { assert(block == fgFirstBB); assert(block != fgLastBB); @@ -4957,6 +4949,14 @@ void Compiler::fgUnlinkBlock(BasicBlock* block) fgFirstBBScratch = nullptr; } } + else + { + block->GetBBPrev()->SetBBNext(block->GetBBNext()); + if (block == fgLastBB) + { + fgLastBB = block->GetBBPrev(); + } + } } /***************************************************************************************************** @@ -5025,7 +5025,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // the SwitchDescs might be removed. 
InvalidateUniqueSwitchSuccMap(); - noway_assert((block == fgFirstBB) || (bPrev && (bPrev->GetBBNext() == block))); + noway_assert((block == fgFirstBB) || (bPrev && bPrev->NextIs(block))); noway_assert(!(block->bbFlags & BBF_DONT_REMOVE)); // Should never remove a genReturnBB, as we might have special hookups there. @@ -5059,7 +5059,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) NO_WAY("No retless call finally blocks; need unwind target instead"); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } - else if (bPrev->KindIs(BBJ_ALWAYS) && bPrev->bbJumpDest == block->GetBBNext() && + else if (bPrev->KindIs(BBJ_ALWAYS) && block->NextIs(bPrev->bbJumpDest) && !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) && !block->IsLastHotBlock(this)) { @@ -5269,7 +5269,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) } /* Check if both side of the BBJ_COND now jump to the same block */ - if (predBlock->GetBBNext() == succBlock) + if (predBlock->NextIs(succBlock)) { // Make sure we are replacing "block" with "succBlock" in predBlock->bbJumpDest. noway_assert(predBlock->bbJumpDest == block); @@ -5323,7 +5323,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // the next block. This is the safest fix. We should remove all this BBJ_CALLFINALLY/BBJ_ALWAYS // pairing. - if ((bPrev->bbJumpDest == bPrev->GetBBNext()) && + if (bPrev->NextIs(bPrev->bbJumpDest) && !fgInDifferentRegions(bPrev, bPrev->bbJumpDest)) // We don't remove a branch from Hot -> Cold { if ((bPrev == fgFirstBB) || !bPrev->isBBCallAlwaysPairTail()) @@ -5336,7 +5336,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) case BBJ_COND: /* Check for branch to next block */ - if (bPrev->bbJumpDest == bPrev->GetBBNext()) + if (bPrev->NextIs(bPrev->bbJumpDest)) { fgRemoveConditionalJump(bPrev); } @@ -5372,7 +5372,7 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) { /* If bSrc falls through to a block that is not bDst, we will insert a jump to bDst */ - if (bSrc->bbFallsThrough() && (bSrc->GetBBNext() != bDst)) + if (bSrc->bbFallsThrough() && !bSrc->NextIs(bDst)) { switch (bSrc->GetBBJumpKind()) { @@ -5456,7 +5456,7 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) // then change it to a BBJ_NONE block // if (bSrc->KindIs(BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && - (bSrc->bbJumpDest == bSrc->GetBBNext())) + bSrc->NextIs(bSrc->bbJumpDest)) { bSrc->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); JITDUMP("Changed an unconditional jump from " FMT_BB " to the next block " FMT_BB @@ -5748,7 +5748,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r noway_assert(inTheRange == false); inTheRange = true; } - else if (block == bLast->GetBBNext()) + else if (bLast->NextIs(block)) { noway_assert(inTheRange == true); inTheRange = false; @@ -5874,7 +5874,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r fgSetTryEnd(HBtab, bPrev); break; } - else if (block == HBtab->ebdTryLast->GetBBNext()) + else if (HBtab->ebdTryLast->NextIs(block)) { // bPrev does not come after the TryBeg, thus we are larger, and // it is moving with us. 
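Note on the NextIs()/PrevIs() sweep in the surrounding hunks: both orientations of the old pointer comparison (a->bbNext == b and b == a->bbNext) normalize onto the block whose link is being inspected. A rough self-contained sketch, with a hypothetical Node type simplified from the block.h accessors above:

    #include <cassert>

    struct Node
    {
        Node* prev = nullptr;
        Node* next = nullptr;

        bool NextIs(const Node* n) const { return next == n; } // a->NextIs(b) == (a->next == b)
        bool PrevIs(const Node* n) const { return prev == n; } // a->PrevIs(b) == (a->prev == b)
    };

    int main()
    {
        Node a, b;
        a.next = &b;
        b.prev = &a;
        assert(a.NextIs(&b));       // was: a.next == &b, or equivalently &b == a.next
        assert(b.PrevIs(&a));       // was: b.prev == &a
        assert(!a.NextIs(nullptr)); // a non-last block has its next link set
        return 0;
    }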
@@ -5893,7 +5893,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r fgSetHndEnd(HBtab, bPrev); break; } - else if (block == HBtab->ebdHndLast->GetBBNext()) + else if (HBtab->ebdHndLast->NextIs(block)) { // bPrev does not come after the HndBeg break; @@ -5954,7 +5954,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r fgSetTryEnd(HBtab, bPrev); break; } - else if (block == HBtab->ebdTryLast->GetBBNext()) + else if (HBtab->ebdTryLast->NextIs(block)) { // bPrev does not come after the TryBeg break; @@ -5972,7 +5972,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r fgSetHndEnd(HBtab, bPrev); break; } - else if (block == HBtab->ebdHndLast->GetBBNext()) + else if (HBtab->ebdHndLast->NextIs(block)) { // bPrev does not come after the HndBeg break; @@ -6172,17 +6172,17 @@ BasicBlock* Compiler::fgNewBBFromTreeAfter( */ void Compiler::fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk) { - if (insertBeforeBlk->GetBBPrev()) - { - fgInsertBBafter(insertBeforeBlk->GetBBPrev(), newBlk); - } - else + if (insertBeforeBlk->IsFirst()) { newBlk->SetBBNext(fgFirstBB); fgFirstBB = newBlk; newBlk->SetBBPrev(nullptr); } + else + { + fgInsertBBafter(insertBeforeBlk->GetBBPrev(), newBlk); + } #if defined(FEATURE_EH_FUNCLETS) @@ -6449,7 +6449,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, // and be in the correct EH region. This is must be guaranteed by the caller (as it is by // fgNewBBinRegion(), which passes the search range as an exact EH region block range). // Because of this assumption, we only check the EH information for blocks before the last block. - if (blk->GetBBNext() != endBlk) + if (!blk->NextIs(endBlk)) { // We are in the middle of the search range. We can't insert the new block in // an inner try or handler region. We can, however, set the insertion @@ -6890,7 +6890,7 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind, // Is afterBlk at the end of a try region? if (HBtab->ebdTryLast == afterBlk) { - noway_assert(afterBlkNext == newBlk->GetBBNext()); + noway_assert(newBlk->NextIs(afterBlkNext)); bool extendTryRegion = false; if (newBlk->hasTryIndex()) @@ -6929,7 +6929,7 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind, // Is afterBlk at the end of a handler region? if (HBtab->ebdHndLast == afterBlk) { - noway_assert(afterBlkNext == newBlk->GetBBNext()); + noway_assert(newBlk->NextIs(afterBlkNext)); // Does newBlk extend this handler region? 
bool extendHndRegion = false; diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index 857b17b6049b70..7f1067f7a10aff 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -176,7 +176,7 @@ void Compiler::fgDebugCheckUpdate() if (doAssertOnJumpToNextBlock) { - if (block->bbJumpDest == block->GetBBNext()) + if (block->NextIs(block->bbJumpDest)) { noway_assert(!"Unnecessary jump to the next block!"); } @@ -2201,7 +2201,7 @@ void Compiler::fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth /* = 0 * /* brace matching editor workaround to compensate for the following line: { */ printf("} "); } - if (HBtab->HasFilter() && block->GetBBNext() == HBtab->ebdHndBeg) + if (HBtab->HasFilter() && block->NextIs(HBtab->ebdHndBeg)) { cnt += 2; /* brace matching editor workaround to compensate for the following line: { */ @@ -2341,9 +2341,9 @@ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, for (BasicBlock* block : *fgBBOrder) { // First, do some checking on the bbPrev links - if (block->GetBBPrev()) + if (!block->IsFirst()) { - if (block->GetBBPrev()->GetBBNext() != block) + if (!block->GetBBPrev()->NextIs(block)) { printf("bad prev link\n"); } @@ -2663,11 +2663,11 @@ bool BBPredsChecker::CheckJump(BasicBlock* blockPred, BasicBlock* block) switch (blockPred->GetBBJumpKind()) { case BBJ_COND: - assert(blockPred->GetBBNext() == block || blockPred->bbJumpDest == block); + assert(blockPred->NextIs(block) || blockPred->bbJumpDest == block); return true; case BBJ_NONE: - assert(blockPred->GetBBNext() == block); + assert(blockPred->NextIs(block)); return true; case BBJ_CALLFINALLY: @@ -2739,7 +2739,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) continue; } - if (block == bcall->GetBBNext()) + if (bcall->NextIs(block)) { return true; } @@ -2761,7 +2761,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) continue; } - if (block != bcall->GetBBNext()) + if (!bcall->NextIs(block)) { continue; } @@ -3708,22 +3708,22 @@ void Compiler::fgDebugCheckBlockLinks() for (BasicBlock* const block : Blocks()) { - if (block->GetBBNext()) + if (block->IsLast()) { - assert(block->GetBBNext()->GetBBPrev() == block); + assert(block == fgLastBB); } else { - assert(block == fgLastBB); + assert(block->GetBBNext()->PrevIs(block)); } - if (block->GetBBPrev()) + if (block->IsFirst()) { - assert(block->GetBBPrev()->GetBBNext() == block); + assert(block == fgFirstBB); } else { - assert(block == fgFirstBB); + assert(block->GetBBPrev()->NextIs(block)); } // If this is a switch, check that the tables are consistent. @@ -4799,7 +4799,7 @@ void Compiler::fgDebugCheckLoopTable() else { assert(h->KindIs(BBJ_NONE)); - assert(h->GetBBNext() == e); + assert(h->NextIs(e)); assert(loop.lpTop == e); assert(loop.lpIsTopEntry()); } diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index a71770ab613184..9902d65f08f6d6 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -417,7 +417,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() BasicBlock* const callFinally = firstTryBlock; // Try must be a callalways pair of blocks. 
- if (firstTryBlock->GetBBNext() != lastTryBlock) + if (!firstTryBlock->NextIs(lastTryBlock)) { JITDUMP("EH#%u block " FMT_BB " not last block in try; skipping.\n", XTnum, firstTryBlock->GetBBNext()->bbNum); @@ -1008,7 +1008,7 @@ PhaseStatus Compiler::fgCloneFinally() fgVerifyHandlerTab(); #endif // DEBUG - assert(nextBlock == lastBlock->GetBBNext()); + assert(lastBlock->NextIs(nextBlock)); // Update where the callfinally range begins, since we might // have altered this with callfinally rearrangement, and/or @@ -1064,9 +1064,9 @@ PhaseStatus Compiler::fgCloneFinally() // If the clone ends up just after the finally, adjust // the stopping point for finally traversal. - if (newBlock->GetBBNext() == nextBlock) + if (newBlock->NextIs(nextBlock)) { - assert(newBlock->GetBBPrev() == lastBlock); + assert(newBlock->PrevIs(lastBlock)); nextBlock = newBlock; } } @@ -2215,7 +2215,7 @@ PhaseStatus Compiler::fgTailMergeThrows() case BBJ_COND: { // Flow to non canonical block could be via fall through or jump or both. - if (predBlock->GetBBNext() == nonCanonicalBlock) + if (predBlock->NextIs(nonCanonicalBlock)) { fgTailMergeThrowsFallThroughHelper(predBlock, nonCanonicalBlock, canonicalBlock, predEdge); } @@ -2291,7 +2291,7 @@ void Compiler::fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock, BasicBlock* canonicalBlock, FlowEdge* predEdge) { - assert(predBlock->GetBBNext() == nonCanonicalBlock); + assert(predBlock->NextIs(nonCanonicalBlock)); BasicBlock* const newBlock = fgNewBBafter(BBJ_ALWAYS, predBlock, true); diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index 6548769f0e7401..fda5cf1e767206 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -1526,18 +1526,18 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) if (block->KindIs(BBJ_RETURN)) { noway_assert((block->bbFlags & BBF_HAS_JMP) == 0); - if (block->GetBBNext()) + if (block->IsLast()) + { + JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_NONE\n", block->bbNum); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + } + else { JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n", block->bbNum, bottomBlock->bbNum); block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = bottomBlock; } - else - { - JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_NONE\n", block->bbNum); - block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); - } fgAddRefPred(bottomBlock, block); } diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 81ff5938be9434..dcc420aaa4e241 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -1916,7 +1916,7 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext) return false; } - noway_assert(block->GetBBNext() == bNext); + noway_assert(block->NextIs(bNext)); if (!block->KindIs(BBJ_NONE)) { @@ -2029,7 +2029,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) noway_assert((block->bbFlags & BBF_REMOVED) == 0); noway_assert(block->KindIs(BBJ_NONE)); - noway_assert(bNext == block->GetBBNext()); + noway_assert(block->NextIs(bNext)); noway_assert(bNext != nullptr); noway_assert((bNext->bbFlags & BBF_REMOVED) == 0); noway_assert(bNext->countOfInEdges() == 1 || block->isEmpty()); @@ -2627,7 +2627,7 @@ void Compiler::fgUnreachableBlock(BasicBlock* block) // void Compiler::fgRemoveConditionalJump(BasicBlock* block) { - noway_assert(block->KindIs(BBJ_COND) && block->bbJumpDest == block->GetBBNext()); + noway_assert(block->KindIs(BBJ_COND) && 
block->NextIs(block->bbJumpDest)); assert(compRationalIRForm == block->IsLIR()); FlowEdge* flow = fgGetPredForBlock(block->GetBBNext(), block); @@ -2914,7 +2914,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) // A GOTO cannot be to the next block since that // should have been fixed by the optimization above // An exception is made for a jump from Hot to Cold - noway_assert(block->bbJumpDest != block->GetBBNext() || block->isBBCallAlwaysPairTail() || + noway_assert(!block->NextIs(block->bbJumpDest) || block->isBBCallAlwaysPairTail() || fgInDifferentRegions(block, block->GetBBNext())); /* Cannot remove the first BB */ @@ -3323,7 +3323,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) return true; } - else if (block->bbJumpSwt->bbsCount == 2 && block->bbJumpSwt->bbsDstTab[1] == block->GetBBNext()) + else if ((block->bbJumpSwt->bbsCount == 2) && block->NextIs(block->bbJumpSwt->bbsDstTab[1])) { /* Use a BBJ_COND(switchVal==0) for a switch with only one significant clause besides the default clause, if the @@ -3826,8 +3826,8 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi { assert(block->KindIs(BBJ_COND, BBJ_ALWAYS)); assert(block->bbJumpDest == bNext); - assert(block->GetBBNext() == bNext); - assert(block->GetBBPrev() == bPrev); + assert(block->NextIs(bNext)); + assert(block->PrevIs(bPrev)); if (block->KindIs(BBJ_ALWAYS)) { @@ -4026,7 +4026,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) return false; } - if (bDest->bbJumpDest != bJump->GetBBNext()) + if (!bJump->NextIs(bDest->bbJumpDest)) { return false; } @@ -5211,7 +5211,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) /* (bPrev is known to be a normal block at this point) */ if (!isRare) { - if ((bDest == block->GetBBNext()) && block->KindIs(BBJ_RETURN) && bPrev->KindIs(BBJ_ALWAYS)) + if (block->NextIs(bDest) && block->KindIs(BBJ_RETURN) && bPrev->KindIs(BBJ_ALWAYS)) { // This is a common case with expressions like "return Expr1 && Expr2" -- move the return // to establish fall-through. 
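A recurring shape in the fgopt.cpp hunks above and below is testing whether a conditional block branches to its own fall-through successor, now spelled block->NextIs(block->bbJumpDest). A small sketch of that pattern under hypothetical names (next for the fall-through edge, jumpDest for the taken edge):

    #include <cassert>

    struct Node
    {
        Node* next     = nullptr; // fall-through successor
        Node* jumpDest = nullptr; // taken-branch successor

        bool NextIs(const Node* n) const { return next == n; }

        // Roughly the precondition fgRemoveConditionalJump asserts above:
        // the conditional branch is redundant when both successors coincide.
        bool JumpsToNext() const { return NextIs(jumpDest); }
    };

    int main()
    {
        Node cond, target;
        cond.next     = &target;
        cond.jumpDest = &target;
        assert(cond.JumpsToNext()); // both edges reach the same block
        return 0;
    }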
@@ -5309,7 +5309,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) #if defined(FEATURE_EH_FUNCLETS) // Check if we've reached the funclets region, at the end of the function - if (fgFirstFuncletBB == bEnd->GetBBNext()) + if (bEnd->NextIs(fgFirstFuncletBB)) { break; } @@ -5396,7 +5396,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) bPrev2 = block; while (bPrev2 != nullptr) { - if (bPrev2->GetBBNext() == bDest) + if (bPrev2->NextIs(bDest)) { break; } @@ -5579,7 +5579,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) noway_assert(!bEnd->KindIs(BBJ_CALLFINALLY) || (bEnd->bbFlags & BBF_RETLESS_CALL)); // bStartPrev must be set to the block that precedes bStart - noway_assert(bStartPrev->GetBBNext() == bStart); + noway_assert(bStartPrev->NextIs(bStart)); // Since we will be unlinking [bStart..bEnd], // we need to compute and remember if bStart is in each of @@ -5719,7 +5719,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) fgIsForwardBranch(bEnd, bPrev)) { // Set nearBlk to be the block in [startBlk..endBlk] - // such that nearBlk->GetBBNext() == bEnd->JumpDest + // such that nearBlk->NextIs(bEnd->JumpDest) // if no such block exists then set nearBlk to NULL nearBlk = startBlk; jumpBlk = bEnd; @@ -5731,7 +5731,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) if (nearBlk != bPrev) { // Check if nearBlk satisfies our requirement - if (nearBlk->GetBBNext() == bEnd->bbJumpDest) + if (nearBlk->NextIs(bEnd->bbJumpDest)) { break; } @@ -5784,7 +5784,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) /* relink [bStart .. bEnd] into the flow graph */ bPrev->SetBBNext(bStart); - if (bEnd->GetBBNext()) + if (!bEnd->IsLast()) { bEnd->GetBBNext()->SetBBPrev(bEnd); } @@ -5880,7 +5880,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) else { noway_assert(insertAfterBlk == bPrev); - noway_assert(insertAfterBlk->GetBBNext() == block); + noway_assert(insertAfterBlk->NextIs(block)); /* Set the new jump dest for bPrev to the rarely run or uncommon block(s) */ bPrev->bbJumpDest = block; @@ -6176,7 +6176,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) { // case (a) // - const bool isJumpAroundEmpty = (bNext->GetBBNext() == bDest); + const bool isJumpAroundEmpty = bNext->NextIs(bDest); // case (b) // @@ -6241,7 +6241,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) { // We don't expect bDest to already be right after bNext. // - assert(bDest != bNext->GetBBNext()); + assert(!bNext->NextIs(bDest)); JITDUMP("\nMoving " FMT_BB " after " FMT_BB " to enable reversal\n", bDest->bbNum, bNext->bbNum); @@ -6483,7 +6483,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) if (block->isEmpty()) { - assert(bPrev == block->GetBBPrev()); + assert(block->PrevIs(bPrev)); if (fgOptimizeEmptyBlock(block)) { change = true; @@ -7067,7 +7067,7 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early) // ternaries in C#). // The logic below could be generalized to BBJ_SWITCH, but this currently // has almost no CQ benefit but does have a TP impact. 
- if (!block->KindIs(BBJ_COND) || (block->GetBBNext() == block->bbJumpDest)) + if (!block->KindIs(BBJ_COND) || block->NextIs(block->bbJumpDest)) { return false; } diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index 3665e499daad9c..a8cb23e3caf434 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -530,7 +530,7 @@ void BlockCountInstrumentor::RelocateProbes() // Handle case where we had a fall through critical edge // - if (pred->GetBBNext() == intermediary) + if (pred->NextIs(intermediary)) { m_comp->fgRemoveRefPred(pred, block); m_comp->fgAddRefPred(intermediary, block); @@ -4761,7 +4761,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() weight_t diff; FlowEdge* otherEdge; BasicBlock* otherDst; - if (bSrc->GetBBNext() == bDst) + if (bSrc->NextIs(bDst)) { otherDst = bSrc->bbJumpDest; } diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 00166b1f72984a..5fc49925f07868 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -392,7 +392,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) break; case BBJ_COND: // replace predecessor in the fall through block. - noway_assert(bottom->GetBBNext()); + noway_assert(!bottom->IsLast()); fgReplacePred(bottom->GetBBNext(), top, bottom); // fall through for the jump target @@ -1577,7 +1577,7 @@ void Compiler::fgAddSyncMethodEnterExit() assert(!tryLastBB->bbFallsThrough()); BasicBlock* faultBB = fgNewBBafter(BBJ_EHFAULTRET, tryLastBB, false); - assert(tryLastBB->GetBBNext() == faultBB); + assert(tryLastBB->NextIs(faultBB)); assert(faultBB->IsLast()); assert(faultBB == fgLastBB); @@ -2594,7 +2594,7 @@ PhaseStatus Compiler::fgAddInternal() // Visit the BBJ_RETURN blocks and merge as necessary. - for (BasicBlock* block = fgFirstBB; block != lastBlockBeforeGenReturns->GetBBNext(); block = block->GetBBNext()) + for (BasicBlock* block = fgFirstBB; !lastBlockBeforeGenReturns->NextIs(block); block = block->GetBBNext()) { if (block->KindIs(BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index b4f3d385455b4d..f4df7141ff27e1 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -7298,7 +7298,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) BADCODE("invalid type for brtrue/brfalse"); } - if (opts.OptimizationEnabled() && (block->bbJumpDest == block->GetBBNext())) + if (opts.OptimizationEnabled() && block->NextIs(block->bbJumpDest)) { // We may have already modified `block`'s jump kind, if this is a re-importation. // @@ -7544,7 +7544,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) assertImp((genActualType(op1) == genActualType(op2)) || (varTypeIsI(op1) && varTypeIsI(op2)) || (varTypeIsFloating(op1) && varTypeIsFloating(op2))); - if (opts.OptimizationEnabled() && (block->bbJumpDest == block->GetBBNext())) + if (opts.OptimizationEnabled() && block->NextIs(block->bbJumpDest)) { // We may have already modified `block`'s jump kind, if this is a re-importation. 
// @@ -7630,7 +7630,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1))) { - if (curJump != block->GetBBNext()) + if (!block->NextIs(curJump)) { // transform the basic block into a BBJ_ALWAYS block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); @@ -11135,7 +11135,7 @@ void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart) // push catch arg the stack, spill to a temp if necessary // Note: can update HBtab->ebdFilter! - const bool isSingleBlockFilter = (filterBB->GetBBNext() == hndBegBB); + const bool isSingleBlockFilter = (filterBB->NextIs(hndBegBB)); filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter); impImportBlockPending(filterBB); diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index 984a3ad08aafbf..a69c6ac7142b42 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -2281,7 +2281,7 @@ bool Compiler::fgNormalizeEHCase2() fgReplaceJumpTarget(predBlock, newTryStart, insertBeforeBlk); } - if ((predBlock->GetBBNext() == newTryStart) && predBlock->bbFallsThrough()) + if (predBlock->NextIs(newTryStart) && predBlock->bbFallsThrough()) { fgRemoveRefPred(insertBeforeBlk, predBlock); fgAddRefPred(newTryStart, predBlock); @@ -4112,7 +4112,7 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) if (xtab->HasFinallyHandler()) { assert((xtab->ebdHndBeg == block) || // The normal case - ((xtab->ebdHndBeg->GetBBNext() == block) && + (xtab->ebdHndBeg->NextIs(block) && (xtab->ebdHndBeg->bbFlags & BBF_INTERNAL))); // After we've already inserted a header block, and we're // trying to decide how to split up the predecessor edges. if (predBlock->KindIs(BBJ_CALLFINALLY)) diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index 073323ecb737e7..55c563540bb950 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -1309,7 +1309,7 @@ class LiveVarAnalysis { // sometimes block numbers are not monotonically increasing which // would cause us not to identify backedges - if (block->GetBBNext() && block->GetBBNext()->bbNum <= block->bbNum) + if (!block->IsLast() && block->GetBBNext()->bbNum <= block->bbNum) { m_hasPossibleBackEdge = true; } diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index a64b7e3609e80c..a44c5a07ed8e53 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -2245,14 +2245,14 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) assert(context->HasBlockConditions(loopInd)); assert(h->KindIs(BBJ_NONE)); - assert(h->GetBBNext() == h2); + assert(h->NextIs(h2)); // If any condition is false, go to slowHead (which branches or falls through to e2). BasicBlock* e2 = nullptr; bool foundIt = blockMap->Lookup(loop.lpEntry, &e2); assert(foundIt && e2 != nullptr); - if (slowHead->GetBBNext() != e2) + if (!slowHead->NextIs(e2)) { // We can't just fall through to the slow path entry, so make it an unconditional branch. assert(slowHead->KindIs(BBJ_NONE)); // This is how we created it above. 
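For readers tracking the new helpers: NextIs and PrevIs are introduced by this series, but their definitions do not appear in the hunks above. Every call site, such as `h->NextIs(h2)` replacing `h->GetBBNext() == h2`, is consistent with trivial wrappers over the private link fields, roughly as sketched below (an assumed shape, not the actual source):

    // Assumed implementations, inferred from call sites in the hunks above;
    // bbNext/bbPrev are the private link fields referenced elsewhere in the series.
    bool BasicBlock::NextIs(const BasicBlock* block) const
    {
        return bbNext == block; // same result as the old `GetBBNext() == block`
    }

    bool BasicBlock::PrevIs(const BasicBlock* block) const
    {
        return bbPrev == block;
    }

One consequence worth noting: `bEnd->NextIs(nullptr)` would be true exactly when `bEnd` is the last block, which is why plain null checks like `if (bEnd->GetBBNext())` become `if (!bEnd->IsLast())` earlier in this patch rather than `NextIs` calls.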
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 82cddeab266984..230b26cb8222cc 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -799,7 +799,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) { JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum); noway_assert(comp->opts.OptimizationDisabled()); - if (originalSwitchBB->GetBBNext() == jumpTab[0]) + if (originalSwitchBB->NextIs(jumpTab[0])) { originalSwitchBB->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); originalSwitchBB->bbJumpDest = nullptr; @@ -892,7 +892,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // originalSwitchBB is now a BBJ_NONE, and there is a predecessor edge in afterDefaultCondBlock // representing the fall-through flow from originalSwitchBB. assert(originalSwitchBB->KindIs(BBJ_NONE)); - assert(originalSwitchBB->GetBBNext() == afterDefaultCondBlock); + assert(originalSwitchBB->NextIs(afterDefaultCondBlock)); assert(afterDefaultCondBlock->KindIs(BBJ_SWITCH)); assert(afterDefaultCondBlock->bbJumpSwt->bbsHasDefault); assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet. @@ -955,7 +955,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) assert(jumpTab[i] == uniqueSucc); (void)comp->fgRemoveRefPred(uniqueSucc, afterDefaultCondBlock); } - if (afterDefaultCondBlock->GetBBNext() == uniqueSucc) + if (afterDefaultCondBlock->NextIs(uniqueSucc)) { afterDefaultCondBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); afterDefaultCondBlock->bbJumpDest = nullptr; @@ -1221,7 +1221,7 @@ bool Lowering::TryLowerSwitchToBitTest( // impacts register allocation. // - if ((bbSwitch->GetBBNext() != bbCase0) && (bbSwitch->GetBBNext() != bbCase1)) + if (!bbSwitch->NextIs(bbCase0) && !bbSwitch->NextIs(bbCase1)) { return false; } @@ -1252,7 +1252,7 @@ bool Lowering::TryLowerSwitchToBitTest( comp->fgRemoveAllRefPreds(bbCase1, bbSwitch); comp->fgRemoveAllRefPreds(bbCase0, bbSwitch); - if (bbSwitch->GetBBNext() == bbCase0) + if (bbSwitch->NextIs(bbCase0)) { // GenCondition::C generates JC so we jump to bbCase1 when the bit is set bbSwitchCondition = GenCondition::C; @@ -1263,7 +1263,7 @@ bool Lowering::TryLowerSwitchToBitTest( } else { - assert(bbSwitch->GetBBNext() == bbCase1); + assert(bbSwitch->NextIs(bbCase1)); // GenCondition::NC generates JNC so we jump to bbCase0 when the bit is not set bbSwitchCondition = GenCondition::NC; @@ -1288,7 +1288,7 @@ bool Lowering::TryLowerSwitchToBitTest( // // Fallback to AND(RSZ(bitTable, switchValue), 1) // - GenTree* tstCns = comp->gtNewIconNode(bbSwitch->GetBBNext() != bbCase0 ? 0 : 1, bitTableType); + GenTree* tstCns = comp->gtNewIconNode(bbSwitch->NextIs(bbCase0) ? 1 : 0, bitTableType); GenTree* shift = comp->gtNewOperNode(GT_RSZ, bitTableType, bitTableIcon, switchValue); GenTree* one = comp->gtNewIconNode(1, bitTableType); GenTree* andOp = comp->gtNewOperNode(GT_AND, bitTableType, shift, one); diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index f51ffeec2e7819..a308fbf9d3c1ee 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -2548,7 +2548,7 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, { // Special handling to improve matching on backedges. BasicBlock* otherBlock = - (block == predBlock->GetBBNext()) ? predBlock->bbJumpDest : predBlock->GetBBNext(); + predBlock->NextIs(block) ? 
predBlock->bbJumpDest : predBlock->GetBBNext(); noway_assert(otherBlock != nullptr); if (isBlockVisited(otherBlock) && !blockInfo[otherBlock->bbNum].hasEHBoundaryIn) { diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 6833ee79a09172..d4c2f7842e52e8 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -13421,7 +13421,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1))) { - if (curJump != block->GetBBNext()) + if (!block->NextIs(curJump)) { // transform the basic block into a BBJ_ALWAYS block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index 76cf3ab3c89a3d..796fcd58f18750 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -106,7 +106,7 @@ class OptBoolsDsc // B3: GT_RETURN (BBJ_RETURN) // B4: GT_RETURN (BBJ_RETURN) // -// Case 2: if B1.bbJumpDest == B2->GetBBNext(), it transforms +// Case 2: if B2->NextIs(B1.bbJumpDest), it transforms // B1 : brtrue(t1, B3) // B2 : brtrue(t2, Bx) // B3 : @@ -136,7 +136,7 @@ bool OptBoolsDsc::optOptimizeBoolsCondBlock() m_sameTarget = true; } - else if (m_b1->bbJumpDest == m_b2->GetBBNext()) + else if (m_b2->NextIs(m_b1->bbJumpDest)) { // Given the following sequence of blocks : // B1: brtrue(t1, B3) @@ -480,13 +480,13 @@ bool OptBoolsDsc::optOptimizeCompareChainCondBlock() m_t3 = nullptr; bool foundEndOfOrConditions = false; - if ((m_b1->GetBBNext() == m_b2) && (m_b1->bbJumpDest == m_b2->GetBBNext())) + if (m_b1->NextIs(m_b2) && m_b2->NextIs(m_b1->bbJumpDest)) { // Found the end of two (or more) conditions being ORed together. // The final condition has been inverted. foundEndOfOrConditions = true; } - else if ((m_b1->GetBBNext() == m_b2) && (m_b1->bbJumpDest == m_b2->bbJumpDest)) + else if (m_b1->NextIs(m_b2) && (m_b1->bbJumpDest == m_b2->bbJumpDest)) { // Found two conditions connected together. 
} @@ -882,7 +882,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() m_b1->bbJumpSwt = m_b2->bbJumpSwt; #endif assert(m_b2->KindIs(BBJ_RETURN)); - assert(m_b1->GetBBNext() == m_b2); + assert(m_b1->NextIs(m_b2)); assert(m_b3 != nullptr); } else @@ -890,7 +890,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() assert(m_b1->KindIs(BBJ_COND)); assert(m_b2->KindIs(BBJ_COND)); assert(m_b1->bbJumpDest == m_b2->bbJumpDest); - assert(m_b1->GetBBNext() == m_b2); + assert(m_b1->NextIs(m_b2)); assert(!m_b2->IsLast()); } @@ -1494,7 +1494,7 @@ PhaseStatus Compiler::optOptimizeBools() if (b2->KindIs(BBJ_COND)) { - if ((b1->bbJumpDest != b2->bbJumpDest) && (b1->bbJumpDest != b2->GetBBNext())) + if ((b1->bbJumpDest != b2->bbJumpDest) && !b2->NextIs(b1->bbJumpDest)) { continue; } diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 1bf9b8f393afbf..d2095c346cc2d6 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -741,7 +741,7 @@ bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenT bool initBlockOk = (predBlock == initBlock); if (!initBlockOk) { - if (predBlock->KindIs(BBJ_NONE) && (predBlock->GetBBNext() == optLoopTable[loopInd].lpEntry) && + if (predBlock->KindIs(BBJ_NONE) && predBlock->NextIs(optLoopTable[loopInd].lpEntry) && (predBlock->countOfInEdges() == 1) && (predBlock->firstStmt() == nullptr) && !predBlock->IsFirst() && predBlock->GetBBPrev()->bbFallsThrough()) { @@ -1150,7 +1150,7 @@ bool Compiler::optExtractInitTestIncr( // If we are rebuilding the loop table, we would already have the pre-header block introduced // the first time, which might be empty if no hoisting has yet occurred. In this case, look a // little harder for the possible loop initialization statement. - if (initBlock->KindIs(BBJ_NONE) && (initBlock->GetBBNext() == top) && (initBlock->countOfInEdges() == 1) && + if (initBlock->KindIs(BBJ_NONE) && initBlock->NextIs(top) && (initBlock->countOfInEdges() == 1) && !initBlock->IsFirst() && initBlock->GetBBPrev()->bbFallsThrough()) { initBlock = initBlock->GetBBPrev(); @@ -1394,7 +1394,7 @@ void Compiler::optCheckPreds() } FALLTHROUGH; case BBJ_NONE: - noway_assert(bb->GetBBNext() == block); + noway_assert(bb->NextIs(block)); break; case BBJ_EHFILTERRET: case BBJ_ALWAYS: @@ -1961,7 +1961,7 @@ class LoopSearch // This must be a block we inserted to connect fall-through after moving blocks. // To determine if it's in the loop or not, use the number of its unique predecessor // block. - assert(block->bbPreds->getSourceBlock() == block->GetBBPrev()); + assert(block->PrevIs(block->bbPreds->getSourceBlock())); assert(block->bbPreds->getNextPredEdge() == nullptr); return block->GetBBPrev()->bbNum; } @@ -3032,7 +3032,7 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd) // BasicBlock* const t = optLoopTable[loopInd].lpTop; assert(siblingB->KindIs(BBJ_COND)); - assert(siblingB->GetBBNext() == t); + assert(siblingB->NextIs(t)); JITDUMP(FMT_LP " head " FMT_BB " is also " FMT_LP " bottom\n", loopInd, h->bbNum, sibling); @@ -3206,7 +3206,7 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati // Because of this, introducing a block before t automatically gives us // the right flow out of h. 
// - assert(h->GetBBNext() == t); + assert(h->NextIs(t)); assert(h->bbFallsThrough()); assert(h->KindIs(BBJ_NONE, BBJ_COND)); if (h->KindIs(BBJ_COND)) @@ -3330,8 +3330,8 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati } } - assert(h->GetBBNext() == newT); - assert(newT->GetBBNext() == t); + assert(h->NextIs(newT)); + assert(newT->NextIs(t)); // With the Option::Current we are changing which block is loop top. // Make suitable updates. @@ -3361,7 +3361,7 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati childLoop = optLoopTable[childLoop].lpSibling) { if ((optLoopTable[childLoop].lpEntry == origE) && (optLoopTable[childLoop].lpHead == h) && - newT->KindIs(BBJ_NONE) && (newT->GetBBNext() == origE)) + newT->KindIs(BBJ_NONE) && newT->NextIs(origE)) { optUpdateLoopHead(childLoop, h, newT); @@ -4359,7 +4359,7 @@ PhaseStatus Compiler::optUnrollLoops() // Note: we can't use the loop.LoopBlocks() iterator, as it captures loop.lpBottom->bbNext at the // beginning of iteration, and we insert blocks before that. So we need to evaluate lpBottom->bbNext // every iteration. - for (BasicBlock* block = loop.lpTop; block != loop.lpBottom->GetBBNext(); block = block->GetBBNext()) + for (BasicBlock* block = loop.lpTop; !loop.lpBottom->NextIs(block); block = block->GetBBNext()) { BasicBlock* newBlock = insertAfter = fgNewBBafter(block->GetBBJumpKind(), insertAfter, /*extendRegion*/ true); @@ -4478,7 +4478,7 @@ PhaseStatus Compiler::optUnrollLoops() // for (BasicBlock* succ : block->Succs(this)) { - if ((block == bottom) && (succ == bottom->GetBBNext())) + if ((block == bottom) && bottom->NextIs(succ)) { continue; } @@ -4860,7 +4860,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) // BasicBlock* const bTop = bTest->bbJumpDest; - if (bTop != block->GetBBNext()) + if (!block->NextIs(bTop)) { return false; } @@ -7987,7 +7987,7 @@ bool Compiler::optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* loopVnInv // void Compiler::fgSetEHRegionForNewLoopHead(BasicBlock* newHead, BasicBlock* top) { - assert(newHead->GetBBNext() == top); + assert(newHead->NextIs(top)); assert(!fgIsFirstBlockOfFilterOrHandler(top)); if ((top->bbFlags & BBF_TRY_BEG) != 0) @@ -8200,7 +8200,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum) { // Allow for either the fall-through or branch to target 'entry'. BasicBlock* skipLoopBlock; - if (head->GetBBNext() == entry) + if (head->NextIs(entry)) { skipLoopBlock = head->bbJumpDest; } @@ -8302,7 +8302,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum) case BBJ_NONE: // This 'entry' predecessor that isn't dominated by 'entry' must be outside the loop, // meaning it must be fall-through to 'entry', and we must have a top-entry loop. 
- noway_assert((entry == top) && (predBlock == head) && (predBlock->GetBBNext() == preHead)); + noway_assert((entry == top) && (predBlock == head) && predBlock->NextIs(preHead)); fgRemoveRefPred(entry, predBlock); fgAddRefPred(preHead, predBlock); break; @@ -8311,11 +8311,11 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum) if (predBlock->bbJumpDest == entry) { predBlock->bbJumpDest = preHead; - noway_assert(predBlock->GetBBNext() != preHead); + noway_assert(!predBlock->NextIs(preHead)); } else { - noway_assert((entry == top) && (predBlock == head) && (predBlock->GetBBNext() == preHead)); + noway_assert((entry == top) && (predBlock == head) && predBlock->NextIs(preHead)); } fgRemoveRefPred(entry, predBlock); fgAddRefPred(preHead, predBlock); diff --git a/src/coreclr/jit/promotionliveness.cpp b/src/coreclr/jit/promotionliveness.cpp index c8e004c88f4b55..727f153905e53a 100644 --- a/src/coreclr/jit/promotionliveness.cpp +++ b/src/coreclr/jit/promotionliveness.cpp @@ -301,7 +301,7 @@ void PromotionLiveness::InterBlockLiveness() for (BasicBlock* block = m_compiler->fgLastBB; block != nullptr; block = block->GetBBPrev()) { - m_hasPossibleBackEdge |= block->GetBBNext() && (block->GetBBNext()->bbNum <= block->bbNum); + m_hasPossibleBackEdge |= !block->IsLast() && (block->GetBBNext()->bbNum <= block->bbNum); changed |= PerBlockLiveness(block); } diff --git a/src/coreclr/jit/rangecheck.cpp b/src/coreclr/jit/rangecheck.cpp index 38f632dffa1682..29e8992fdbe59e 100644 --- a/src/coreclr/jit/rangecheck.cpp +++ b/src/coreclr/jit/rangecheck.cpp @@ -935,7 +935,7 @@ void RangeCheck::MergeAssertion(BasicBlock* block, GenTree* op, Range* pRange DE { GenTreePhiArg* arg = (GenTreePhiArg*)op; BasicBlock* pred = arg->gtPredBB; - if (pred->bbFallsThrough() && pred->GetBBNext() == block) + if (pred->bbFallsThrough() && pred->NextIs(block)) { assertions = pred->bbAssertionOut; JITDUMP("Merge assertions from pred " FMT_BB " edge: ", pred->bbNum); diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp index 579d2d5d66e622..07255f54b6a134 100644 --- a/src/coreclr/jit/redundantbranchopts.cpp +++ b/src/coreclr/jit/redundantbranchopts.cpp @@ -1143,7 +1143,7 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl // Note if the true or false pred is the fall through pred. // - if (predBlock->GetBBNext() == block) + if (predBlock->NextIs(block)) { JITDUMP(FMT_BB " is the fall-through pred\n", predBlock->bbNum); assert(jti.m_fallThroughPred == nullptr); @@ -1403,7 +1403,7 @@ bool Compiler::optJumpThreadPhi(BasicBlock* block, GenTree* tree, ValueNum treeN // Note if the true or false pred is the fall through pred. // - if (predBlock->GetBBNext() == block) + if (predBlock->NextIs(block)) { JITDUMP(FMT_BB " is the fall-through pred\n", predBlock->bbNum); assert(jti.m_fallThroughPred == nullptr); diff --git a/src/coreclr/jit/switchrecognition.cpp b/src/coreclr/jit/switchrecognition.cpp index 956db8444ff8e9..072552cccfdc76 100644 --- a/src/coreclr/jit/switchrecognition.cpp +++ b/src/coreclr/jit/switchrecognition.cpp @@ -98,7 +98,7 @@ bool IsConstantTestCondBlock(const BasicBlock* block, *blockIfTrue = *isReversed ? block->GetBBNext() : block->bbJumpDest; *blockIfFalse = *isReversed ? 
block->bbJumpDest : block->GetBBNext(); - if ((block->GetBBNext() == block->bbJumpDest) || (block->bbJumpDest == block)) + if (block->NextIs(block->bbJumpDest) || (block->bbJumpDest == block)) { // Ignoring weird cases like a condition jumping to itself return false; @@ -340,7 +340,7 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* // Unlink and remove the whole chain of conditional blocks BasicBlock* blockToRemove = firstBlock->GetBBNext(); fgRemoveRefPred(blockToRemove, firstBlock); - while (blockToRemove != lastBlock->GetBBNext()) + while (!lastBlock->NextIs(blockToRemove)) { BasicBlock* nextBlock = blockToRemove->GetBBNext(); fgRemoveBlock(blockToRemove, true); From b0fc61046369184dcac41346315f885bdd18f13c Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Thu, 5 Oct 2023 13:34:01 -0400 Subject: [PATCH 10/14] Rename GetBBNext() et al --- src/coreclr/jit/block.cpp | 10 +- src/coreclr/jit/block.h | 20 +-- src/coreclr/jit/clrjit.natvis | 4 +- src/coreclr/jit/codegenarm.cpp | 10 +- src/coreclr/jit/codegenarm64.cpp | 2 +- src/coreclr/jit/codegencommon.cpp | 24 +-- src/coreclr/jit/codegenlinear.cpp | 26 ++-- src/coreclr/jit/codegenloongarch64.cpp | 2 +- src/coreclr/jit/codegenriscv64.cpp | 4 +- src/coreclr/jit/codegenxarch.cpp | 2 +- src/coreclr/jit/compiler.cpp | 6 +- src/coreclr/jit/compiler.hpp | 14 +- src/coreclr/jit/fgbasic.cpp | 112 +++++++------- src/coreclr/jit/fgdiagnostic.cpp | 38 ++--- src/coreclr/jit/fgehopt.cpp | 64 ++++---- src/coreclr/jit/fgflow.cpp | 14 +- src/coreclr/jit/fginline.cpp | 10 +- src/coreclr/jit/fgopt.cpp | 160 ++++++++++---------- src/coreclr/jit/fgprofile.cpp | 20 +-- src/coreclr/jit/fgprofilesynthesis.cpp | 6 +- src/coreclr/jit/flowgraph.cpp | 32 ++-- src/coreclr/jit/helperexpansion.cpp | 2 +- src/coreclr/jit/ifconversion.cpp | 10 +- src/coreclr/jit/importer.cpp | 26 ++-- src/coreclr/jit/indirectcalltransformer.cpp | 4 +- src/coreclr/jit/jiteh.cpp | 50 +++--- src/coreclr/jit/liveness.cpp | 14 +- src/coreclr/jit/loopcloning.cpp | 16 +- src/coreclr/jit/lower.cpp | 4 +- src/coreclr/jit/lsra.cpp | 10 +- src/coreclr/jit/morph.cpp | 20 +-- src/coreclr/jit/optimizebools.cpp | 8 +- src/coreclr/jit/optimizer.cpp | 80 +++++----- src/coreclr/jit/patchpoint.cpp | 2 +- src/coreclr/jit/promotionliveness.cpp | 4 +- src/coreclr/jit/redundantbranchopts.cpp | 12 +- src/coreclr/jit/switchrecognition.cpp | 16 +- src/coreclr/jit/unwind.cpp | 2 +- 38 files changed, 430 insertions(+), 430 deletions(-) diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 42aa90ffef4c94..5b6d4b828342fd 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -133,7 +133,7 @@ FlowEdge* Compiler::BlockPredsWithEH(BasicBlock* blk) // these cannot cause transfer to the handler...) // TODO-Throughput: It would be nice if we could iterate just over the blocks in the try, via // something like: - // for (BasicBlock* bb = ehblk->ebdTryBeg; bb != ehblk->ebdTryLast->GetBBNext(); bb = bb->GetBBNext()) + // for (BasicBlock* bb = ehblk->ebdTryBeg; bb != ehblk->ebdTryLast->Next(); bb = bb->Next()) // (plus adding in any filter blocks outside the try whose exceptions are handled here). // That doesn't work, however: funclets have caused us to sometimes split the body of a try into // more than one sequence of contiguous blocks. We need to find a better way to do this. 
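The TODO comment above sketches the half-open range walk used throughout this series: a [first, last] block range is traversed with `last->Next()` as the exclusive bound. A minimal sketch of the idiom (`WalkRange` is hypothetical, for illustration only):

    // Hypothetical helper showing the walk idiom. `last->Next()` is nullptr when
    // `last` is fgLastBB, so the loop also terminates at the end of the list.
    // The bound is re-evaluated on every iteration, which is what the
    // optUnrollLoops hunk earlier relies on when blocks are inserted mid-walk.
    void WalkRange(BasicBlock* first, BasicBlock* last)
    {
        for (BasicBlock* block = first; block != last->Next(); block = block->Next())
        {
            // visit `block`
        }
    }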
@@ -160,7 +160,7 @@ FlowEdge* Compiler::BlockPredsWithEH(BasicBlock* blk) if (enclosingDsc->HasFilter()) { for (BasicBlock* filterBlk = enclosingDsc->ebdFilter; filterBlk != enclosingDsc->ebdHndBeg; - filterBlk = filterBlk->GetBBNext()) + filterBlk = filterBlk->Next()) { res = new (this, CMK_FlowEdge) FlowEdge(filterBlk, res); @@ -1525,9 +1525,9 @@ bool BasicBlock::isBBCallAlwaysPair() const #endif // Some asserts that the next block is a BBJ_ALWAYS of the proper form. assert(!this->IsLast()); - assert(this->GetBBNext()->KindIs(BBJ_ALWAYS)); - assert(this->GetBBNext()->bbFlags & BBF_KEEP_BBJ_ALWAYS); - assert(this->GetBBNext()->isEmpty()); + assert(this->Next()->KindIs(BBJ_ALWAYS)); + assert(this->Next()->bbFlags & BBF_KEEP_BBJ_ALWAYS); + assert(this->Next()->isEmpty()); return true; } diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 875327d349c2ba..098ae712e13b3d 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -531,12 +531,12 @@ struct BasicBlock : private LIR::Range bbJumpKind = kind; } - BasicBlock* GetBBPrev() const + BasicBlock* Prev() const { return bbPrev; } - void SetBBPrev(BasicBlock* prev) + void SetPrev(BasicBlock* prev) { bbPrev = prev; if (prev) @@ -545,12 +545,12 @@ struct BasicBlock : private LIR::Range } } - BasicBlock* GetBBNext() const + BasicBlock* Next() const { return bbNext; } - void SetBBNext(BasicBlock* next) + void SetNext(BasicBlock* next) { bbNext = next; if (next) @@ -1476,10 +1476,10 @@ class BasicBlockIterator { assert(m_block != nullptr); // Check that we haven't been spliced out of the list. - assert((m_block->IsLast()) || m_block->GetBBNext()->PrevIs(m_block)); - assert((m_block->IsFirst()) || m_block->GetBBPrev()->NextIs(m_block)); + assert((m_block->IsLast()) || m_block->Next()->PrevIs(m_block)); + assert((m_block->IsFirst()) || m_block->Prev()->NextIs(m_block)); - m_block = m_block->GetBBNext(); + m_block = m_block->Next(); return *this; } @@ -1542,7 +1542,7 @@ class BasicBlockRangeList BasicBlockIterator end() const { - return BasicBlockIterator(m_end->GetBBNext()); // walk until we see the block *following* the `m_end` block + return BasicBlockIterator(m_end->Next()); // walk until we see the block *following* the `m_end` block } }; @@ -1637,13 +1637,13 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) break; case BBJ_NONE: - m_succs[0] = block->GetBBNext(); + m_succs[0] = block->Next(); m_begin = &m_succs[0]; m_end = &m_succs[1]; break; case BBJ_COND: - m_succs[0] = block->GetBBNext(); + m_succs[0] = block->Next(); m_begin = &m_succs[0]; // If both fall-through and branch successors are identical, then only include diff --git a/src/coreclr/jit/clrjit.natvis b/src/coreclr/jit/clrjit.natvis index f75fd877e0e1ee..81e1503de22cf7 100644 --- a/src/coreclr/jit/clrjit.natvis +++ b/src/coreclr/jit/clrjit.natvis @@ -105,7 +105,7 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u varIndex++ bbLiveInMap = bbLiveInMap >> 1 - block = block->GetBBNext() + block = block->Next() "OutVarToRegMaps" @@ -124,7 +124,7 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u varIndex++ bbLiveInMap = bbLiveInMap >> 1 - block = block->GetBBNext() + block = block->Next() this->m_AvailableRegs diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp index 01459878ab1e45..874284cab4dadf 100644 --- a/src/coreclr/jit/codegenarm.cpp +++ b/src/coreclr/jit/codegenarm.cpp @@ -124,11 +124,11 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* 
block) assert(block->isBBCallAlwaysPair()); assert(!block->IsLast()); - assert(block->GetBBNext()->KindIs(BBJ_ALWAYS)); - assert(block->GetBBNext()->bbJumpDest != NULL); - assert(block->GetBBNext()->bbJumpDest->bbFlags & BBF_FINALLY_TARGET); + assert(block->Next()->KindIs(BBJ_ALWAYS)); + assert(block->Next()->bbJumpDest != NULL); + assert(block->Next()->bbJumpDest->bbFlags & BBF_FINALLY_TARGET); - bbFinallyRet = block->GetBBNext()->bbJumpDest; + bbFinallyRet = block->Next()->bbJumpDest; // Load the address where the finally funclet should return into LR. // The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do the return. @@ -143,7 +143,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) // block is RETLESS. assert(!(block->bbFlags & BBF_RETLESS_CALL)); assert(block->isBBCallAlwaysPair()); - return block->GetBBNext(); + return block->Next(); } //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index e014a108d571a1..4fc3436df5155c 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -2160,7 +2160,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) } GetEmitter()->emitIns_J(INS_bl_local, block->bbJumpDest); - BasicBlock* const nextBlock = block->GetBBNext(); + BasicBlock* const nextBlock = block->Next(); if (block->bbFlags & BBF_RETLESS_CALL) { diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index f245509c535748..bfd8db45ce0233 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -402,10 +402,10 @@ void CodeGen::genMarkLabelsForCodegen() { // For callfinally thunks, we need to mark the block following the callfinally/always pair, // as that's needed for identifying the range of the "duplicate finally" region in EH data. - BasicBlock* bbToLabel = block->GetBBNext(); + BasicBlock* bbToLabel = block->Next(); if (block->isBBCallAlwaysPair()) { - bbToLabel = bbToLabel->GetBBNext(); // skip the BBJ_ALWAYS + bbToLabel = bbToLabel->Next(); // skip the BBJ_ALWAYS } if (bbToLabel != nullptr) { @@ -448,14 +448,14 @@ void CodeGen::genMarkLabelsForCodegen() if (!HBtab->ebdTryLast->IsLast()) { - HBtab->ebdTryLast->GetBBNext()->bbFlags |= BBF_HAS_LABEL; - JITDUMP(" " FMT_BB " : try end\n", HBtab->ebdTryLast->GetBBNext()->bbNum); + HBtab->ebdTryLast->Next()->bbFlags |= BBF_HAS_LABEL; + JITDUMP(" " FMT_BB " : try end\n", HBtab->ebdTryLast->Next()->bbNum); } if (!HBtab->ebdHndLast->IsLast()) { - HBtab->ebdHndLast->GetBBNext()->bbFlags |= BBF_HAS_LABEL; - JITDUMP(" " FMT_BB " : hnd end\n", HBtab->ebdHndLast->GetBBNext()->bbNum); + HBtab->ebdHndLast->Next()->bbFlags |= BBF_HAS_LABEL; + JITDUMP(" " FMT_BB " : hnd end\n", HBtab->ebdHndLast->Next()->bbNum); } if (HBtab->HasFilter()) @@ -2302,9 +2302,9 @@ void CodeGen::genReportEH() hndBeg = compiler->ehCodeOffset(HBtab->ebdHndBeg); tryEnd = (HBtab->ebdTryLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize - : compiler->ehCodeOffset(HBtab->ebdTryLast->GetBBNext()); + : compiler->ehCodeOffset(HBtab->ebdTryLast->Next()); hndEnd = (HBtab->ebdHndLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize - : compiler->ehCodeOffset(HBtab->ebdHndLast->GetBBNext()); + : compiler->ehCodeOffset(HBtab->ebdHndLast->Next()); if (HBtab->HasFilter()) { @@ -2524,9 +2524,9 @@ void CodeGen::genReportEH() hndBeg = compiler->ehCodeOffset(bbHndBeg); tryEnd = (bbTryLast == compiler->fgLastBB) ? 
compiler->info.compNativeCodeSize
-                                                   : compiler->ehCodeOffset(bbTryLast->GetBBNext());
+                                                   : compiler->ehCodeOffset(bbTryLast->Next());
         hndEnd = (bbHndLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
-                                                   : compiler->ehCodeOffset(bbHndLast->GetBBNext());
+                                                   : compiler->ehCodeOffset(bbHndLast->Next());

         if (encTab->HasFilter())
         {
@@ -2590,10 +2590,10 @@ void CodeGen::genReportEH()

             // How big is it? The BBJ_ALWAYS has a null bbEmitCookie! Look for the block after, which must be
             // a label or jump target, since the BBJ_CALLFINALLY doesn't fall through.
-            BasicBlock* bbLabel = block->GetBBNext();
+            BasicBlock* bbLabel = block->Next();
             if (block->isBBCallAlwaysPair())
             {
-                bbLabel = bbLabel->GetBBNext(); // skip the BBJ_ALWAYS
+                bbLabel = bbLabel->Next(); // skip the BBJ_ALWAYS
             }
             if (bbLabel == nullptr)
             {
diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp
index e94da2bdfd2304..bc9ce42fe7ae6e 100644
--- a/src/coreclr/jit/codegenlinear.cpp
+++ b/src/coreclr/jit/codegenlinear.cpp
@@ -170,7 +170,7 @@ void CodeGen::genCodeForBBlist()

     BasicBlock* block;

-    for (block = compiler->fgFirstBB; block != nullptr; block = block->GetBBNext())
+    for (block = compiler->fgFirstBB; block != nullptr; block = block->Next())
     {
 #ifdef DEBUG
@@ -319,7 +319,7 @@ void CodeGen::genCodeForBBlist()
             }
 #endif
             // We should never have a block that falls through into the Cold section
-            noway_assert(!block->GetBBPrev()->bbFallsThrough());
+            noway_assert(!block->Prev()->bbFallsThrough());

             needLabel = true;
         }
@@ -330,12 +330,12 @@ void CodeGen::genCodeForBBlist()
         //
         // Note: We need to have set compCurBB before calling emitAddLabel
         //
-        if (!block->IsFirst() && block->GetBBPrev()->KindIs(BBJ_COND) &&
-            (block->bbWeight != block->GetBBPrev()->bbWeight))
+        if (!block->IsFirst() && block->Prev()->KindIs(BBJ_COND) &&
+            (block->bbWeight != block->Prev()->bbWeight))
         {
             JITDUMP("Adding label due to BB weight difference: BBJ_COND " FMT_BB " with weight " FMT_WT
                     " different from " FMT_BB " with weight " FMT_WT "\n",
-                    block->GetBBPrev()->bbNum, block->GetBBPrev()->bbWeight, block->bbNum, block->bbWeight);
+                    block->Prev()->bbNum, block->Prev()->bbWeight, block->bbNum, block->bbWeight);

             needLabel = true;
         }
@@ -550,7 +550,7 @@ void CodeGen::genCodeForBBlist()
         bool isLastBlockProcessed = (block->IsLast());
         if (block->isBBCallAlwaysPair())
         {
-            isLastBlockProcessed = (block->GetBBNext()->IsLast());
+            isLastBlockProcessed = (block->Next()->IsLast());
         }

         if (compiler->opts.compDbgInfo && isLastBlockProcessed)
@@ -615,7 +615,7 @@ void CodeGen::genCodeForBBlist()
                 // Note: we may be generating a few too many NOPs for the case of call preceding an epilog. Technically,
                 // if the next block is a BBJ_RETURN, an epilog will be generated, but there may be some instructions
                 // generated before the OS epilog starts, such as a GS cookie check.
-                if ((block->IsLast()) || !BasicBlock::sameEHRegion(block, block->GetBBNext()))
+                if ((block->IsLast()) || !BasicBlock::sameEHRegion(block, block->Next()))
                 {
                     // We only need the NOP if we're not going to generate any more code as part of the block end.
@@ -679,9 +679,9 @@ void CodeGen::genCodeForBBlist()
                 // 2. If this is the last block of the hot section.
                 // 3. If the subsequent block is a special throw block.
                 // 4. On AMD64, if the next block is in a different EH region.
- if ((block->IsLast()) || (block->GetBBNext()->bbFlags & BBF_FUNCLET_BEG) || - !BasicBlock::sameEHRegion(block, block->GetBBNext()) || - (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->GetBBNext())) || + if ((block->IsLast()) || (block->Next()->bbFlags & BBF_FUNCLET_BEG) || + !BasicBlock::sameEHRegion(block, block->Next()) || + (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->Next())) || block->IsLastHotBlock(compiler)) { instGen(INS_BREAKPOINT); // This should never get executed @@ -785,8 +785,8 @@ void CodeGen::genCodeForBBlist() if (!block->IsLast()) { - JITDUMP("Mark " FMT_BB " as label: alignment end-of-loop\n", block->GetBBNext()->bbNum); - block->GetBBNext()->bbFlags |= BBF_HAS_LABEL; + JITDUMP("Mark " FMT_BB " as label: alignment end-of-loop\n", block->Next()->bbNum); + block->Next()->bbFlags |= BBF_HAS_LABEL; } } #endif // FEATURE_LOOP_ALIGN @@ -818,7 +818,7 @@ void CodeGen::genCodeForBBlist() GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->KindIs(BBJ_ALWAYS))); } - if (!block->IsLast() && (block->GetBBNext()->isLoopAlign())) + if (!block->IsLast() && (block->Next()->isLoopAlign())) { if (compiler->opts.compJitHideAlignBehindJmp) { diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index 2eacd544971fa5..d57e4d18a0af9a 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -1520,7 +1520,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) } GetEmitter()->emitIns_J(INS_bl, block->bbJumpDest); - BasicBlock* const nextBlock = block->GetBBNext(); + BasicBlock* const nextBlock = block->Next(); if (block->bbFlags & BBF_RETLESS_CALL) { diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp index b02e6bb160fe71..0fabf1daf8c190 100644 --- a/src/coreclr/jit/codegenriscv64.cpp +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -1158,7 +1158,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) } GetEmitter()->emitIns_J(INS_jal, block->bbJumpDest); - BasicBlock* const nextBlock = block->GetBBNext(); + BasicBlock* const nextBlock = block->Next(); if (block->bbFlags & BBF_RETLESS_CALL) { @@ -1182,7 +1182,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) BasicBlock* const jumpDest = nextBlock->bbJumpDest; // Now go to where the finally funclet needs to return to. - if ((jumpDest == nextBlock->GetBBNext()) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) + if ((jumpDest == nextBlock->Next()) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) { // Fall-through. 
// TODO-RISCV64-CQ: Can we get rid of this instruction, and just have the call return directly diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index ae57da88dd86ad..b2d427959ae50d 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -205,7 +205,7 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg) BasicBlock* CodeGen::genCallFinally(BasicBlock* block) { - BasicBlock* const nextBlock = block->GetBBNext(); + BasicBlock* const nextBlock = block->Next(); #if defined(FEATURE_EH_FUNCLETS) // Generate a call to the finally, like this: diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index 68f2a6f016c845..eeb03d955667c9 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -5291,11 +5291,11 @@ PhaseStatus Compiler::placeLoopAlignInstructions() } } - if (!block->IsLast() && (block->GetBBNext()->isLoopAlign())) + if (!block->IsLast() && (block->Next()->isLoopAlign())) { // Loop alignment is disabled for cold blocks assert((block->bbFlags & BBF_COLD) == 0); - BasicBlock* const loopTop = block->GetBBNext(); + BasicBlock* const loopTop = block->Next(); bool isSpecialCallFinally = block->isBBCallAlwaysPairTail(); bool unmarkedLoopAlign = false; @@ -9614,7 +9614,7 @@ BasicBlock* dFindBlock(unsigned bbNum) BasicBlock* block = nullptr; dbBlock = nullptr; - for (block = comp->fgFirstBB; block != nullptr; block = block->GetBBNext()) + for (block = comp->fgFirstBB; block != nullptr; block = block->Next()) { if (block->bbNum == bbNum) { diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index c37f20a8b528dd..080d1fb4be13da 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -633,7 +633,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) BasicBlock* finBeg = ehDsc->ebdHndBeg; - for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->GetBBNext()) + for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next()) { if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { @@ -642,12 +642,12 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) assert(bcall->isBBCallAlwaysPair()); - RETURN_ON_ABORT(func(bcall->GetBBNext())); + RETURN_ON_ABORT(func(bcall->Next())); } RETURN_ON_ABORT(VisitEHSuccessors(comp, this, func)); - for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->GetBBNext()) + for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next()) { if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { @@ -655,7 +655,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) } assert(bcall->isBBCallAlwaysPair()); - RETURN_ON_ABORT(VisitSuccessorEHSuccessors(comp, this, bcall->GetBBNext(), func)); + RETURN_ON_ABORT(VisitSuccessorEHSuccessors(comp, this, bcall->Next(), func)); } break; @@ -767,7 +767,7 @@ BasicBlockVisit BasicBlock::VisitRegularSuccs(Compiler* comp, TFunc func) BasicBlock* finBeg = ehDsc->ebdHndBeg; - for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->GetBBNext()) + for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next()) { if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { @@ -776,7 +776,7 @@ BasicBlockVisit BasicBlock::VisitRegularSuccs(Compiler* comp, TFunc func) assert(bcall->isBBCallAlwaysPair()); - RETURN_ON_ABORT(func(bcall->GetBBNext())); + RETURN_ON_ABORT(func(bcall->Next())); } break; @@ -3235,7 +3235,7 @@ inline void 
Compiler::fgConvertBBToThrowBB(BasicBlock* block) // Must do this after we update bbJumpKind of block. if (isCallAlwaysPair) { - BasicBlock* leaveBlk = block->GetBBNext(); + BasicBlock* leaveBlk = block->Next(); noway_assert(leaveBlk->KindIs(BBJ_ALWAYS)); // leaveBlk is now unreachable, so scrub the pred lists. diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 8214b4552a9ef9..ca80c75789f0f2 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -212,12 +212,12 @@ BasicBlock* Compiler::fgNewBasicBlock(BBjumpKinds jumpKind) if (fgFirstBB) { - fgLastBB->SetBBNext(block); + fgLastBB->SetNext(block); } else { fgFirstBB = block; - block->SetBBPrev(nullptr); + block->SetPrev(nullptr); } fgLastBB = block; @@ -694,7 +694,7 @@ BasicBlock* Compiler::fgLookupBB(unsigned addr) while (dsc->bbFlags & BBF_INTERNAL) { - dsc = dsc->GetBBNext(); + dsc = dsc->Next(); mid++; // We skipped over too many, Set hi back to the original mid - 1 @@ -2803,7 +2803,7 @@ void Compiler::fgLinkBasicBlocks() FALLTHROUGH; case BBJ_NONE: - fgAddRefPred(curBBdesc->GetBBNext(), curBBdesc, oldEdge); + fgAddRefPred(curBBdesc->Next(), curBBdesc, oldEdge); break; case BBJ_EHFILTERRET: @@ -3664,7 +3664,7 @@ void Compiler::fgFindBasicBlocks() hndBegBB->bbCatchTyp = BBCT_FILTER_HANDLER; // Mark all BBs that belong to the filter with the XTnum of the corresponding handler - for (block = filtBB; /**/; block = block->GetBBNext()) + for (block = filtBB; /**/; block = block->Next()) { if (block == nullptr) { @@ -3753,10 +3753,10 @@ void Compiler::fgFindBasicBlocks() HBtab->ebdHandlerType = ToEHHandlerType(clause.Flags); HBtab->ebdTryBeg = tryBegBB; - HBtab->ebdTryLast = (tryEndBB == nullptr) ? fgLastBB : tryEndBB->GetBBPrev(); + HBtab->ebdTryLast = (tryEndBB == nullptr) ? fgLastBB : tryEndBB->Prev(); HBtab->ebdHndBeg = hndBegBB; - HBtab->ebdHndLast = (hndEndBB == nullptr) ? fgLastBB : hndEndBB->GetBBPrev(); + HBtab->ebdHndLast = (hndEndBB == nullptr) ? fgLastBB : hndEndBB->Prev(); // // Assert that all of our try/hnd blocks are setup correctly. 
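The bodies of SetNext and SetPrev are truncated in the block.h hunk earlier in this patch; the `if (next)` guard visible there, together with call sites like the list append in fgNewBasicBlock above (a single `fgLastBB->SetNext(block)` keeps the list consistent), suggests the setters also fix up the opposite link. A sketch under that assumption:

    // Assumed completion of the truncated setter body: one call maintains both
    // directions of the doubly-linked block list.
    void BasicBlock::SetNext(BasicBlock* next)
    {
        bbNext = next;
        if (next)
        {
            next->bbPrev = this; // assumed back-link, mirroring the visible guard
        }
    }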
@@ -3798,7 +3798,7 @@ void Compiler::fgFindBasicBlocks() BasicBlock* block; - for (block = hndBegBB; block && (block->bbCodeOffs < hndEndOff); block = block->GetBBNext()) + for (block = hndBegBB; block && (block->bbCodeOffs < hndEndOff); block = block->Next()) { if (!block->hasHndIndex()) { @@ -3821,7 +3821,7 @@ void Compiler::fgFindBasicBlocks() /* Mark all blocks within the covered range of the try */ - for (block = tryBegBB; block && (block->bbCodeOffs < tryEndOff); block = block->GetBBNext()) + for (block = tryBegBB; block && (block->bbCodeOffs < tryEndOff); block = block->Next()) { /* Mark this BB as belonging to a 'try' block */ @@ -4016,7 +4016,7 @@ void Compiler::fgFixEntryFlowForOSR() // fgEnsureFirstBBisScratch(); assert(fgFirstBB->KindIs(BBJ_NONE)); - fgRemoveRefPred(fgFirstBB->GetBBNext(), fgFirstBB); + fgRemoveRefPred(fgFirstBB->Next(), fgFirstBB); fgFirstBB->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); fgFirstBB->bbJumpDest = fgOSREntryBB; FlowEdge* const edge = fgAddRefPred(fgOSREntryBB, fgFirstBB); @@ -4061,7 +4061,7 @@ void Compiler::fgCheckBasicBlockControlFlow() { case BBJ_NONE: // block flows into the next one (no jump) - fgControlFlowPermitted(blk, blk->GetBBNext()); + fgControlFlowPermitted(blk, blk->Next()); break; @@ -4073,7 +4073,7 @@ void Compiler::fgCheckBasicBlockControlFlow() case BBJ_COND: // block conditionally jumps to the target - fgControlFlowPermitted(blk, blk->GetBBNext()); + fgControlFlowPermitted(blk, blk->Next()); fgControlFlowPermitted(blk, blk->bbJumpDest); @@ -4934,8 +4934,8 @@ void Compiler::fgUnlinkBlock(BasicBlock* block) assert(block != fgLastBB); assert((fgFirstBBScratch == nullptr) || (fgFirstBBScratch == fgFirstBB)); - fgFirstBB = block->GetBBNext(); - fgFirstBB->SetBBPrev(nullptr); + fgFirstBB = block->Next(); + fgFirstBB->SetPrev(nullptr); if (fgFirstBBScratch != nullptr) { @@ -4951,10 +4951,10 @@ void Compiler::fgUnlinkBlock(BasicBlock* block) } else { - block->GetBBPrev()->SetBBNext(block->GetBBNext()); + block->Prev()->SetNext(block->Next()); if (block == fgLastBB) { - fgLastBB = block->GetBBPrev(); + fgLastBB = block->Prev(); } } } @@ -4971,10 +4971,10 @@ void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd) assert(bBeg != nullptr); assert(bEnd != nullptr); - BasicBlock* bPrev = bBeg->GetBBPrev(); + BasicBlock* bPrev = bBeg->Prev(); assert(bPrev != nullptr); // Can't unlink a range starting with the first block - bPrev->SetBBNext(bEnd->GetBBNext()); + bPrev->SetNext(bEnd->Next()); /* If we removed the last block in the method then update fgLastBB */ if (fgLastBB == bEnd) @@ -4986,7 +4986,7 @@ void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd) // If bEnd was the first Cold basic block update fgFirstColdBlock if (fgFirstColdBlock == bEnd) { - fgFirstColdBlock = bPrev->GetBBNext(); + fgFirstColdBlock = bPrev->Next(); } #if defined(FEATURE_EH_FUNCLETS) @@ -4995,7 +4995,7 @@ void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd) // can't cross the non-funclet/funclet region. And you can't unlink the first block // of the first funclet with this, either. (If that's necessary, it could be allowed // by updating fgFirstFuncletBB to bEnd->bbNext.) 
- for (BasicBlock* tempBB = bBeg; tempBB != bEnd->GetBBNext(); tempBB = tempBB->GetBBNext()) + for (BasicBlock* tempBB = bBeg; tempBB != bEnd->Next(); tempBB = tempBB->Next()) { assert(tempBB != fgFirstFuncletBB); } @@ -5014,7 +5014,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) PREFIX_ASSUME(block != nullptr); - BasicBlock* bPrev = block->GetBBPrev(); + BasicBlock* bPrev = block->Prev(); JITDUMP("fgRemoveBlock " FMT_BB ", unreachable=%s\n", block->bbNum, dspBool(unreachable)); @@ -5046,7 +5046,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // If block was the fgFirstFuncletBB then set fgFirstFuncletBB to block->bbNext if (block == fgFirstFuncletBB) { - fgFirstFuncletBB = block->GetBBNext(); + fgFirstFuncletBB = block->Next(); } #endif // FEATURE_EH_FUNCLETS @@ -5073,7 +5073,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // If this is the first Cold basic block update fgFirstColdBlock if (block == fgFirstColdBlock) { - fgFirstColdBlock = block->GetBBNext(); + fgFirstColdBlock = block->Next(); } /* Unlink this block from the bbNext chain */ @@ -5087,7 +5087,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) */ if (block->isBBCallAlwaysPair()) { - BasicBlock* leaveBlk = block->GetBBNext(); + BasicBlock* leaveBlk = block->Next(); noway_assert(leaveBlk->KindIs(BBJ_ALWAYS)); leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; @@ -5156,7 +5156,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) } else { - succBlock = block->GetBBNext(); + succBlock = block->Next(); } bool skipUnmarkLoop = false; @@ -5171,14 +5171,14 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // If this is the first Cold basic block update fgFirstColdBlock if (block == fgFirstColdBlock) { - fgFirstColdBlock = block->GetBBNext(); + fgFirstColdBlock = block->Next(); } #if defined(FEATURE_EH_FUNCLETS) // Update fgFirstFuncletBB if necessary if (block == fgFirstFuncletBB) { - fgFirstFuncletBB = block->GetBBNext(); + fgFirstFuncletBB = block->Next(); } #endif // FEATURE_EH_FUNCLETS @@ -5461,7 +5461,7 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) bSrc->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); JITDUMP("Changed an unconditional jump from " FMT_BB " to the next block " FMT_BB " into a BBJ_NONE block\n", - bSrc->bbNum, bSrc->GetBBNext()->bbNum); + bSrc->bbNum, bSrc->Next()->bbNum); } } } @@ -5590,7 +5590,7 @@ bool Compiler::fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc /* = NULL * while (true) { - bTemp = bTemp->GetBBNext(); + bTemp = bTemp->Next(); if (bTemp == nullptr) { @@ -5643,8 +5643,8 @@ void Compiler::fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBloc /* relink [bStart .. bEnd] into the flow graph */ - bEnd->SetBBNext(insertAfterBlk->GetBBNext()); - insertAfterBlk->SetBBNext(bStart); + bEnd->SetNext(insertAfterBlk->Next()); + insertAfterBlk->SetNext(bStart); /* If insertAfterBlk was fgLastBB then update fgLastBB */ if (insertAfterBlk == fgLastBB) @@ -5774,12 +5774,12 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r break; } - block = block->GetBBNext(); + block = block->Next(); } // Ensure that bStart .. bLast defined a valid range noway_assert((validRange == true) && (inTheRange == false)); - bPrev = bStart->GetBBPrev(); + bPrev = bStart->Prev(); noway_assert(bPrev != nullptr); // Can't move a range that includes the first block of the function. JITDUMP("Relocating %s range " FMT_BB ".." 
FMT_BB " (EH#%u) to end of BBlist\n", @@ -5816,7 +5816,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r #endif // FEATURE_EH_FUNCLETS BasicBlock* bNext; - bNext = bLast->GetBBNext(); + bNext = bLast->Next(); /* Temporarily unlink [bStart .. bLast] from the flow graph */ fgUnlinkRange(bStart, bLast); @@ -5865,7 +5865,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r { // If we moved a set of blocks that were at the end of // a different try region then we may need to update ebdTryLast - for (block = HBtab->ebdTryBeg; block != nullptr; block = block->GetBBNext()) + for (block = HBtab->ebdTryBeg; block != nullptr; block = block->Next()) { if (block == bPrev) { @@ -5886,7 +5886,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r { // If we moved a set of blocks that were at the end of // a different handler region then we must update ebdHndLast - for (block = HBtab->ebdHndBeg; block != nullptr; block = block->GetBBNext()) + for (block = HBtab->ebdHndBeg; block != nullptr; block = block->Next()) { if (block == bPrev) { @@ -5913,7 +5913,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r else { assert(fgFirstFuncletBB != - insertAfterBlk->GetBBNext()); // We insert at the end, not at the beginning, of the funclet region. + insertAfterBlk->Next()); // We insert at the end, not at the beginning, of the funclet region. } // These asserts assume we aren't moving try regions (which we might need to do). Only @@ -5947,7 +5947,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r { // If we moved a set of blocks that were at the end of // a different try region then we may need to update ebdTryLast - for (block = HBtab->ebdTryBeg; block != NULL; block = block->GetBBNext()) + for (block = HBtab->ebdTryBeg; block != NULL; block = block->Next()) { if (block == bPrev) { @@ -5965,7 +5965,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r { // If we moved a set of blocks that were at the end of // a different handler region then we must update ebdHndLast - for (block = HBtab->ebdHndBeg; block != NULL; block = block->GetBBNext()) + for (block = HBtab->ebdHndBeg; block != NULL; block = block->Next()) { if (block == bPrev) { @@ -6174,14 +6174,14 @@ void Compiler::fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk) { if (insertBeforeBlk->IsFirst()) { - newBlk->SetBBNext(fgFirstBB); + newBlk->SetNext(fgFirstBB); fgFirstBB = newBlk; - newBlk->SetBBPrev(nullptr); + newBlk->SetPrev(nullptr); } else { - fgInsertBBafter(insertBeforeBlk->GetBBPrev(), newBlk); + fgInsertBBafter(insertBeforeBlk->Prev(), newBlk); } #if defined(FEATURE_EH_FUNCLETS) @@ -6204,8 +6204,8 @@ void Compiler::fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk) */ void Compiler::fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk) { - newBlk->SetBBNext(insertAfterBlk->GetBBNext()); - insertAfterBlk->SetBBNext(newBlk); + newBlk->SetNext(insertAfterBlk->Next()); + insertAfterBlk->SetNext(newBlk); if (fgLastBB == insertAfterBlk) { @@ -6241,7 +6241,7 @@ bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt) } // Currently bNext is the fall through for bCur - BasicBlock* bNext = bCur->GetBBNext(); + BasicBlock* bNext = bCur->Next(); noway_assert(bNext != nullptr); // We will set result to true if bAlt is a better fall through than bCur @@ -6367,7 +6367,7 @@ BasicBlock* 
Compiler::fgFindInsertPoint(unsigned regionIndex,
     // Assert that startBlk precedes endBlk in the block list.
     // We don't want to use bbNum to assert this condition, as we cannot depend on the block numbers being
     // sequential at all times.
-    for (BasicBlock* b = startBlk; b != endBlk; b = b->GetBBNext())
+    for (BasicBlock* b = startBlk; b != endBlk; b = b->Next())
     {
         assert(b != nullptr); // We reached the end of the block list, but never found endBlk.
     }
@@ -6396,7 +6396,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
     if (nearBlk != nullptr)
     {
         // Does the nearBlk precede the startBlk?
-        for (blk = nearBlk; blk != nullptr; blk = blk->GetBBNext())
+        for (blk = nearBlk; blk != nullptr; blk = blk->Next())
         {
             if (blk == startBlk)
             {
@@ -6410,7 +6410,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
         }
     }

-    for (blk = startBlk; blk != endBlk; blk = blk->GetBBNext())
+    for (blk = startBlk; blk != endBlk; blk = blk->Next())
     {
         // The only way (blk == nullptr) could be true is if the caller passed an endBlk that preceded startBlk in the
         // block list, or if endBlk isn't in the block list at all. In DEBUG, we'll instead hit the similar
@@ -6583,10 +6583,10 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
     // inserted block is marked as the entry block for the filter. Because this sort of split can be complex
     // (especially given that it must ensure that the liveness of the exception object is properly tracked),
     // we avoid this situation by never generating single-block filters on x86 (see impPushCatchArgOnStack).
-    if (insertingIntoFilter && (bestBlk == endBlk->GetBBPrev()))
+    if (insertingIntoFilter && (bestBlk == endBlk->Prev()))
     {
         assert(bestBlk != startBlk);
-        bestBlk = bestBlk->GetBBPrev();
+        bestBlk = bestBlk->Prev();
     }
 #endif // defined(JIT32_GCENCODER)

@@ -6741,7 +6741,7 @@ BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind,
         // We will put the newBB in the try region.
         EHblkDsc* ehDsc = ehGetDsc(tryIndex - 1);
         startBlk        = ehDsc->ebdTryBeg;
-        endBlk          = ehDsc->ebdTryLast->GetBBNext();
+        endBlk          = ehDsc->ebdTryLast->Next();
         regionIndex     = tryIndex;
     }
     else if (putInFilter)
@@ -6757,7 +6757,7 @@ BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind,
         // We will put the newBB in the handler region.
         EHblkDsc* ehDsc = ehGetDsc(hndIndex - 1);
         startBlk        = ehDsc->ebdHndBeg;
-        endBlk          = ehDsc->ebdHndLast->GetBBNext();
+        endBlk          = ehDsc->ebdHndLast->Next();
         regionIndex     = hndIndex;
     }

@@ -6857,7 +6857,7 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind,
                                             bool        putInTryRegion)
 {
     /* Insert the new block */
-    BasicBlock* afterBlkNext = afterBlk->GetBBNext();
+    BasicBlock* afterBlkNext = afterBlk->Next();
     (void)afterBlkNext; // prevent "unused variable" error from GCC
     BasicBlock* newBlk = fgNewBBafter(jumpKind, afterBlk, false);

@@ -6967,7 +6967,7 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind,
     }

     /* If afterBlk falls through, we insert a jump around newBlk */
-    fgConnectFallThrough(afterBlk, newBlk->GetBBNext());
+    fgConnectFallThrough(afterBlk, newBlk->Next());

     // If the loop table is valid, add this block to the appropriate loop.
// Note we don't verify (via flow) that this block actually belongs @@ -6976,8 +6976,8 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind, // if (optLoopTableValid) { - BasicBlock* const bbPrev = newBlk->GetBBPrev(); - BasicBlock* const bbNext = newBlk->GetBBNext(); + BasicBlock* const bbPrev = newBlk->Prev(); + BasicBlock* const bbNext = newBlk->Next(); if ((bbPrev != nullptr) && (bbNext != nullptr)) { diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index 7f1067f7a10aff..953b003f036232 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -82,7 +82,7 @@ void Compiler::fgDebugCheckUpdate() BasicBlock* prev; BasicBlock* block; - for (prev = nullptr, block = fgFirstBB; block != nullptr; prev = block, block = block->GetBBNext()) + for (prev = nullptr, block = fgFirstBB; block != nullptr; prev = block, block = block->Next()) { /* no unreachable blocks */ @@ -168,7 +168,7 @@ void Compiler::fgDebugCheckUpdate() // We are allowed to have a branch from a hot 'block' to a cold 'bbNext' // - if (!block->IsLast() && fgInDifferentRegions(block, block->GetBBNext())) + if (!block->IsLast() && fgInDifferentRegions(block, block->Next())) { doAssertOnJumpToNextBlock = false; } @@ -199,7 +199,7 @@ void Compiler::fgDebugCheckUpdate() /* no un-compacted blocks */ - if (fgCanCompactBlocks(block, block->GetBBNext())) + if (fgCanCompactBlocks(block, block->Next())) { noway_assert(!"Found un-compacted blocks!"); } @@ -889,7 +889,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) "ALWAYS", "LEAVE", "CALLFINALLY", "COND", "SWITCH"}; BasicBlock* block; - for (block = fgFirstBB, blockOrdinal = 1; block != nullptr; block = block->GetBBNext(), blockOrdinal++) + for (block = fgFirstBB, blockOrdinal = 1; block != nullptr; block = block->Next(), blockOrdinal++) { if (createDotFile) { @@ -1091,7 +1091,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) { unsigned edgeNum = 1; BasicBlock* bTarget; - for (bTarget = fgFirstBB; bTarget != nullptr; bTarget = bTarget->GetBBNext()) + for (bTarget = fgFirstBB; bTarget != nullptr; bTarget = bTarget->Next()) { double targetWeightDivisor; if (bTarget->bbWeight == BB_ZERO_WEIGHT) @@ -1217,7 +1217,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) if (!bSource->IsLast()) { fprintf(fgxFile, " " FMT_BB " -> " FMT_BB " [style=\"invis\", weight=25];\n", bSource->bbNum, - bSource->GetBBNext()->bbNum); + bSource->Next()->bbNum); } } @@ -1641,7 +1641,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) bool needIndent = true; BasicBlock* bbCur = rgn->m_bbStart; - BasicBlock* bbEnd = rgn->m_bbEnd->GetBBNext(); + BasicBlock* bbEnd = rgn->m_bbEnd->Next(); Region* child = rgn->m_rgnChild; BasicBlock* childCurBB = (child == nullptr) ? nullptr : child->m_bbStart; @@ -1660,7 +1660,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) { fprintf(file, "%*s" FMT_BB ";", needIndent ? indent : 0, "", bbCur->bbNum); needIndent = false; - bbCur = bbCur->GetBBNext(); + bbCur = bbCur->Next(); } if (bbCur == bbEnd) @@ -1684,7 +1684,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) childCount++; - bbCur = child->m_bbEnd->GetBBNext(); // Next, output blocks after this child. + bbCur = child->m_bbEnd->Next(); // Next, output blocks after this child. child = child->m_rgnNext; // Move to the next child, if any. childCurBB = (child == nullptr) ? 
nullptr : child->m_bbStart; } @@ -1746,7 +1746,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) { sprintf_s(name, sizeof(name), "EH#%u filter", XTnum); rgnGraph.Insert(name, RegionGraph::RegionType::EH, ehDsc->ebdFilter, - ehDsc->ebdHndBeg->GetBBPrev()); + ehDsc->ebdHndBeg->Prev()); } } } @@ -2257,7 +2257,7 @@ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, int ibcColWidth = 0; - for (BasicBlock* block = firstBlock; block != nullptr; block = block->GetBBNext()) + for (BasicBlock* block = firstBlock; block != nullptr; block = block->Next()) { if (block->hasProfileWeight()) { @@ -2343,7 +2343,7 @@ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, // First, do some checking on the bbPrev links if (!block->IsFirst()) { - if (!block->GetBBPrev()->NextIs(block)) + if (!block->Prev()->NextIs(block)) { printf("bad prev link\n"); } @@ -2451,7 +2451,7 @@ void Compiler::fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock) { // Note that typically we have already called fgDispBasicBlocks() // so we don't need to print the preds and succs again here. - for (BasicBlock* block = firstBlock; block != nullptr; block = block->GetBBNext()) + for (BasicBlock* block = firstBlock; block != nullptr; block = block->Next()) { fgDumpBlock(block); @@ -2606,7 +2606,7 @@ bool BBPredsChecker::CheckEhTryDsc(BasicBlock* block, BasicBlock* blockPred, EHb // is marked as "returning" to the BBJ_ALWAYS block following the BBJ_CALLFINALLY // block that does a local call to the finally. This BBJ_ALWAYS is within // the try region protected by the finally (for x86, ARM), but that's ok. - BasicBlock* prevBlock = block->GetBBPrev(); + BasicBlock* prevBlock = block->Prev(); if (prevBlock->KindIs(BBJ_CALLFINALLY) && block->KindIs(BBJ_ALWAYS) && blockPred->KindIs(BBJ_EHFINALLYRET)) { return true; @@ -2732,7 +2732,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) BasicBlock* endBlk; comp->ehGetCallFinallyBlockRange(hndIndex, &begBlk, &endBlk); - for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->GetBBNext()) + for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next()) { if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { @@ -2790,7 +2790,7 @@ void Compiler::fgDebugCheckBBNumIncreasing() { for (BasicBlock* const block : Blocks()) { - assert(block->IsLast() || (block->bbNum < block->GetBBNext()->bbNum)); + assert(block->IsLast() || (block->bbNum < block->Next()->bbNum)); } } @@ -2866,7 +2866,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef if (checkBBNum) { // Check that bbNum is sequential - assert(block->IsLast() || (block->bbNum + 1 == block->GetBBNext()->bbNum)); + assert(block->IsLast() || (block->bbNum + 1 == block->Next()->bbNum)); } // If the block is a BBJ_COND, a BBJ_SWITCH or a @@ -3714,7 +3714,7 @@ void Compiler::fgDebugCheckBlockLinks() } else { - assert(block->GetBBNext()->PrevIs(block)); + assert(block->Next()->PrevIs(block)); } if (block->IsFirst()) @@ -3723,7 +3723,7 @@ void Compiler::fgDebugCheckBlockLinks() } else { - assert(block->GetBBPrev()->NextIs(block)); + assert(block->Prev()->NextIs(block)); } // If this is a switch, check that the tables are consistent. 
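Taken together, the asserts in fgDebugCheckBlockLinks above state the list invariant the whole series leans on: each forward link must be mirrored by the neighbor's back link. Condensed into a standalone sketch (the real checker, per the trailing comment, also validates switch table consistency):

    // Sketch of the link invariant: IsFirst()/IsLast() bound the list, and each
    // neighbor must point back at `block`.
    void CheckBlockLinks(BasicBlock* firstBlock)
    {
        for (BasicBlock* block = firstBlock; block != nullptr; block = block->Next())
        {
            assert(block->IsLast() || block->Next()->PrevIs(block));
            assert(block->IsFirst() || block->Prev()->NextIs(block));
        }
    }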
diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 9902d65f08f6d6..5bd8d296df1f84 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -140,7 +140,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() while (currentBlock != endCallFinallyRangeBlock) { - BasicBlock* nextBlock = currentBlock->GetBBNext(); + BasicBlock* nextBlock = currentBlock->Next(); if (currentBlock->KindIs(BBJ_CALLFINALLY) && (currentBlock->bbJumpDest == firstBlock)) { @@ -151,7 +151,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() // the finally is empty. noway_assert(currentBlock->isBBCallAlwaysPair()); - BasicBlock* const leaveBlock = currentBlock->GetBBNext(); + BasicBlock* const leaveBlock = currentBlock->Next(); BasicBlock* const postTryFinallyBlock = leaveBlock->bbJumpDest; JITDUMP("Modifying callfinally " FMT_BB " leave " FMT_BB " finally " FMT_BB " continuation " FMT_BB @@ -172,7 +172,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() // Delete the leave block, which should be marked as // keep always and have the sole finally block as a pred. assert((leaveBlock->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0); - nextBlock = leaveBlock->GetBBNext(); + nextBlock = leaveBlock->Next(); fgRemoveRefPred(leaveBlock, firstBlock); leaveBlock->bbFlags &= ~BBF_KEEP_BBJ_ALWAYS; fgRemoveBlock(leaveBlock, /* unreachable */ true); @@ -398,7 +398,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() if (firstTryBlock != lastTryBlock) { JITDUMP("EH#%u first try block " FMT_BB " not only block in try; skipping.\n", XTnum, - firstTryBlock->GetBBNext()->bbNum); + firstTryBlock->Next()->bbNum); XTnum++; continue; } @@ -420,7 +420,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() if (!firstTryBlock->NextIs(lastTryBlock)) { JITDUMP("EH#%u block " FMT_BB " not last block in try; skipping.\n", XTnum, - firstTryBlock->GetBBNext()->bbNum); + firstTryBlock->Next()->bbNum); XTnum++; continue; } @@ -437,7 +437,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() ehGetCallFinallyBlockRange(XTnum, &firstCallFinallyRangeBlock, &endCallFinallyRangeBlock); for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock; - block = block->GetBBNext()) + block = block->Next()) { if (block->KindIs(BBJ_CALLFINALLY) && (block->bbJumpDest == firstHandlerBlock)) { @@ -450,7 +450,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() break; } - block = block->GetBBNext(); + block = block->Next(); } } @@ -468,7 +468,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() callFinally->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // Identify the leave block and the continuation - BasicBlock* const leave = callFinally->GetBBNext(); + BasicBlock* const leave = callFinally->Next(); BasicBlock* const continuation = leave->bbJumpDest; // (2) Cleanup the leave so it can be deleted by subsequent opts @@ -731,14 +731,14 @@ PhaseStatus Compiler::fgCloneFinally() BasicBlock* const lastBlock = HBtab->ebdHndLast; assert(firstBlock != nullptr); assert(lastBlock != nullptr); - BasicBlock* nextBlock = lastBlock->GetBBNext(); + BasicBlock* nextBlock = lastBlock->Next(); unsigned regionBBCount = 0; unsigned regionStmtCount = 0; bool hasFinallyRet = false; bool isAllRare = true; bool hasSwitch = false; - for (const BasicBlock* block = firstBlock; block != nextBlock; block = block->GetBBNext()) + for (const BasicBlock* block = firstBlock; block != nextBlock; block = block->Next()) { if (block->KindIs(BBJ_SWITCH)) { @@ -806,7 +806,7 @@ PhaseStatus Compiler::fgCloneFinally() BasicBlock* const lastTryBlock = HBtab->ebdTryLast; 
assert(firstTryBlock->getTryIndex() == XTnum); assert(bbInTryRegions(XTnum, lastTryBlock)); - BasicBlock* const beforeTryBlock = firstTryBlock->GetBBPrev(); + BasicBlock* const beforeTryBlock = firstTryBlock->Prev(); BasicBlock* normalCallFinallyBlock = nullptr; BasicBlock* normalCallFinallyReturn = nullptr; @@ -815,7 +815,7 @@ PhaseStatus Compiler::fgCloneFinally() const bool usingProfileWeights = fgIsUsingProfileWeights(); weight_t currentWeight = BB_ZERO_WEIGHT; - for (BasicBlock* block = lastTryBlock; block != beforeTryBlock; block = block->GetBBPrev()) + for (BasicBlock* block = lastTryBlock; block != beforeTryBlock; block = block->Prev()) { #if FEATURE_EH_CALLFINALLY_THUNKS // Blocks that transfer control to callfinallies are usually @@ -825,7 +825,7 @@ PhaseStatus Compiler::fgCloneFinally() if (block->KindIs(BBJ_NONE) && (block == lastTryBlock)) { - jumpDest = block->GetBBNext(); + jumpDest = block->Next(); } else if (block->KindIs(BBJ_ALWAYS)) { @@ -855,7 +855,7 @@ PhaseStatus Compiler::fgCloneFinally() // Found a block that invokes the finally. // - BasicBlock* const finallyReturnBlock = jumpDest->GetBBNext(); + BasicBlock* const finallyReturnBlock = jumpDest->Next(); BasicBlock* const postTryFinallyBlock = finallyReturnBlock->bbJumpDest; bool isUpdate = false; @@ -969,7 +969,7 @@ PhaseStatus Compiler::fgCloneFinally() BasicBlock* firstCallFinallyBlock = nullptr; for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock; - block = block->GetBBNext()) + block = block->Next()) { if (block->isBBCallAlwaysPair()) { @@ -989,7 +989,7 @@ PhaseStatus Compiler::fgCloneFinally() // but only if it's targeted by the last block in the try range. if (firstCallFinallyBlock != normalCallFinallyBlock) { - BasicBlock* const placeToMoveAfter = firstCallFinallyBlock->GetBBPrev(); + BasicBlock* const placeToMoveAfter = firstCallFinallyBlock->Prev(); if (placeToMoveAfter->KindIs(BBJ_ALWAYS) && (placeToMoveAfter->bbJumpDest == normalCallFinallyBlock)) { @@ -997,7 +997,7 @@ PhaseStatus Compiler::fgCloneFinally() normalCallFinallyBlock->bbNum, firstCallFinallyBlock->bbNum); BasicBlock* const firstToMove = normalCallFinallyBlock; - BasicBlock* const lastToMove = normalCallFinallyBlock->GetBBNext(); + BasicBlock* const lastToMove = normalCallFinallyBlock->Next(); fgUnlinkRange(firstToMove, lastToMove); fgMoveBlocksAfter(firstToMove, lastToMove, placeToMoveAfter); @@ -1045,7 +1045,7 @@ PhaseStatus Compiler::fgCloneFinally() unsigned cloneBBCount = 0; weight_t const originalWeight = firstBlock->hasProfileWeight() ? firstBlock->bbWeight : BB_ZERO_WEIGHT; - for (BasicBlock* block = firstBlock; block != nextBlock; block = block->GetBBNext()) + for (BasicBlock* block = firstBlock; block != nextBlock; block = block->Next()) { BasicBlock* newBlock; @@ -1129,7 +1129,7 @@ PhaseStatus Compiler::fgCloneFinally() // Redirect any branches within the newly-cloned // finally, and any finally returns to jump to the return // point. 
- for (BasicBlock* block = firstBlock; block != nextBlock; block = block->GetBBNext()) + for (BasicBlock* block = firstBlock; block != nextBlock; block = block->Next()) { BasicBlock* newBlock = blockMap[block]; @@ -1161,13 +1161,13 @@ PhaseStatus Compiler::fgCloneFinally() while (currentBlock != endCallFinallyRangeBlock) { - BasicBlock* nextBlockToScan = currentBlock->GetBBNext(); + BasicBlock* nextBlockToScan = currentBlock->Next(); if (currentBlock->isBBCallAlwaysPair()) { if (currentBlock->bbJumpDest == firstBlock) { - BasicBlock* const leaveBlock = currentBlock->GetBBNext(); + BasicBlock* const leaveBlock = currentBlock->Next(); BasicBlock* const postTryFinallyBlock = leaveBlock->bbJumpDest; // Note we must retarget all callfinallies that have this @@ -1191,7 +1191,7 @@ PhaseStatus Compiler::fgCloneFinally() // Delete the leave block, which should be marked as // keep always. assert((leaveBlock->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0); - nextBlock = leaveBlock->GetBBNext(); + nextBlock = leaveBlock->Next(); // All preds should be BBJ_EHFINALLYRETs from the finally. for (BasicBlock* const leavePred : leaveBlock->PredBlocks()) @@ -1239,8 +1239,8 @@ PhaseStatus Compiler::fgCloneFinally() // Change all BBJ_EHFINALLYRET to BBJ_EHFAULTRET in the now-fault region. BasicBlock* const hndBegIter = HBtab->ebdHndBeg; - BasicBlock* const hndEndIter = HBtab->ebdHndLast->GetBBNext(); - for (BasicBlock* block = hndBegIter; block != hndEndIter; block = block->GetBBNext()) + BasicBlock* const hndEndIter = HBtab->ebdHndLast->Next(); + for (BasicBlock* block = hndBegIter; block != hndEndIter; block = block->Next()) { if (block->KindIs(BBJ_EHFINALLYRET)) { @@ -1471,7 +1471,7 @@ void Compiler::fgDebugCheckTryFinallyExits() { if (succBlock->isEmpty()) { - BasicBlock* const succSuccBlock = succBlock->GetBBNext(); + BasicBlock* const succSuccBlock = succBlock->Next(); // case (d) if (succSuccBlock->bbFlags & BBF_CLONED_FINALLY_BEGIN) @@ -1622,7 +1622,7 @@ void Compiler::fgAddFinallyTargetFlags() { if (block->isBBCallAlwaysPair()) { - BasicBlock* const leave = block->GetBBNext(); + BasicBlock* const leave = block->Next(); BasicBlock* const continuation = leave->bbJumpDest; if ((continuation->bbFlags & BBF_FINALLY_TARGET) == 0) @@ -1791,7 +1791,7 @@ PhaseStatus Compiler::fgMergeFinallyChains() BasicBlock* const beginHandlerBlock = HBtab->ebdHndBeg; for (BasicBlock* currentBlock = firstCallFinallyRangeBlock; currentBlock != endCallFinallyRangeBlock; - currentBlock = currentBlock->GetBBNext()) + currentBlock = currentBlock->Next()) { // Ignore "retless" callfinallys (where the finally doesn't return). if (currentBlock->isBBCallAlwaysPair() && (currentBlock->bbJumpDest == beginHandlerBlock)) @@ -1805,7 +1805,7 @@ PhaseStatus Compiler::fgMergeFinallyChains() callFinallyCount++; // Locate the continuation - BasicBlock* const leaveBlock = currentBlock->GetBBNext(); + BasicBlock* const leaveBlock = currentBlock->Next(); BasicBlock* const continuationBlock = leaveBlock->bbJumpDest; // If this is the first time we've seen this @@ -1838,7 +1838,7 @@ PhaseStatus Compiler::fgMergeFinallyChains() // sure they all jump to the appropriate canonical // callfinally. 
for (BasicBlock* currentBlock = firstCallFinallyRangeBlock; currentBlock != endCallFinallyRangeBlock; - currentBlock = currentBlock->GetBBNext()) + currentBlock = currentBlock->Next()) { bool merged = fgRetargetBranchesToCanonicalCallFinally(currentBlock, beginHandlerBlock, continuationMap); didMerge = didMerge || merged; @@ -1923,7 +1923,7 @@ bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, // Ok, this is a callfinally that invokes the right handler. // Get its continuation. - BasicBlock* const leaveBlock = callFinally->GetBBNext(); + BasicBlock* const leaveBlock = callFinally->Next(); BasicBlock* const continuationBlock = leaveBlock->bbJumpDest; // Find the canonical callfinally for that continuation. @@ -1958,7 +1958,7 @@ bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, canonicalCallFinally->setBBProfileWeight(newCanonicalWeight); - BasicBlock* const canonicalLeaveBlock = canonicalCallFinally->GetBBNext(); + BasicBlock* const canonicalLeaveBlock = canonicalCallFinally->Next(); weight_t const canonicalLeaveWeight = canonicalLeaveBlock->hasProfileWeight() ? canonicalLeaveBlock->bbWeight : BB_ZERO_WEIGHT; @@ -2101,7 +2101,7 @@ PhaseStatus Compiler::fgTailMergeThrows() // Walk blocks from last to first so that any branches we // introduce to the canonical blocks end up lexically forward // and there is less jumbled flow to sort out later. - for (BasicBlock* block = fgLastBB; block != nullptr; block = block->GetBBPrev()) + for (BasicBlock* block = fgLastBB; block != nullptr; block = block->Prev()) { // Workaround: don't consider try entry blocks as candidates // for merging; if the canonical throw is later in the same try, diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp index c3dcac465426ba..23f122177606b4 100644 --- a/src/coreclr/jit/fgflow.cpp +++ b/src/coreclr/jit/fgflow.cpp @@ -351,7 +351,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block) assert(block->isBBCallAlwaysPair()); /* The block after the BBJ_CALLFINALLY block is not reachable */ - bNext = block->GetBBNext(); + bNext = block->Next(); /* bNext is an unreachable BBJ_ALWAYS block */ noway_assert(bNext->KindIs(BBJ_ALWAYS)); @@ -370,12 +370,12 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block) break; case BBJ_NONE: - fgRemoveRefPred(block->GetBBNext(), block); + fgRemoveRefPred(block->Next(), block); break; case BBJ_COND: fgRemoveRefPred(block->bbJumpDest, block); - fgRemoveRefPred(block->GetBBNext(), block); + fgRemoveRefPred(block->Next(), block); break; case BBJ_EHFILTERRET: @@ -401,7 +401,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block) BasicBlock* finBeg = ehDsc->ebdHndBeg; - for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->GetBBNext()) + for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next()) { if ((bcall->bbFlags & BBF_REMOVED) || !bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) @@ -410,7 +410,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block) } assert(bcall->isBBCallAlwaysPair()); - fgRemoveRefPred(bcall->GetBBNext(), block); + fgRemoveRefPred(bcall->Next(), block); } } } @@ -468,7 +468,7 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock* BasicBlock* finBeg = ehDsc->ebdHndBeg; - for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->GetBBNext()) + for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next()) { if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { @@ -479,7 +479,7 @@ void 
 Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock* if (succNum == i) { - *bres = bcall->GetBBNext(); + *bres = bcall->Next(); return; } succNum++; diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index fda5cf1e767206..3f99cb6a7fb391 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -676,7 +676,7 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitor<SubstitutePlaceholdersAndDevirtualizeWalker> if (!condTree->IsIntegralConst(0)) { block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_compiler)); - m_compiler->fgRemoveRefPred(block->GetBBNext(), block); + m_compiler->fgRemoveRefPred(block->Next(), block); } else { @@ -819,7 +819,7 @@ PhaseStatus Compiler::fgInline() } } - block = block->GetBBNext(); + block = block->Next(); } while (block); @@ -840,7 +840,7 @@ PhaseStatus Compiler::fgInline() fgWalkTreePre(stmt->GetRootNodePointer(), fgDebugCheckInlineCandidates); } - block = block->GetBBNext(); + block = block->Next(); } while (block); @@ -1548,10 +1548,10 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) InlineeCompiler->fgFirstBB->bbRefs--; // Insert inlinee's blocks into inliner's block list. - topBlock->SetBBNext(InlineeCompiler->fgFirstBB); + topBlock->SetNext(InlineeCompiler->fgFirstBB); fgRemoveRefPred(bottomBlock, topBlock); fgAddRefPred(InlineeCompiler->fgFirstBB, topBlock); - InlineeCompiler->fgLastBB->SetBBNext(bottomBlock); + InlineeCompiler->fgLastBB->SetNext(bottomBlock); // // Add inlinee's block count to inliner's. diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index dcc420aaa4e241..48a6f66fd0d2a2 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -134,7 +134,7 @@ bool Compiler::fgReachable(BasicBlock* b1, BasicBlock* b2) { noway_assert(b1->KindIs(BBJ_NONE, BBJ_ALWAYS, BBJ_COND)); - if (b1->KindIs(BBJ_NONE, BBJ_COND) && fgReachable(b1->GetBBNext(), b2)) + if (b1->KindIs(BBJ_NONE, BBJ_COND) && fgReachable(b1->Next(), b2)) { return true; } @@ -367,7 +367,7 @@ void Compiler::fgComputeEnterBlocksSet() assert(block->isBBCallAlwaysPair()); // Don't remove the BBJ_ALWAYS block that is only here for the unwinder. - BlockSetOps::AddElemD(this, fgAlwaysBlks, block->GetBBNext()->bbNum); + BlockSetOps::AddElemD(this, fgAlwaysBlks, block->Next()->bbNum); } } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -474,8 +474,8 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) // the target node (of BBJ_ALWAYS) since BBJ_CALLFINALLY node is getting converted to a BBJ_THROW. 
if (bIsBBCallAlwaysPair) { - noway_assert(block->GetBBNext()->KindIs(BBJ_ALWAYS)); - fgClearFinallyTargetBit(block->GetBBNext()->bbJumpDest); + noway_assert(block->Next()->KindIs(BBJ_ALWAYS)); + fgClearFinallyTargetBit(block->Next()->bbJumpDest); } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } @@ -490,7 +490,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) if (hasUnreachableBlocks) { // Now remove the unreachable blocks - for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->GetBBNext()) + for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->Next()) { // If we marked a block with BBF_REMOVED then we need to call fgRemoveBlock() on it @@ -506,7 +506,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) // if (block->isBBCallAlwaysPair()) { - block = block->GetBBNext(); + block = block->Next(); } } } @@ -643,7 +643,7 @@ bool Compiler::fgRemoveDeadBlocks() assert(block->isBBCallAlwaysPair()); // Don't remove the BBJ_ALWAYS block that is only here for the unwinder. - worklist.push_back(block->GetBBNext()); + worklist.push_back(block->Next()); } } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -1007,7 +1007,7 @@ void Compiler::fgComputeDoms() BasicBlock* block = nullptr; - for (block = fgFirstBB->GetBBNext(); block != nullptr; block = block->GetBBNext()) + for (block = fgFirstBB->Next(); block != nullptr; block = block->Next()) { // If any basic block has no predecessors then we flag it as processed and temporarily // mark its predecessor list to be flRoot. This makes the flowgraph connected, @@ -1175,7 +1175,7 @@ DomTreeNode* Compiler::fgBuildDomTree() // Traverse the entire block list to build the dominator tree. Skip fgFirstBB // as it is always a root of the dominator forest. - for (BasicBlock* const block : Blocks(fgFirstBB->GetBBNext())) + for (BasicBlock* const block : Blocks(fgFirstBB->Next())) { BasicBlock* parent = block->bbIDom; @@ -1470,7 +1470,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() for (cur = fgFirstBB; cur != nullptr; cur = nxt) { // Get hold of the next block (in case we delete 'cur') - nxt = cur->GetBBNext(); + nxt = cur->Next(); // Should this block be removed? if (!(cur->bbFlags & BBF_IMPORTED)) @@ -1571,10 +1571,10 @@ PhaseStatus Compiler::fgPostImportationCleanup() // Find the first unremoved block before the try entry block. // BasicBlock* const oldTryEntry = HBtab->ebdTryBeg; - BasicBlock* tryEntryPrev = oldTryEntry->GetBBPrev(); + BasicBlock* tryEntryPrev = oldTryEntry->Prev(); while ((tryEntryPrev != nullptr) && ((tryEntryPrev->bbFlags & BBF_REMOVED) != 0)) { - tryEntryPrev = tryEntryPrev->GetBBPrev(); + tryEntryPrev = tryEntryPrev->Prev(); } // Because we've added an unremovable scratch block as @@ -1585,7 +1585,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() // If there is a next block of this prev block, and that block is // contained in the current try, we'd like to make that block // the new start of the try, and keep the region. - BasicBlock* newTryEntry = tryEntryPrev->GetBBNext(); + BasicBlock* newTryEntry = tryEntryPrev->Next(); bool updateTryEntry = false; if ((newTryEntry != nullptr) && bbInTryRegions(XTnum, newTryEntry)) @@ -1648,13 +1648,13 @@ PhaseStatus Compiler::fgPostImportationCleanup() // out of order handler, the next block may be a handler. So even though // this new try entry block is unreachable, we need to give it a // plausible flow target. Simplest is to just mark it as a throw. 
- if (bbIsHandlerBeg(newTryEntry->GetBBNext())) + if (bbIsHandlerBeg(newTryEntry->Next())) { newTryEntry->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); } else { - fgAddRefPred(newTryEntry->GetBBNext(), newTryEntry); + fgAddRefPred(newTryEntry->Next(), newTryEntry); } JITDUMP("OSR: changing start of try region #%u from " FMT_BB " to new " FMT_BB "\n", @@ -2320,7 +2320,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) /* Unlink bNext and update all the marker pointers if necessary */ - fgUnlinkRange(block->GetBBNext(), bNext); + fgUnlinkRange(block->Next(), bNext); // If bNext was the last block of a try or handler, update the EH table. @@ -2345,15 +2345,15 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) fgReplacePred(bNext->bbJumpDest, bNext, block); /* Update the predecessor list for 'bNext->bbNext' if it is different than 'bNext->bbJumpDest' */ - if (bNext->KindIs(BBJ_COND) && bNext->bbJumpDest != bNext->GetBBNext()) + if (bNext->KindIs(BBJ_COND) && bNext->bbJumpDest != bNext->Next()) { - fgReplacePred(bNext->GetBBNext(), bNext, block); + fgReplacePred(bNext->Next(), bNext, block); } break; case BBJ_NONE: /* Update the predecessor list for 'bNext->bbNext' */ - fgReplacePred(bNext->GetBBNext(), bNext, block); + fgReplacePred(bNext->Next(), bNext, block); break; case BBJ_EHFILTERRET: @@ -2373,7 +2373,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) BasicBlock* finBeg = ehDsc->ebdHndBeg; - for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->GetBBNext()) + for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next()) { if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { @@ -2381,7 +2381,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) } noway_assert(bcall->isBBCallAlwaysPair()); - fgReplacePred(bcall->GetBBNext(), bNext, block); + fgReplacePred(bcall->Next(), bNext, block); } } } @@ -2630,12 +2630,12 @@ void Compiler::fgRemoveConditionalJump(BasicBlock* block) noway_assert(block->KindIs(BBJ_COND) && block->NextIs(block->bbJumpDest)); assert(compRationalIRForm == block->IsLIR()); - FlowEdge* flow = fgGetPredForBlock(block->GetBBNext(), block); + FlowEdge* flow = fgGetPredForBlock(block->Next(), block); noway_assert(flow->getDupCount() == 2); // Change the BBJ_COND to BBJ_NONE, and adjust the refCount and dupCount. 
block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); - --block->GetBBNext()->bbRefs; + --block->Next()->bbRefs; flow->decrementDupCount(); #ifdef DEBUG @@ -2644,7 +2644,7 @@ void Compiler::fgRemoveConditionalJump(BasicBlock* block) { printf("Block " FMT_BB " becoming a BBJ_NONE to " FMT_BB " (jump target is the same whether the condition" " is true or false)\n", - block->bbNum, block->GetBBNext()->bbNum); + block->bbNum, block->Next()->bbNum); } #endif @@ -2884,7 +2884,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) assert(block->isEmpty()); bool madeChanges = false; - BasicBlock* bPrev = block->GetBBPrev(); + BasicBlock* bPrev = block->Prev(); switch (block->GetBBJumpKind()) { @@ -2915,7 +2915,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) // should have been fixed by the optimization above // An exception is made for a jump from Hot to Cold noway_assert(!block->NextIs(block->bbJumpDest) || block->isBBCallAlwaysPairTail() || - fgInDifferentRegions(block, block->GetBBNext())); + fgInDifferentRegions(block, block->Next())); /* Cannot remove the first BB */ if (!bPrev) @@ -2986,7 +2986,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) } else { - succBlock = block->GetBBNext(); + succBlock = block->Next(); } if ((succBlock != nullptr) && !BasicBlock::sameEHRegion(block, succBlock)) @@ -3073,7 +3073,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) // Make sure we don't break that invariant. if (fgIsUsingProfileWeights() && block->hasProfileWeight() && (block->bbFlags & BBF_INTERNAL) == 0) { - BasicBlock* bNext = block->GetBBNext(); + BasicBlock* bNext = block->Next(); // Check if the next block can't maintain the invariant. if ((bNext == nullptr) || ((bNext->bbFlags & BBF_INTERNAL) != 0) || !bNext->hasProfileWeight()) @@ -3082,7 +3082,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) BasicBlock* curBB = bPrev; while ((curBB != nullptr) && (curBB->bbFlags & BBF_INTERNAL) != 0) { - curBB = curBB->GetBBPrev(); + curBB = curBB->Prev(); } if (curBB == nullptr) { @@ -3743,10 +3743,10 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* { assert(target->KindIs(BBJ_COND)); - if ((target->GetBBNext()->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) + if ((target->Next()->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) { JITDUMP("Deferring: " FMT_BB " --> " FMT_BB "; latter looks like loop top\n", target->bbNum, - target->GetBBNext()->bbNum); + target->Next()->bbNum); return false; } @@ -3800,7 +3800,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // The new block 'next' will inherit its weight from 'block' // next->inheritWeight(block); - next->bbJumpDest = target->GetBBNext(); + next->bbJumpDest = target->Next(); fgAddRefPred(next, block); fgAddRefPred(next->bbJumpDest, next); @@ -4039,7 +4039,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) } // do not jump into another try region - BasicBlock* bDestNext = bDest->GetBBNext(); + BasicBlock* bDestNext = bDest->Next(); if (bDestNext->hasTryIndex() && !BasicBlock::sameTryRegion(bJump, bDestNext)) { return false; @@ -4072,10 +4072,10 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) bool allProfileWeightsAreValid = false; weight_t weightJump = bJump->bbWeight; weight_t weightDest = bDest->bbWeight; - weight_t weightNext = bJump->GetBBNext()->bbWeight; + weight_t weightNext = bJump->Next()->bbWeight; bool rareJump = bJump->isRunRarely(); bool rareDest = bDest->isRunRarely(); - bool rareNext = bJump->GetBBNext()->isRunRarely(); + bool 
rareNext = bJump->Next()->isRunRarely(); // If we have profile data then we calculate the number of time // the loop will iterate into loopIterations @@ -4086,7 +4086,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) // if ((bJump->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)) && (bDest->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)) && - (bJump->GetBBNext()->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY))) + (bJump->Next()->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY))) { allProfileWeightsAreValid = true; @@ -4233,13 +4233,13 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) bJump->bbFlags |= bDest->bbFlags & BBF_COPY_PROPAGATE; bJump->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); - bJump->bbJumpDest = bDest->GetBBNext(); + bJump->bbJumpDest = bDest->Next(); /* Update bbRefs and bbPreds */ // bJump now falls through into the next block // - fgAddRefPred(bJump->GetBBNext(), bJump); + fgAddRefPred(bJump->Next(), bJump); // bJump no longer jumps to bDest // @@ -4247,7 +4247,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) // bJump now jumps to bDest->bbNext // - fgAddRefPred(bDest->GetBBNext(), bJump); + fgAddRefPred(bDest->Next(), bJump); if (weightJump > 0) { @@ -4510,7 +4510,7 @@ bool Compiler::fgExpandRarelyRunBlocks() { // If we've got a BBJ_CALLFINALLY/BBJ_ALWAYS pair, treat the BBJ_CALLFINALLY as an // additional predecessor for the BBJ_ALWAYS block - tmpbb = bPrev->GetBBPrev(); + tmpbb = bPrev->Prev(); noway_assert(tmpbb != nullptr); #if defined(FEATURE_EH_FUNCLETS) noway_assert(tmpbb->isBBCallAlwaysPair()); @@ -4542,7 +4542,7 @@ bool Compiler::fgExpandRarelyRunBlocks() // Walk the flow graph lexically forward from pred->getBlock() // if we find (block == bPrevPrev) then // pred->getBlock() is an earlier predecessor. - for (tmpbb = pred->getSourceBlock(); tmpbb != nullptr; tmpbb = tmpbb->GetBBNext()) + for (tmpbb = pred->getSourceBlock(); tmpbb != nullptr; tmpbb = tmpbb->Next()) { if (tmpbb == bPrevPrev) { @@ -4570,7 +4570,7 @@ bool Compiler::fgExpandRarelyRunBlocks() // bPrevPrev is lexically after bPrev and we do not // want to select it as our new block - for (tmpbb = bPrevPrev; tmpbb != nullptr; tmpbb = tmpbb->GetBBNext()) + for (tmpbb = bPrevPrev; tmpbb != nullptr; tmpbb = tmpbb->Next()) { if (tmpbb == bPrev) { @@ -4596,7 +4596,7 @@ bool Compiler::fgExpandRarelyRunBlocks() BasicBlock* block; BasicBlock* bPrev; - for (bPrev = fgFirstBB, block = bPrev->GetBBNext(); block != nullptr; bPrev = block, block = block->GetBBNext()) + for (bPrev = fgFirstBB, block = bPrev->Next(); block != nullptr; bPrev = block, block = block->Next()) { if (bPrev->isRunRarely()) { @@ -4678,7 +4678,7 @@ bool Compiler::fgExpandRarelyRunBlocks() // Now iterate over every block to see if we can prove that a block is rarely run // (i.e. when all predecessors to the block are rarely run) // - for (bPrev = fgFirstBB, block = bPrev->GetBBNext(); block != nullptr; bPrev = block, block = block->GetBBNext()) + for (bPrev = fgFirstBB, block = bPrev->Next(); block != nullptr; bPrev = block, block = block->Next()) { // If block is not run rarely, then check to make sure that it has // at least one non-rarely run block. 
@@ -4728,7 +4728,7 @@ bool Compiler::fgExpandRarelyRunBlocks() // if (block->isBBCallAlwaysPair()) { - BasicBlock* bNext = block->GetBBNext(); + BasicBlock* bNext = block->Next(); PREFIX_ASSUME(bNext != nullptr); bNext->bbSetRunRarely(); #ifdef DEBUG @@ -4880,7 +4880,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) EHblkDsc* HBtab; // Iterate over every block, remembering our previous block in bPrev - for (bPrev = fgFirstBB, block = bPrev->GetBBNext(); block != nullptr; bPrev = block, block = block->GetBBNext()) + for (bPrev = fgFirstBB, block = bPrev->Next(); block != nullptr; bPrev = block, block = block->Next()) { // // Consider relocating the rarely run blocks such that they are at the end of the method. @@ -5128,7 +5128,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) weight_t highestWeight = 0; BasicBlock* candidateBlock = nullptr; BasicBlock* lastNonFallThroughBlock = bPrev; - BasicBlock* bTmp = bPrev->GetBBNext(); + BasicBlock* bTmp = bPrev->Next(); while (bTmp != nullptr) { @@ -5137,7 +5137,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) if (bTmp->isBBCallAlwaysPair()) { // Move bTmp forward - bTmp = bTmp->GetBBNext(); + bTmp = bTmp->Next(); } // @@ -5164,7 +5164,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // otherwise we have a new candidateBlock // highestWeight = bTmp->bbWeight; - candidateBlock = lastNonFallThroughBlock->GetBBNext(); + candidateBlock = lastNonFallThroughBlock->Next(); } } @@ -5173,7 +5173,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) lastNonFallThroughBlock = bTmp; } - bTmp = bTmp->GetBBNext(); + bTmp = bTmp->Next(); } // If we didn't find a suitable block then skip this @@ -5277,7 +5277,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) BasicBlock* bStart = block; BasicBlock* bEnd = bStart; - bNext = bEnd->GetBBNext(); + bNext = bEnd->Next(); bool connected_bDest = false; if ((backwardBranch && !isRare) || @@ -5296,7 +5296,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) { // Move bEnd and bNext forward bEnd = bNext; - bNext = bNext->GetBBNext(); + bNext = bNext->Next(); } // @@ -5356,7 +5356,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // Move bEnd and bNext forward bEnd = bNext; - bNext = bNext->GetBBNext(); + bNext = bNext->Next(); } // Set connected_bDest to true if moving blocks [bStart .. bEnd] @@ -5401,7 +5401,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) break; } - bPrev2 = bPrev2->GetBBNext(); + bPrev2 = bPrev2->Next(); } if ((bPrev2 != nullptr) && fgEhAllowsMoveBlock(bPrev, bDest)) @@ -5414,7 +5414,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // bStart2 = bDest; bEnd2 = bStart2; - bNext = bEnd2->GetBBNext(); + bNext = bEnd2->Next(); while (true) { @@ -5425,7 +5425,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) noway_assert(bNext->KindIs(BBJ_ALWAYS)); // Move bEnd2 and bNext forward bEnd2 = bNext; - bNext = bNext->GetBBNext(); + bNext = bNext->Next(); } // Check for the Loop exit conditions @@ -5475,7 +5475,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // Move bEnd2 and bNext forward bEnd2 = bNext; - bNext = bNext->GetBBNext(); + bNext = bNext->Next(); } } } @@ -5630,7 +5630,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) if (ehDsc != nullptr) { - endBlk = lastBlk->GetBBNext(); + endBlk = lastBlk->Next(); /* Multiple (nested) try regions might start from the same BB. 
@@ -5650,7 +5650,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) */ while (!BasicBlock::sameTryRegion(startBlk, bStart) && (startBlk != endBlk)) { - startBlk = startBlk->GetBBNext(); + startBlk = startBlk->Next(); } // startBlk cannot equal endBlk as it must come before endBlk @@ -5666,12 +5666,12 @@ bool Compiler::fgReorderBlocks(bool useProfile) // or if bEnd->bbNext is in a different try region // then we cannot move the blocks // - if ((bEnd->IsLast()) || !BasicBlock::sameTryRegion(startBlk, bEnd->GetBBNext())) + if ((bEnd->IsLast()) || !BasicBlock::sameTryRegion(startBlk, bEnd->Next())) { goto CANNOT_MOVE; } - startBlk = bEnd->GetBBNext(); + startBlk = bEnd->Next(); // Check that the new startBlk still comes before endBlk @@ -5684,7 +5684,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) BasicBlock* tmpBlk = startBlk; while ((tmpBlk != endBlk) && (tmpBlk != nullptr)) { - tmpBlk = tmpBlk->GetBBNext(); + tmpBlk = tmpBlk->Next(); } // when tmpBlk is NULL that means startBlk is after endBlk @@ -5745,7 +5745,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) } // advance nearBlk to the next block - nearBlk = nearBlk->GetBBNext(); + nearBlk = nearBlk->Next(); } while (nearBlk != nullptr); } @@ -5783,10 +5783,10 @@ bool Compiler::fgReorderBlocks(bool useProfile) /* We couldn't move the blocks, so put everything back */ /* relink [bStart .. bEnd] into the flow graph */ - bPrev->SetBBNext(bStart); + bPrev->SetNext(bStart); if (!bEnd->IsLast()) { - bEnd->GetBBNext()->SetBBPrev(bEnd); + bEnd->Next()->SetPrev(bEnd); } #ifdef DEBUG if (verbose) @@ -5933,7 +5933,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) fgConnectFallThrough(bPrev, block); } - BasicBlock* bSkip = bEnd->GetBBNext(); + BasicBlock* bSkip = bEnd->Next(); /* If bEnd falls through, we must insert a jump to bNext */ fgConnectFallThrough(bEnd, bNext); @@ -5968,7 +5968,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // Set our iteration point 'block' to be the new bPrev->bbNext // It will be used as the next bPrev - block = bPrev->GetBBNext(); + block = bPrev->Next(); } // end of for loop(bPrev,block) @@ -6068,7 +6068,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) BasicBlock* bNext; // the successor of the current block BasicBlock* bDest; // the jump target of the current block - for (block = fgFirstBB; block != nullptr; block = block->GetBBNext()) + for (block = fgFirstBB; block != nullptr; block = block->Next()) { /* Some blocks may be already marked removed by other optimizations * (e.g worthless loop removal), without being explicitly removed @@ -6079,14 +6079,14 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) { if (bPrev) { - bPrev->SetBBNext(block->GetBBNext()); + bPrev->SetNext(block->Next()); } else { /* WEIRD first basic block is removed - should have an assert here */ noway_assert(!"First basic block marked as BBF_REMOVED???"); - fgFirstBB = block->GetBBNext(); + fgFirstBB = block->Next(); } continue; } @@ -6099,7 +6099,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) REPEAT:; - bNext = block->GetBBNext(); + bNext = block->Next(); bDest = nullptr; if (block->KindIs(BBJ_ALWAYS)) @@ -6110,19 +6110,19 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) change = true; modified = true; bDest = block->bbJumpDest; - bNext = block->GetBBNext(); + bNext = block->Next(); } } if (block->KindIs(BBJ_NONE)) { bDest = nullptr; - if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, block->GetBBNext())) + 
if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, block->Next())) { change = true; modified = true; bDest = block->bbJumpDest; - bNext = block->GetBBNext(); + bNext = block->Next(); } } @@ -6249,13 +6249,13 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // If bDest can fall through we'll need to create a jump // block after it too. Remember where to jump to. // - BasicBlock* const bDestNext = bDest->GetBBNext(); + BasicBlock* const bDestNext = bDest->Next(); // Move bDest // if (ehIsBlockEHLast(bDest)) { - ehUpdateLastBlocks(bDest, bDest->GetBBPrev()); + ehUpdateLastBlocks(bDest, bDest->Prev()); } fgUnlinkBlock(bDest); @@ -6333,7 +6333,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // If this is the first Cold basic block update fgFirstColdBlock if (bNext == fgFirstColdBlock) { - fgFirstColdBlock = bNext->GetBBNext(); + fgFirstColdBlock = bNext->Next(); } // @@ -7116,7 +7116,7 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early) Statement* nextFirstStmt; Statement* destFirstStmt; - if (!getSuccCandidate(block->GetBBNext(), &nextFirstStmt) || !getSuccCandidate(block->bbJumpDest, &destFirstStmt)) + if (!getSuccCandidate(block->Next(), &nextFirstStmt) || !getSuccCandidate(block->bbJumpDest, &destFirstStmt)) { return false; } @@ -7144,10 +7144,10 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early) JITDUMP("We can; moving statement\n"); - fgUnlinkStmt(block->GetBBNext(), nextFirstStmt); + fgUnlinkStmt(block->Next(), nextFirstStmt); fgInsertStmtNearEnd(block, nextFirstStmt); fgUnlinkStmt(block->bbJumpDest, destFirstStmt); - block->bbFlags |= block->GetBBNext()->bbFlags & BBF_COPY_PROPAGATE; + block->bbFlags |= block->Next()->bbFlags & BBF_COPY_PROPAGATE; return true; } diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index a8cb23e3caf434..5b61db49a9099c 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -963,7 +963,7 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor) { // This block should be the only pred of the continuation. // - BasicBlock* const target = block->GetBBNext(); + BasicBlock* const target = block->Next(); assert(!BlockSetOps::IsMember(this, marked, target->bbNum)); visitor->VisitTreeEdge(block, target); stack.Push(target); @@ -3363,7 +3363,7 @@ void EfficientEdgeCountReconstructor::Solve() // The ideal solver order is likely reverse postorder over the depth-first spanning tree. // We approximate it here by running from last node to first. 
// - for (BasicBlock* block = m_comp->fgLastBB; (block != nullptr); block = block->GetBBPrev()) + for (BasicBlock* block = m_comp->fgLastBB; (block != nullptr); block = block->Prev()) { BlockInfo* const info = BlockToInfo(block); @@ -4413,7 +4413,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) weight = 0; iterations++; - for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->GetBBNext()) + for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->Next()) { if (!bDst->hasProfileWeight() && (bDst->bbPreds != nullptr)) { @@ -4431,7 +4431,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) // Does this block flow into only one other block if (bSrc->KindIs(BBJ_NONE)) { - bOnlyNext = bSrc->GetBBNext(); + bOnlyNext = bSrc->Next(); } else if (bSrc->KindIs(BBJ_ALWAYS)) { @@ -4452,7 +4452,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) // Does this block flow into only one other block if (bDst->KindIs(BBJ_NONE)) { - bOnlyNext = bDst->GetBBNext(); + bOnlyNext = bDst->Next(); } else if (bDst->KindIs(BBJ_ALWAYS)) { @@ -4582,7 +4582,7 @@ bool Compiler::fgComputeCalledCount(weight_t returnWeight) // while (firstILBlock->bbFlags & BBF_INTERNAL) { - firstILBlock = firstILBlock->GetBBNext(); + firstILBlock = firstILBlock->Next(); } } @@ -4655,7 +4655,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() JITDUMP("Initial weight assignments\n\n"); // Now we will compute the initial m_edgeWeightMin and m_edgeWeightMax values - for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->GetBBNext()) + for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->Next()) { weight_t bDstWeight = bDst->bbWeight; @@ -4746,7 +4746,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() hasIncompleteEdgeWeights = false; JITDUMP("\n -- step 1 --\n"); - for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->GetBBNext()) + for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->Next()) { for (FlowEdge* const edge : bDst->PredEdges()) { @@ -4767,7 +4767,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() } else { - otherDst = bSrc->GetBBNext(); + otherDst = bSrc->Next(); } otherEdge = fgGetPredForBlock(otherDst, bSrc); @@ -4842,7 +4842,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() JITDUMP("\n -- step 2 --\n"); - for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->GetBBNext()) + for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->Next()) { weight_t bDstWeight = bDst->bbWeight; diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index 0977e202677d2e..d50a03260a130b 100644 --- a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -290,7 +290,7 @@ bool ProfileSynthesis::IsLoopExitEdge(FlowEdge* edge) // void ProfileSynthesis::AssignLikelihoodNext(BasicBlock* block) { - FlowEdge* const edge = m_comp->fgGetPredForBlock(block->GetBBNext(), block); + FlowEdge* const edge = m_comp->fgGetPredForBlock(block->Next(), block); edge->setLikelihood(1.0); } @@ -317,7 +317,7 @@ void ProfileSynthesis::AssignLikelihoodJump(BasicBlock* block) void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block) { BasicBlock* const jump = block->bbJumpDest; - BasicBlock* const next = block->GetBBNext(); + BasicBlock* const next = block->Next(); // Watch for degenerate case // @@ -1221,7 +1221,7 @@ void ProfileSynthesis::ComputeCyclicProbabilities(SimpleLoop* loop) exitBlock->bbNum, exitEdge->getLikelihood()); BasicBlock* const jump = exitBlock->bbJumpDest; - BasicBlock* const next = exitBlock->GetBBNext(); + BasicBlock* const next = 
exitBlock->Next(); FlowEdge* const jumpEdge = m_comp->fgGetPredForBlock(jump, exitBlock); FlowEdge* const nextEdge = m_comp->fgGetPredForBlock(next, exitBlock); weight_t const exitLikelihood = (missingExitWeight + currentExitWeight) / exitBlockWeight; diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 5fc49925f07868..7fd5a41b4f8ef4 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -80,7 +80,7 @@ PhaseStatus Compiler::fgInsertGCPolls() // Walk through the blocks and hunt for a block that needs a GC Poll // - for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->GetBBNext()) + for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->Next()) { compCurBB = block; @@ -256,7 +256,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) if (top->KindIs(BBJ_COND)) { - topFallThrough = top->GetBBNext(); + topFallThrough = top->Next(); lpIndexFallThrough = topFallThrough->bbNatLoopNum; } @@ -384,7 +384,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) switch (oldJumpKind) { case BBJ_NONE: - fgReplacePred(bottom->GetBBNext(), top, bottom); + fgReplacePred(bottom->Next(), top, bottom); break; case BBJ_RETURN: case BBJ_THROW: @@ -393,7 +393,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) case BBJ_COND: // replace predecessor in the fall through block. noway_assert(!bottom->IsLast()); - fgReplacePred(bottom->GetBBNext(), top, bottom); + fgReplacePred(bottom->Next(), top, bottom); // fall through for the jump target FALLTHROUGH; @@ -1562,7 +1562,7 @@ void Compiler::fgAddSyncMethodEnterExit() // Create a block for the start of the try region, where the monitor enter call // will go. BasicBlock* const tryBegBB = fgSplitBlockAtEnd(fgFirstBB); - BasicBlock* const tryNextBB = tryBegBB->GetBBNext(); + BasicBlock* const tryNextBB = tryBegBB->Next(); BasicBlock* const tryLastBB = fgLastBB; // If we have profile data the new block will inherit the next block's weight @@ -1633,7 +1633,7 @@ void Compiler::fgAddSyncMethodEnterExit() // to point to the new try handler. BasicBlock* tmpBB; - for (tmpBB = tryBegBB->GetBBNext(); tmpBB != faultBB; tmpBB = tmpBB->GetBBNext()) + for (tmpBB = tryBegBB->Next(); tmpBB != faultBB; tmpBB = tmpBB->Next()) { if (!tmpBB->hasTryIndex()) { @@ -2594,7 +2594,7 @@ PhaseStatus Compiler::fgAddInternal() // Visit the BBJ_RETURN blocks and merge as necessary. - for (BasicBlock* block = fgFirstBB; !lastBlockBeforeGenReturns->NextIs(block); block = block->GetBBNext()) + for (BasicBlock* block = fgFirstBB; !lastBlockBeforeGenReturns->NextIs(block); block = block->Next()) { if (block->KindIs(BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { @@ -3004,7 +3004,7 @@ BasicBlock* Compiler::fgLastBBInMainFunction() if (fgFirstFuncletBB != nullptr) { - return fgFirstFuncletBB->GetBBPrev(); + return fgFirstFuncletBB->Prev(); } #endif // FEATURE_EH_FUNCLETS @@ -3062,7 +3062,7 @@ BasicBlock* Compiler::fgGetDomSpeculatively(const BasicBlock* block) /***************************************************************************************************** * * Function to return the first basic block after the main part of the function. With funclets, it is - * the block of the first funclet. Otherwise it is NULL if there are no funclets (fgLastBB->GetBBNext()). + * the block of the first funclet. Otherwise it is NULL if there are no funclets (fgLastBB->Next()). 
* This is equivalent to fgLastBBInMainFunction()->bbNext * An exclusive end of the main method. */ @@ -3302,7 +3302,7 @@ PhaseStatus Compiler::fgCreateFunclets() // bool Compiler::fgFuncletsAreCold() { - for (BasicBlock* block = fgFirstFuncletBB; block != nullptr; block = block->GetBBNext()) + for (BasicBlock* block = fgFirstFuncletBB; block != nullptr; block = block->Next()) { if (!block->isRunRarely()) { @@ -3365,7 +3365,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() if (forceSplit) { - firstColdBlock = fgFirstBB->GetBBNext(); + firstColdBlock = fgFirstBB->Next(); prevToFirstColdBlock = fgFirstBB; JITDUMP("JitStressProcedureSplitting is enabled: Splitting after the first basic block\n"); } @@ -3373,7 +3373,7 @@ { bool inFuncletSection = false; - for (lblk = nullptr, block = fgFirstBB; block != nullptr; lblk = block, block = block->GetBBNext()) + for (lblk = nullptr, block = fgFirstBB; block != nullptr; lblk = block, block = block->Next()) { bool blockMustBeInHotSection = false; @@ -3413,7 +3413,7 @@ if (fgFuncletsAreCold()) { firstColdBlock = fgFirstFuncletBB; - prevToFirstColdBlock = fgFirstFuncletBB->GetBBPrev(); + prevToFirstColdBlock = fgFirstFuncletBB->Prev(); } break; @@ -3515,7 +3515,7 @@ // assert(prevToFirstColdBlock->isBBCallAlwaysPair()); firstColdBlock = - firstColdBlock->GetBBNext(); // Note that this assignment could make firstColdBlock == nullptr + firstColdBlock->Next(); // Note that this assignment could make firstColdBlock == nullptr break; case BBJ_COND: @@ -3526,7 +3526,7 @@ if (firstColdBlock->isEmpty() && firstColdBlock->KindIs(BBJ_ALWAYS)) { // We can just use this block as the transitionBlock - firstColdBlock = firstColdBlock->GetBBNext(); + firstColdBlock = firstColdBlock->Next(); // Note that this assignment could make firstColdBlock == NULL } else @@ -3554,7 +3554,7 @@ } } - for (block = firstColdBlock; block != nullptr; block = block->GetBBNext()) + for (block = firstColdBlock; block != nullptr; block = block->Next()) { block->bbFlags |= BBF_COLD; block->unmarkLoopAlign(this DEBUG_ARG("Loop alignment disabled for cold blocks")); diff --git a/src/coreclr/jit/helperexpansion.cpp b/src/coreclr/jit/helperexpansion.cpp index baccc44b0a2803..0ab8e106a3d32c 100644 --- a/src/coreclr/jit/helperexpansion.cpp +++ b/src/coreclr/jit/helperexpansion.cpp @@ -824,7 +824,7 @@ template - for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->GetBBNext()) + for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->Next()) { if (skipRarelyRunBlocks && block->isRunRarely()) { diff --git a/src/coreclr/jit/ifconversion.cpp b/src/coreclr/jit/ifconversion.cpp index 7d84bad99d7610..9a09dd1540770a 100644 --- a/src/coreclr/jit/ifconversion.cpp +++ b/src/coreclr/jit/ifconversion.cpp @@ -122,7 +122,7 @@ bool OptIfConversionDsc::IfConvertCheckInnerBlockFlow(BasicBlock* block) bool OptIfConversionDsc::IfConvertCheckThenFlow() { m_flowFound = false; - BasicBlock* thenBlock = m_startBlock->GetBBNext(); + BasicBlock* thenBlock = m_startBlock->Next(); for (int thenLimit = 0; thenLimit < m_checkLimit; thenLimit++) { @@ -385,7 +385,7 @@ void OptIfConversionDsc::IfConvertDump() { assert(m_startBlock != nullptr); m_comp->fgDumpBlock(m_startBlock); - for (BasicBlock* dumpBlock = m_startBlock->GetBBNext(); dumpBlock != m_finalBlock; + for (BasicBlock* dumpBlock = m_startBlock->Next(); 
dumpBlock != m_finalBlock; dumpBlock = dumpBlock->GetUniqueSucc()) { m_comp->fgDumpBlock(dumpBlock); @@ -575,7 +575,7 @@ bool OptIfConversionDsc::optIfConvert() } // Check the Then and Else blocks have a single operation each. - if (!IfConvertCheckStmts(m_startBlock->GetBBNext(), &m_thenOperation)) + if (!IfConvertCheckStmts(m_startBlock->Next(), &m_thenOperation)) { return false; } @@ -742,7 +742,7 @@ bool OptIfConversionDsc::optIfConvert() } // Update the flow from the original block. - m_comp->fgRemoveAllRefPreds(m_startBlock->GetBBNext(), m_startBlock); + m_comp->fgRemoveAllRefPreds(m_startBlock->Next(), m_startBlock); m_startBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); #ifdef DEBUG @@ -789,7 +789,7 @@ PhaseStatus Compiler::optIfConversion() { OptIfConversionDsc optIfConversionDsc(this, block); madeChanges |= optIfConversionDsc.optIfConvert(); - block = block->GetBBPrev(); + block = block->Prev(); } #endif diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index f4df7141ff27e1..076fb70d2fc1db 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -1953,7 +1953,7 @@ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_H impPushOnStack(tree, typeInfo(clsHnd)); - return hndBlk->GetBBNext(); + return hndBlk->Next(); } } @@ -7305,7 +7305,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (block->KindIs(BBJ_COND)) { JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", - block->bbNum, block->GetBBNext()->bbNum); + block->bbNum, block->Next()->bbNum); fgRemoveRefPred(block->bbJumpDest, block); block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } @@ -7371,14 +7371,14 @@ void Compiler::impImportBlockCode(BasicBlock* block) { if (foldedJumpKind == BBJ_NONE) { - JITDUMP("\nThe block falls through into the next " FMT_BB "\n", block->GetBBNext()->bbNum); + JITDUMP("\nThe block falls through into the next " FMT_BB "\n", block->Next()->bbNum); fgRemoveRefPred(block->bbJumpDest, block); } else { JITDUMP("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n", block->bbJumpDest->bbNum); - fgRemoveRefPred(block->GetBBNext(), block); + fgRemoveRefPred(block->Next(), block); } block->SetBBJumpKind(foldedJumpKind DEBUG_ARG(this)); } @@ -7551,7 +7551,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (block->KindIs(BBJ_COND)) { JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", - block->bbNum, block->GetBBNext()->bbNum); + block->bbNum, block->Next()->bbNum); fgRemoveRefPred(block->bbJumpDest, block); block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } @@ -11289,12 +11289,12 @@ void Compiler::impImportBlock(BasicBlock* block) /* Note if the next block has more than one ancestor */ - multRef |= block->GetBBNext()->bbRefs; + multRef |= block->Next()->bbRefs; /* Does the next block have temps assigned? 
*/ - baseTmp = block->GetBBNext()->bbStkTempsIn; - tgtBlock = block->GetBBNext(); + baseTmp = block->Next()->bbStkTempsIn; + tgtBlock = block->Next(); if (baseTmp != NO_BASE_TMP) { @@ -11315,9 +11315,9 @@ void Compiler::impImportBlock(BasicBlock* block) break; case BBJ_NONE: - multRef |= block->GetBBNext()->bbRefs; - baseTmp = block->GetBBNext()->bbStkTempsIn; - tgtBlock = block->GetBBNext(); + multRef |= block->Next()->bbRefs; + baseTmp = block->Next()->bbStkTempsIn; + tgtBlock = block->Next(); break; case BBJ_SWITCH: @@ -12119,7 +12119,7 @@ void Compiler::impImport() if (entryBlock->KindIs(BBJ_NONE)) { - entryBlock = entryBlock->GetBBNext(); + entryBlock = entryBlock->Next(); } else if (opts.IsOSR() && entryBlock->KindIs(BBJ_ALWAYS)) { @@ -12253,7 +12253,7 @@ void Compiler::impFixPredLists() continue; } - BasicBlock* const continuation = predBlock->GetBBNext(); + BasicBlock* const continuation = predBlock->Next(); fgAddRefPred(continuation, finallyBlock); if (!added) diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index d77d9052748103..000e99f47d4867 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -1071,7 +1071,7 @@ class IndirectCallTransformer // Find the hot/cold predecessors. (Consider: just record these when // we did the scouting). // - BasicBlock* const coldBlock = checkBlock->GetBBPrev(); + BasicBlock* const coldBlock = checkBlock->Prev(); if (!coldBlock->KindIs(BBJ_NONE)) { @@ -1079,7 +1079,7 @@ class IndirectCallTransformer return; } - BasicBlock* const hotBlock = coldBlock->GetBBPrev(); + BasicBlock* const hotBlock = coldBlock->Prev(); if (!hotBlock->KindIs(BBJ_ALWAYS) || (hotBlock->bbJumpDest != checkBlock)) { diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index a69c6ac7142b42..6756329c995311 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -32,7 +32,7 @@ BasicBlock* EHblkDsc::BBFilterLast() noway_assert(ebdHndBeg != nullptr); // The last block of the filter is the block immediately preceding the first block of the handler. 
- return ebdHndBeg->GetBBPrev(); + return ebdHndBeg->Prev(); } BasicBlock* EHblkDsc::ExFlowBlock() @@ -107,7 +107,7 @@ bool EHblkDsc::HasFinallyOrFaultHandler() bool EHblkDsc::InBBRange(BasicBlock* pBlk, BasicBlock* pStart, BasicBlock* pEnd) { - for (BasicBlock* pWalk = pStart; pWalk != pEnd; pWalk = pWalk->GetBBNext()) + for (BasicBlock* pWalk = pStart; pWalk != pEnd; pWalk = pWalk->Next()) { if (pWalk == pBlk) { @@ -119,7 +119,7 @@ bool EHblkDsc::InBBRange(BasicBlock* pBlk, BasicBlock* pStart, BasicBlock* pEnd) bool EHblkDsc::InTryRegionBBRange(BasicBlock* pBlk) { - return InBBRange(pBlk, ebdTryBeg, ebdTryLast->GetBBNext()); + return InBBRange(pBlk, ebdTryBeg, ebdTryLast->Next()); } bool EHblkDsc::InFilterRegionBBRange(BasicBlock* pBlk) @@ -129,7 +129,7 @@ bool EHblkDsc::InFilterRegionBBRange(BasicBlock* pBlk) bool EHblkDsc::InHndRegionBBRange(BasicBlock* pBlk) { - return InBBRange(pBlk, ebdHndBeg, ebdHndLast->GetBBNext()); + return InBBRange(pBlk, ebdHndBeg, ebdHndLast->Next()); } unsigned EHblkDsc::ebdGetEnclosingRegionIndex(bool* inTryRegion) @@ -836,7 +836,7 @@ void Compiler::ehUpdateForDeletedBlock(BasicBlock* block) return; } - BasicBlock* bPrev = block->GetBBPrev(); + BasicBlock* bPrev = block->Prev(); assert(bPrev != nullptr); ehUpdateLastBlocks(block, bPrev); @@ -865,7 +865,7 @@ bool Compiler::ehCanDeleteEmptyBlock(BasicBlock* block) if (ehIsBlockEHLast(block)) { - BasicBlock* bPrev = block->GetBBPrev(); + BasicBlock* bPrev = block->Prev(); if ((bPrev != nullptr) && ehIsBlockEHLast(bPrev)) { return false; @@ -941,18 +941,18 @@ void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** be if (inTryRegion) { *begBlk = ehDsc->ebdTryBeg; - *endBlk = ehDsc->ebdTryLast->GetBBNext(); + *endBlk = ehDsc->ebdTryLast->Next(); } else { *begBlk = ehDsc->ebdHndBeg; - *endBlk = ehDsc->ebdHndLast->GetBBNext(); + *endBlk = ehDsc->ebdHndLast->Next(); } } #else // !FEATURE_EH_CALLFINALLY_THUNKS EHblkDsc* ehDsc = ehGetDsc(finallyIndex); *begBlk = ehDsc->ebdTryBeg; - *endBlk = ehDsc->ebdTryLast->GetBBNext(); + *endBlk = ehDsc->ebdTryLast->Next(); #endif // !FEATURE_EH_CALLFINALLY_THUNKS } @@ -1320,10 +1320,10 @@ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab) bLast = nullptr; // Find the first non-removed block after the 'try' region to end our iteration. - bEnd = handlerTab->ebdTryLast->GetBBNext(); + bEnd = handlerTab->ebdTryLast->Next(); while ((bEnd != nullptr) && (bEnd->bbFlags & BBF_REMOVED)) { - bEnd = bEnd->GetBBNext(); + bEnd = bEnd->Next(); } // Update bLast to account for any removed blocks @@ -1335,7 +1335,7 @@ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab) bLast = block; } - block = block->GetBBNext(); + block = block->Next(); if (block == bEnd) { @@ -1349,10 +1349,10 @@ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab) bLast = nullptr; // Find the first non-removed block after the handler region to end our iteration. - bEnd = handlerTab->ebdHndLast->GetBBNext(); + bEnd = handlerTab->ebdHndLast->Next(); while ((bEnd != nullptr) && (bEnd->bbFlags & BBF_REMOVED)) { - bEnd = bEnd->GetBBNext(); + bEnd = bEnd->Next(); } // Update bLast to account for any removed blocks @@ -1364,7 +1364,7 @@ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab) bLast = block; } - block = block->GetBBNext(); + block = block->Next(); if (block == bEnd) { break; @@ -2295,7 +2295,7 @@ bool Compiler::fgNormalizeEHCase2() // outwards in enclosing try index order, and we'll get to them later. // Move the insert block backwards, to the one we just inserted. 
- insertBeforeBlk = insertBeforeBlk->GetBBPrev(); + insertBeforeBlk = insertBeforeBlk->Prev(); assert(insertBeforeBlk == newTryStart); modified = true; @@ -3428,8 +3428,8 @@ void Compiler::fgVerifyHandlerTab() { BasicBlock* blockEnd; - for (block = HBtab->ebdTryBeg, blockEnd = HBtab->ebdTryLast->GetBBNext(); block != blockEnd; - block = block->GetBBNext()) + for (block = HBtab->ebdTryBeg, blockEnd = HBtab->ebdTryLast->Next(); block != blockEnd; + block = block->Next()) { if (blockTryIndex[block->bbNum] == 0) { @@ -3438,8 +3438,8 @@ void Compiler::fgVerifyHandlerTab() } for (block = (HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg), - blockEnd = HBtab->ebdHndLast->GetBBNext(); - block != blockEnd; block = block->GetBBNext()) + blockEnd = HBtab->ebdHndLast->Next(); + block != blockEnd; block = block->Next()) { if (blockHndIndex[block->bbNum] == 0) { @@ -3467,8 +3467,8 @@ void Compiler::fgVerifyHandlerTab() BasicBlock* blockEnd; for (block = (HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg), - blockEnd = HBtab->ebdHndLast->GetBBNext(); - block != blockEnd; block = block->GetBBNext()) + blockEnd = HBtab->ebdHndLast->Next(); + block != blockEnd; block = block->Next()) { if (blockTryIndex[block->bbNum] == 0) { @@ -4060,7 +4060,7 @@ void Compiler::fgClearFinallyTargetBit(BasicBlock* block) { if (predBlock->KindIs(BBJ_ALWAYS) && predBlock->bbJumpDest == block) { - BasicBlock* pPrev = predBlock->GetBBPrev(); + BasicBlock* pPrev = predBlock->Prev(); if (pPrev != nullptr) { if (pPrev->KindIs(BBJ_CALLFINALLY)) @@ -4351,7 +4351,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) { assert(!block->IsFirst()); - BasicBlock* bPrev = block->GetBBPrev(); + BasicBlock* bPrev = block->Prev(); bPrev->copyEHRegion(block); @@ -4468,7 +4468,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) void Compiler::fgExtendEHRegionAfter(BasicBlock* block) { - BasicBlock* newBlk = block->GetBBNext(); + BasicBlock* newBlk = block->Next(); assert(newBlk != nullptr); newBlk->copyEHRegion(block); diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index 55c563540bb950..1ac0c42910de44 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -365,7 +365,7 @@ void Compiler::fgPerBlockLocalVarLiveness() } } - for (block = fgFirstBB; block; block = block->GetBBNext()) + for (block = fgFirstBB; block; block = block->Next()) { // Strictly speaking, the assignments for the "Def" cases aren't necessary here. // The empty set would do as well. Use means "use-before-def", so as long as that's @@ -407,7 +407,7 @@ void Compiler::fgPerBlockLocalVarLiveness() // memory that is not a GC Heap def. 
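     // (A sketch of the tracking assumed here: ByrefExposed and GcHeap are
     //  treated as separate memory kinds; the flag below starts out optimistic,
     //  and a block whose byref-exposed def is not also a GC heap def would
     //  clear it, e.g.
     //
     //      byrefStatesMatchGcHeapStates = false; // states diverged in this block
     // )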
byrefStatesMatchGcHeapStates = true; - for (block = fgFirstBB; block; block = block->GetBBNext()) + for (block = fgFirstBB; block; block = block->Next()) { VarSetOps::ClearD(this, fgCurUseSet); VarSetOps::ClearD(this, fgCurDefSet); @@ -890,7 +890,7 @@ void Compiler::fgExtendDbgLifetimes() { case BBJ_NONE: PREFIX_ASSUME(!block->IsLast()); - VarSetOps::UnionD(this, initVars, block->GetBBNext()->bbScope); + VarSetOps::UnionD(this, initVars, block->Next()->bbScope); break; case BBJ_ALWAYS: @@ -904,14 +904,14 @@ void Compiler::fgExtendDbgLifetimes() { assert(block->isBBCallAlwaysPair()); PREFIX_ASSUME(!block->IsLast()); - VarSetOps::UnionD(this, initVars, block->GetBBNext()->bbScope); + VarSetOps::UnionD(this, initVars, block->Next()->bbScope); } VarSetOps::UnionD(this, initVars, block->bbJumpDest->bbScope); break; case BBJ_COND: PREFIX_ASSUME(!block->IsLast()); - VarSetOps::UnionD(this, initVars, block->GetBBNext()->bbScope); + VarSetOps::UnionD(this, initVars, block->Next()->bbScope); VarSetOps::UnionD(this, initVars, block->bbJumpDest->bbScope); break; @@ -1305,11 +1305,11 @@ class LiveVarAnalysis m_memoryLiveIn = emptyMemoryKindSet; m_memoryLiveOut = emptyMemoryKindSet; - for (BasicBlock* block = m_compiler->fgLastBB; block; block = block->GetBBPrev()) + for (BasicBlock* block = m_compiler->fgLastBB; block; block = block->Prev()) { // sometimes block numbers are not monotonically increasing which // would cause us not to identify backedges - if (!block->IsLast() && block->GetBBNext()->bbNum <= block->bbNum) + if (!block->IsLast() && block->Next()->bbNum <= block->bbNum) { m_hasPossibleBackEdge = true; } diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index a44c5a07ed8e53..f29a5178b0c5d1 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -1800,7 +1800,7 @@ bool Compiler::optIsLoopClonable(unsigned loopInd) // that block; this is one of those cases. This could be fixed fairly easily; for example, // we could add a dummy nop block after the (cloned) loop bottom, in the same handler scope as the // loop. This is just a corner to cut to get this working faster. - BasicBlock* bbAfterLoop = loop.lpBottom->GetBBNext(); + BasicBlock* bbAfterLoop = loop.lpBottom->Next(); if (bbAfterLoop != nullptr && bbIsHandlerBeg(bbAfterLoop)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Next block after bottom is a handler start.\n", loopInd); @@ -2074,7 +2074,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) { assert(b->KindIs(BBJ_COND)); - BasicBlock* x = b->GetBBNext(); + BasicBlock* x = b->Next(); if (x != nullptr) { JITDUMP("Create branch around cloned loop\n"); @@ -2188,7 +2188,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) switch (newblk->GetBBJumpKind()) { case BBJ_NONE: - fgAddRefPred(newblk->GetBBNext(), newblk); + fgAddRefPred(newblk->Next(), newblk); break; case BBJ_ALWAYS: @@ -2197,7 +2197,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) break; case BBJ_COND: - fgAddRefPred(newblk->GetBBNext(), newblk); + fgAddRefPred(newblk->Next(), newblk); fgAddRefPred(newblk->bbJumpDest, newblk); break; @@ -2268,8 +2268,8 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // Add the fall-through path pred (either to T/E for fall-through from conditions to fast path, // or H2 if branch to E of fast path). 
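    // (Successor shape relied on here, sketched: a BBJ_COND block has two
    //  successors, the taken edge to its bbJumpDest and the not-taken edge
    //  falling through to its Next(); each needs its own pred edge, e.g.
    //
    //      fgAddRefPred(condLast->Next(), condLast);      // fall-through successor
    //      fgAddRefPred(condLast->bbJumpDest, condLast);  // branch successor
    //
    //  only the fall-through edge is added just below.)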
assert(condLast->KindIs(BBJ_COND)); - JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", condLast->bbNum, condLast->GetBBNext()->bbNum); - fgAddRefPred(condLast->GetBBNext(), condLast); + JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", condLast->bbNum, condLast->Next()->bbNum); + fgAddRefPred(condLast->Next(), condLast); // Don't unroll loops that we've cloned -- the unroller expects any loop it should unroll to // initialize the loop counter immediately before entering the loop, but we've left a shared @@ -2921,8 +2921,8 @@ bool Compiler::optCheckLoopCloningGDVTestProfitable(GenTreeOp* guard, LoopCloneV // Check for (4) // - BasicBlock* const hotSuccessor = guard->OperIs(GT_EQ) ? typeTestBlock->bbJumpDest : typeTestBlock->GetBBNext(); - BasicBlock* const coldSuccessor = guard->OperIs(GT_EQ) ? typeTestBlock->GetBBNext() : typeTestBlock->bbJumpDest; + BasicBlock* const hotSuccessor = guard->OperIs(GT_EQ) ? typeTestBlock->bbJumpDest : typeTestBlock->Next(); + BasicBlock* const coldSuccessor = guard->OperIs(GT_EQ) ? typeTestBlock->Next() : typeTestBlock->bbJumpDest; if (!hotSuccessor->hasProfileWeight() || !coldSuccessor->hasProfileWeight()) { diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 230b26cb8222cc..81330c9c23a40b 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -847,7 +847,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) var_types tempLclType = temp->TypeGet(); BasicBlock* defaultBB = jumpTab[jumpCnt - 1]; - BasicBlock* followingBB = originalSwitchBB->GetBBNext(); + BasicBlock* followingBB = originalSwitchBB->Next(); /* Is the number of cases right for a test and jump switch? */ const bool fFirstCaseFollows = (followingBB == jumpTab[0]); @@ -1064,7 +1064,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // There is a fall-through to the following block. In the loop // above, we deleted all the predecessor edges from the switch. // In this case, we need to add one back. - comp->fgAddRefPred(currentBlock->GetBBNext(), currentBlock); + comp->fgAddRefPred(currentBlock->Next(), currentBlock); } if (!fUsedAfterDefaultCondBlock) diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index a308fbf9d3c1ee..ad91940777e86d 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -1028,7 +1028,7 @@ void LinearScan::setBlockSequence() // For layout order, simply use bbNext if (isTraversalLayoutOrder()) { - nextBlock = block->GetBBNext(); + nextBlock = block->Next(); continue; } @@ -1483,15 +1483,15 @@ void LinearScan::recordVarLocationsAtStartOfBB(BasicBlock* bb) varDsc->SetRegNum(newRegNum); count++; - BasicBlock* prevReportedBlock = bb->GetBBPrev(); - if (!bb->IsFirst() && bb->GetBBPrev()->isBBCallAlwaysPairTail()) + BasicBlock* prevReportedBlock = bb->Prev(); + if (!bb->IsFirst() && bb->Prev()->isBBCallAlwaysPairTail()) { // For callf+always pair we generate the code for the always // block in genCallFinally and skip it, so we don't report // anything for it (it has only trivial instructions, so that // does not matter much). So whether we need to rehome or not // depends on what we reported at the end of the callf block. - prevReportedBlock = bb->GetBBPrev()->GetBBPrev(); + prevReportedBlock = bb->Prev()->Prev(); } if (prevReportedBlock != nullptr && VarSetOps::IsMember(compiler, prevReportedBlock->bbLiveOut, varIndex)) @@ -2548,7 +2548,7 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, { // Special handling to improve matching on backedges. BasicBlock* otherBlock = - predBlock->NextIs(block) ? 
predBlock->bbJumpDest : predBlock->GetBBNext(); + predBlock->NextIs(block) ? predBlock->bbJumpDest : predBlock->Next(); noway_assert(otherBlock != nullptr); if (isBlockVisited(otherBlock) && !blockInfo[otherBlock->bbNum].hasEHBoundaryIn) { diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index d4c2f7842e52e8..96133d2d4064a8 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -7482,7 +7482,7 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa // block removal on it. fgEnsureFirstBBisScratch(); fgFirstBB->bbFlags |= BBF_DONT_REMOVE; - block->bbJumpDest = fgFirstBB->GetBBNext(); + block->bbJumpDest = fgFirstBB->Next(); } // Finish hooking things up. @@ -13154,7 +13154,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) * Remove the conditional statement */ noway_assert(cond->gtOper == GT_CNS_INT); - noway_assert((block->GetBBNext()->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0)); + noway_assert((block->Next()->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0)); if (condTree != cond) { @@ -13181,7 +13181,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) /* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */ block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); bTaken = block->bbJumpDest; - bNotTaken = block->GetBBNext(); + bNotTaken = block->Next(); } else { @@ -13196,7 +13196,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) /* JTRUE 0 - transform the basic block into a BBJ_NONE */ block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); - bTaken = block->GetBBNext(); + bTaken = block->Next(); bNotTaken = block->bbJumpDest; } @@ -13253,24 +13253,24 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) switch (bUpdated->GetBBJumpKind()) { case BBJ_NONE: - edge = fgGetPredForBlock(bUpdated->GetBBNext(), bUpdated); + edge = fgGetPredForBlock(bUpdated->Next(), bUpdated); newMaxWeight = bUpdated->bbWeight; newMinWeight = min(edge->edgeWeightMin(), newMaxWeight); - edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->GetBBNext()); + edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->Next()); break; case BBJ_COND: - edge = fgGetPredForBlock(bUpdated->GetBBNext(), bUpdated); + edge = fgGetPredForBlock(bUpdated->Next(), bUpdated); newMaxWeight = bUpdated->bbWeight; newMinWeight = min(edge->edgeWeightMin(), newMaxWeight); - edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->GetBBNext()); + edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->Next()); FALLTHROUGH; case BBJ_ALWAYS: edge = fgGetPredForBlock(bUpdated->bbJumpDest, bUpdated); newMaxWeight = bUpdated->bbWeight; newMinWeight = min(edge->edgeWeightMin(), newMaxWeight); - edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->GetBBNext()); + edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->Next()); break; default: @@ -13925,7 +13925,7 @@ void Compiler::fgMorphBlocks() } } - block = block->GetBBNext(); + block = block->Next(); } while (block != nullptr); // We are done with the global morphing phase diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index 796fcd58f18750..b9728f78991794 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -848,7 +848,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() } else { - edge2 = m_comp->fgGetPredForBlock(m_b2->GetBBNext(), m_b2); + edge2 = m_comp->fgGetPredForBlock(m_b2->Next(), m_b2); 
+        edge2 = m_comp->fgGetPredForBlock(m_b2->Next(), m_b2);
     }
m_comp->fgRemoveRefPred(m_b1->bbJumpDest, m_b1); @@ -900,7 +900,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() // // Replace pred 'm_b2' for 'm_b2->bbNext' with 'm_b1' // Remove pred 'm_b2' for 'm_b2->bbJumpDest' - m_comp->fgReplacePred(m_b2->GetBBNext(), m_b2, m_b1); + m_comp->fgReplacePred(m_b2->Next(), m_b2, m_b1); m_comp->fgRemoveRefPred(m_b2->bbJumpDest, m_b2); } @@ -1463,7 +1463,7 @@ PhaseStatus Compiler::optOptimizeBools() numPasses++; change = false; - for (BasicBlock* b1 = fgFirstBB; b1 != nullptr; b1 = retry ? b1 : b1->GetBBNext()) + for (BasicBlock* b1 = fgFirstBB; b1 != nullptr; b1 = retry ? b1 : b1->Next()) { retry = false; @@ -1476,7 +1476,7 @@ PhaseStatus Compiler::optOptimizeBools() // If there is no next block, we're done - BasicBlock* b2 = b1->GetBBNext(); + BasicBlock* b2 = b1->Next(); if (b2 == nullptr) { break; diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index d2095c346cc2d6..8f48ba57a30102 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -490,7 +490,7 @@ void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmar { reportBefore(); /* The loop has a new head - Just update the loop table */ - loop.lpHead = block->GetBBPrev(); + loop.lpHead = block->Prev(); } reportAfter(); @@ -743,7 +743,7 @@ bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenT { if (predBlock->KindIs(BBJ_NONE) && predBlock->NextIs(optLoopTable[loopInd].lpEntry) && (predBlock->countOfInEdges() == 1) && (predBlock->firstStmt() == nullptr) && - !predBlock->IsFirst() && predBlock->GetBBPrev()->bbFallsThrough()) + !predBlock->IsFirst() && predBlock->Prev()->bbFallsThrough()) { initBlockOk = true; } @@ -1151,9 +1151,9 @@ bool Compiler::optExtractInitTestIncr( // the first time, which might be empty if no hoisting has yet occurred. In this case, look a // little harder for the possible loop initialization statement. if (initBlock->KindIs(BBJ_NONE) && initBlock->NextIs(top) && (initBlock->countOfInEdges() == 1) && - !initBlock->IsFirst() && initBlock->GetBBPrev()->bbFallsThrough()) + !initBlock->IsFirst() && initBlock->Prev()->bbFallsThrough()) { - initBlock = initBlock->GetBBPrev(); + initBlock = initBlock->Prev(); phdrStmt = initBlock->firstStmt(); } } @@ -1377,7 +1377,7 @@ void Compiler::optCheckPreds() { // make sure this pred is part of the BB list BasicBlock* bb; - for (bb = fgFirstBB; bb; bb = bb->GetBBNext()) + for (bb = fgFirstBB; bb; bb = bb->Next()) { if (bb == predBlock) { @@ -1889,7 +1889,7 @@ class LoopSearch // of an outer loop. For the dominance test, if `predBlock` is a new block, use // its unique predecessor since the dominator tree has info for that. BasicBlock* effectivePred = - (predBlock->bbNum > oldBlockMaxNum ? predBlock->GetBBPrev() : predBlock); + (predBlock->bbNum > oldBlockMaxNum ? predBlock->Prev() : predBlock); if (comp->fgDominate(entry, effectivePred)) { // Outer loop back-edge @@ -1925,13 +1925,13 @@ class LoopSearch } if (isFirstVisit && !predBlock->IsLast() && - (PositionNum(predBlock->GetBBNext()) == predBlock->bbNum)) + (PositionNum(predBlock->Next()) == predBlock->bbNum)) { // We've created a new block immediately after `predBlock` to // reconnect what was fall-through. Mark it as in-loop also; // it needs to stay with `prev` and if it exits the loop we'd // just need to re-create it if we tried to move it out. 
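                 // (The rule making that check work, from PositionNum below, sketched:
                 //  such a freshly created block has exactly one pred edge, and
                 //
                 //      PositionNum(newBlock) == newBlock->Prev()->bbNum
                 //
                 //  so the new block "positions" at its unique predecessor's number.)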
- loopBlocks.Insert(predBlock->GetBBNext()->bbNum); + loopBlocks.Insert(predBlock->Next()->bbNum); } } } @@ -1963,7 +1963,7 @@ class LoopSearch // block. assert(block->PrevIs(block->bbPreds->getSourceBlock())); assert(block->bbPreds->getNextPredEdge() == nullptr); - return block->GetBBPrev()->bbNum; + return block->Prev()->bbNum; } return block->bbNum; } @@ -1983,9 +1983,9 @@ class LoopSearch // Compaction (if it needs to happen) will require an insertion point. BasicBlock* moveAfter = nullptr; - for (BasicBlock* previous = top->GetBBPrev(); previous != bottom;) + for (BasicBlock* previous = top->Prev(); previous != bottom;) { - BasicBlock* block = previous->GetBBNext(); + BasicBlock* block = previous->Next(); if (loopBlocks.IsMember(block->bbNum)) { @@ -2009,11 +2009,11 @@ class LoopSearch // If so, give up on recognition of this loop. // BasicBlock* lastNonLoopBlock = block; - BasicBlock* nextLoopBlock = block->GetBBNext(); + BasicBlock* nextLoopBlock = block->Next(); while ((nextLoopBlock != nullptr) && !loopBlocks.IsMember(nextLoopBlock->bbNum)) { lastNonLoopBlock = nextLoopBlock; - nextLoopBlock = nextLoopBlock->GetBBNext(); + nextLoopBlock = nextLoopBlock->Next(); } if (nextLoopBlock == nullptr) @@ -2049,7 +2049,7 @@ class LoopSearch } // Now physically move the blocks. - BasicBlock* moveBefore = moveAfter->GetBBNext(); + BasicBlock* moveBefore = moveAfter->Next(); comp->fgUnlinkRange(block, lastNonLoopBlock); comp->fgMoveBlocksAfter(block, lastNonLoopBlock, moveAfter); @@ -2136,7 +2136,7 @@ class LoopSearch // BasicBlock* TryAdvanceInsertionPoint(BasicBlock* oldMoveAfter) { - BasicBlock* newMoveAfter = oldMoveAfter->GetBBNext(); + BasicBlock* newMoveAfter = oldMoveAfter->Next(); if (!BasicBlock::sameEHRegion(oldMoveAfter, newMoveAfter)) { @@ -2325,7 +2325,7 @@ class LoopSearch else if (block->KindIs(BBJ_ALWAYS) && (block->bbJumpDest == newNext)) { // We've made `block`'s jump target its bbNext, so remove the jump. - if (!comp->fgOptimizeBranchToNext(block, newNext, block->GetBBPrev())) + if (!comp->fgOptimizeBranchToNext(block, newNext, block->Prev())) { // If optimizing away the goto-next failed for some reason, mark it KEEP_BBJ_ALWAYS to // prevent assertions from complaining about it. @@ -2464,7 +2464,7 @@ class LoopSearch break; } - if (block->bbFallsThrough() && !loopBlocks.IsMember(block->GetBBNext()->bbNum)) + if (block->bbFallsThrough() && !loopBlocks.IsMember(block->Next()->bbNum)) { // Found a fall-through exit. lastExit = block; @@ -2503,9 +2503,9 @@ void Compiler::optFindNaturalLoops() LoopSearch search(this); - for (BasicBlock* head = fgFirstBB; !head->IsLast(); head = head->GetBBNext()) + for (BasicBlock* head = fgFirstBB; !head->IsLast(); head = head->Next()) { - BasicBlock* top = head->GetBBNext(); + BasicBlock* top = head->Next(); // Blocks that are rarely run have a zero bbWeight and should never be optimized here. 
if (top->bbWeight == BB_ZERO_WEIGHT) @@ -2734,7 +2734,7 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R if (addPreds && blk->bbFallsThrough()) { - fgAddRefPred(blk->GetBBNext(), blk); + fgAddRefPred(blk->Next(), blk); } BasicBlock* newJumpDest = nullptr; @@ -3435,7 +3435,7 @@ BasicBlock* Compiler::optLoopEntry(BasicBlock* preHeader) if (preHeader->KindIs(BBJ_NONE)) { - return preHeader->GetBBNext(); + return preHeader->Next(); } else { @@ -4348,7 +4348,7 @@ PhaseStatus Compiler::optUnrollLoops() BlockToBlockMap blockMap(getAllocator(CMK_LoopOpt)); BasicBlock* insertAfter = bottom; - BasicBlock* const tail = bottom->GetBBNext(); + BasicBlock* const tail = bottom->Next(); BasicBlock::loopNumber newLoopNum = loop.lpParent; bool anyNestedLoopsUnrolledThisLoop = false; int lval; @@ -4359,7 +4359,7 @@ PhaseStatus Compiler::optUnrollLoops() // Note: we can't use the loop.LoopBlocks() iterator, as it captures loop.lpBottom->bbNext at the // beginning of iteration, and we insert blocks before that. So we need to evaluate lpBottom->bbNext // every iteration. - for (BasicBlock* block = loop.lpTop; !loop.lpBottom->NextIs(block); block = block->GetBBNext()) + for (BasicBlock* block = loop.lpTop; !loop.lpBottom->NextIs(block); block = block->Next()) { BasicBlock* newBlock = insertAfter = fgNewBBafter(block->GetBBJumpKind(), insertAfter, /*extendRegion*/ true); @@ -4371,7 +4371,7 @@ PhaseStatus Compiler::optUnrollLoops() // to clone a block in the loop, splice out and forget all the blocks we cloned so far: // put the loop blocks back to how they were before we started cloning blocks, // and abort unrolling the loop. - bottom->SetBBNext(tail); + bottom->SetNext(tail); loop.lpFlags |= LPFLG_DONT_UNROLL; // Mark it so we don't try to unroll it again. INDEBUG(++unrollFailures); JITDUMP("Failed to unroll loop " FMT_LP ": block cloning failed on " FMT_BB "\n", lnum, @@ -4422,7 +4422,7 @@ PhaseStatus Compiler::optUnrollLoops() // Now redirect any branches within the newly-cloned iteration. // Don't include `bottom` in the iteration, since we've already changed the // newBlock->bbJumpKind, above. - for (BasicBlock* block = loop.lpTop; block != loop.lpBottom; block = block->GetBBNext()) + for (BasicBlock* block = loop.lpTop; block != loop.lpBottom; block = block->Next()) { BasicBlock* newBlock = blockMap[block]; optCopyBlkDest(block, newBlock); @@ -4434,7 +4434,7 @@ PhaseStatus Compiler::optUnrollLoops() // After doing this, all the newly cloned blocks now have proper flow and pred lists. 
// BasicBlock* const clonedTop = blockMap[loop.lpTop]; - fgAddRefPred(clonedTop, clonedTop->GetBBPrev()); + fgAddRefPred(clonedTop, clonedTop->Prev()); /* update the new value for the unrolled iterator */ @@ -4735,7 +4735,7 @@ bool Compiler::optReachWithoutCall(BasicBlock* topBB, BasicBlock* botBB) } } - curBB = curBB->GetBBNext(); + curBB = curBB->Next(); } // If we didn't find any blocks that contained a gc safe point and @@ -4867,7 +4867,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) // Since bTest is a BBJ_COND it will have a bbNext // - BasicBlock* const bJoin = bTest->GetBBNext(); + BasicBlock* const bJoin = bTest->Next(); noway_assert(bJoin != nullptr); // 'block' must be in the same try region as the condition, since we're going to insert a duplicated condition @@ -4879,7 +4879,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) return false; } - // The duplicated condition block will branch to bTest->GetBBNext(), so that also better be in the + // The duplicated condition block will branch to bTest->Next(), so that also better be in the // same try region (or no try region) to avoid generating illegal flow. if (bJoin->hasTryIndex() && !BasicBlock::sameTryRegion(block, bJoin)) { @@ -5216,15 +5216,15 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) weight_t const testToAfterWeight = weightTop * testToAfterLikelihood; FlowEdge* const edgeTestToNext = fgGetPredForBlock(bTop, bTest); - FlowEdge* const edgeTestToAfter = fgGetPredForBlock(bTest->GetBBNext(), bTest); + FlowEdge* const edgeTestToAfter = fgGetPredForBlock(bTest->Next(), bTest); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (iterate loop)\n", bTest->bbNum, bTop->bbNum, testToNextWeight); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (exit loop)\n", bTest->bbNum, - bTest->GetBBNext()->bbNum, testToAfterWeight); + bTest->Next()->bbNum, testToAfterWeight); edgeTestToNext->setEdgeWeights(testToNextWeight, testToNextWeight, bTop); - edgeTestToAfter->setEdgeWeights(testToAfterWeight, testToAfterWeight, bTest->GetBBNext()); + edgeTestToAfter->setEdgeWeights(testToAfterWeight, testToAfterWeight, bTest->Next()); // Adjust edges out of block, using the same distribution. 
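        // (Distribution sketched with the helpers used above:
        //
        //      weight_t const w = blockWeight * edgeLikelihood;
        //      edge->setEdgeWeights(w, w, destBlock);   // min == max == w
        //
        //  i.e. each outgoing edge gets its share of the block's weight.)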
// @@ -5236,15 +5236,15 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) weight_t const blockToNextWeight = weightBlock * blockToNextLikelihood; weight_t const blockToAfterWeight = weightBlock * blockToAfterLikelihood; - FlowEdge* const edgeBlockToNext = fgGetPredForBlock(bNewCond->GetBBNext(), bNewCond); + FlowEdge* const edgeBlockToNext = fgGetPredForBlock(bNewCond->Next(), bNewCond); FlowEdge* const edgeBlockToAfter = fgGetPredForBlock(bNewCond->bbJumpDest, bNewCond); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (enter loop)\n", bNewCond->bbNum, - bNewCond->GetBBNext()->bbNum, blockToNextWeight); + bNewCond->Next()->bbNum, blockToNextWeight); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (avoid loop)\n", bNewCond->bbNum, bNewCond->bbJumpDest->bbNum, blockToAfterWeight); - edgeBlockToNext->setEdgeWeights(blockToNextWeight, blockToNextWeight, bNewCond->GetBBNext()); + edgeBlockToNext->setEdgeWeights(blockToNextWeight, blockToNextWeight, bNewCond->Next()); edgeBlockToAfter->setEdgeWeights(blockToAfterWeight, blockToAfterWeight, bNewCond->bbJumpDest); #ifdef DEBUG @@ -5253,7 +5253,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) if ((activePhaseChecks & PhaseChecks::CHECK_PROFILE) == PhaseChecks::CHECK_PROFILE) { const ProfileChecks checks = (ProfileChecks)JitConfig.JitProfileChecks(); - const bool nextProfileOk = fgDebugCheckIncomingProfileData(bNewCond->GetBBNext(), checks); + const bool nextProfileOk = fgDebugCheckIncomingProfileData(bNewCond->Next(), checks); const bool jumpProfileOk = fgDebugCheckIncomingProfileData(bNewCond->bbJumpDest, checks); if (hasFlag(checks, ProfileChecks::RAISE_ASSERT)) @@ -5269,7 +5269,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) if (verbose) { printf("\nDuplicated loop exit block at " FMT_BB " for loop (" FMT_BB " - " FMT_BB ")\n", bNewCond->bbNum, - bNewCond->GetBBNext()->bbNum, bTest->bbNum); + bNewCond->Next()->bbNum, bTest->bbNum); printf("Estimated code size expansion is %d\n", estDupCostSz); fgDumpBlock(bNewCond); @@ -6215,7 +6215,7 @@ bool Compiler::optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, break; } - beg = beg->GetBBNext(); + beg = beg->Next(); } return false; @@ -6278,7 +6278,7 @@ bool Compiler::optIsVarAssgLoop(unsigned lnum, unsigned var) return true; } - return optIsVarAssigned(optLoopTable[lnum].lpHead->GetBBNext(), optLoopTable[lnum].lpBottom, nullptr, var); + return optIsVarAssigned(optLoopTable[lnum].lpHead->Next(), optLoopTable[lnum].lpBottom, nullptr, var); } } @@ -8206,7 +8206,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum) } else { - skipLoopBlock = head->GetBBNext(); + skipLoopBlock = head->Next(); } assert(skipLoopBlock != entry); diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp index d870740f2d367f..7c99c264439ca2 100644 --- a/src/coreclr/jit/patchpoint.cpp +++ b/src/coreclr/jit/patchpoint.cpp @@ -52,7 +52,7 @@ class PatchpointTransformer } int count = 0; - for (BasicBlock* const block : compiler->Blocks(compiler->fgFirstBB->GetBBNext())) + for (BasicBlock* const block : compiler->Blocks(compiler->fgFirstBB->Next())) { if (block->bbFlags & BBF_PATCHPOINT) { diff --git a/src/coreclr/jit/promotionliveness.cpp b/src/coreclr/jit/promotionliveness.cpp index 727f153905e53a..422dc3f7e710ab 100644 --- a/src/coreclr/jit/promotionliveness.cpp +++ b/src/coreclr/jit/promotionliveness.cpp @@ -299,9 +299,9 @@ void PromotionLiveness::InterBlockLiveness() { changed = false; - for (BasicBlock* block = m_compiler->fgLastBB; 
block != nullptr; block = block->GetBBPrev()) + for (BasicBlock* block = m_compiler->fgLastBB; block != nullptr; block = block->Prev()) { - m_hasPossibleBackEdge |= !block->IsLast() && (block->GetBBNext()->bbNum <= block->bbNum); + m_hasPossibleBackEdge |= !block->IsLast() && (block->Next()->bbNum <= block->bbNum); changed |= PerBlockLiveness(block); } diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp index 07255f54b6a134..a8365f7b93bdfd 100644 --- a/src/coreclr/jit/redundantbranchopts.cpp +++ b/src/coreclr/jit/redundantbranchopts.cpp @@ -48,7 +48,7 @@ PhaseStatus Compiler::optRedundantBranches() { bool madeChangesThisBlock = m_compiler->optRedundantRelop(block); - BasicBlock* const bbNext = block->GetBBNext(); + BasicBlock* const bbNext = block->Next(); BasicBlock* const bbJump = block->bbJumpDest; madeChangesThisBlock |= m_compiler->optRedundantBranch(block); @@ -568,7 +568,7 @@ bool Compiler::optRedundantBranch(BasicBlock* const block) (rii.vnRelation == ValueNumStore::VN_RELATION_KIND::VRK_Swap); BasicBlock* const trueSuccessor = domBlock->bbJumpDest; - BasicBlock* const falseSuccessor = domBlock->GetBBNext(); + BasicBlock* const falseSuccessor = domBlock->Next(); // If we can trace the flow from the dominating relop, we can infer its value. // @@ -613,7 +613,7 @@ bool Compiler::optRedundantBranch(BasicBlock* const block) // const bool relopIsFalse = rii.reverseSense ^ (domIsSameRelop | domIsInferredRelop); JITDUMP("Fall through successor " FMT_BB " of " FMT_BB " reaches, relop [%06u] must be %s\n", - domBlock->GetBBNext()->bbNum, domBlock->bbNum, dspTreeID(tree), + domBlock->Next()->bbNum, domBlock->bbNum, dspTreeID(tree), relopIsFalse ? "false" : "true"); relopValue = relopIsFalse ? 0 : 1; break; @@ -711,7 +711,7 @@ struct JumpThreadInfo JumpThreadInfo(Compiler* comp, BasicBlock* block) : m_block(block) , m_trueTarget(block->bbJumpDest) - , m_falseTarget(block->GetBBNext()) + , m_falseTarget(block->Next()) , m_fallThroughPred(nullptr) , m_ambiguousVNBlock(nullptr) , m_truePreds(BlockSetOps::MakeEmpty(comp)) @@ -1072,8 +1072,8 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl // latter should prove useful in subsequent work, where we aim to enable jump // threading in cases where block has side effects. // - BasicBlock* const domTrueSuccessor = domIsSameRelop ? domBlock->bbJumpDest : domBlock->GetBBNext(); - BasicBlock* const domFalseSuccessor = domIsSameRelop ? domBlock->GetBBNext() : domBlock->bbJumpDest; + BasicBlock* const domTrueSuccessor = domIsSameRelop ? domBlock->bbJumpDest : domBlock->Next(); + BasicBlock* const domFalseSuccessor = domIsSameRelop ? domBlock->Next() : domBlock->bbJumpDest; JumpThreadInfo jti(this, block); for (BasicBlock* const predBlock : block->PredBlocks()) diff --git a/src/coreclr/jit/switchrecognition.cpp b/src/coreclr/jit/switchrecognition.cpp index 072552cccfdc76..fe1ecb8d39d744 100644 --- a/src/coreclr/jit/switchrecognition.cpp +++ b/src/coreclr/jit/switchrecognition.cpp @@ -26,7 +26,7 @@ PhaseStatus Compiler::optSwitchRecognition() // a series of ccmp instruction (see ifConvert phase). #ifdef TARGET_XARCH bool modified = false; - for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->GetBBNext()) + for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->Next()) { // block->KindIs(BBJ_COND) check is for better throughput. 
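         // (Shape being recognized, sketched; the constants and locals here are
         //  illustrative only: a chain of BBJ_COND blocks testing one local
         //  against different constants,
         //
         //      if (x == 10) goto T0;    // firstBlock
         //      if (x == 20) goto T1;    // firstBlock->Next()
         //      ...
         //
         //  which optSwitchConvert below folds into one BBJ_SWITCH with a
         //  bbsDstTab jump table.)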
if (block->KindIs(BBJ_COND) && !block->isRunRarely() && optSwitchDetectAndConvert(block)) @@ -95,8 +95,8 @@ bool IsConstantTestCondBlock(const BasicBlock* block, } *isReversed = rootNode->gtGetOp1()->OperIs(GT_NE); - *blockIfTrue = *isReversed ? block->GetBBNext() : block->bbJumpDest; - *blockIfFalse = *isReversed ? block->bbJumpDest : block->GetBBNext(); + *blockIfTrue = *isReversed ? block->Next() : block->bbJumpDest; + *blockIfFalse = *isReversed ? block->bbJumpDest : block->Next(); if (block->NextIs(block->bbJumpDest) || (block->bbJumpDest == block)) { @@ -166,7 +166,7 @@ bool Compiler::optSwitchDetectAndConvert(BasicBlock* firstBlock) const BasicBlock* prevBlock = firstBlock; // Now walk the next blocks and see if they are basically the same type of test - for (const BasicBlock* currBb = firstBlock->GetBBNext(); currBb != nullptr; currBb = currBb->GetBBNext()) + for (const BasicBlock* currBb = firstBlock->Next(); currBb != nullptr; currBb = currBb->Next()) { GenTree* currVariableNode = nullptr; ssize_t currCns = 0; @@ -309,7 +309,7 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* const BasicBlock* lastBlock = firstBlock; for (int i = 0; i < testsCount - 1; i++) { - lastBlock = lastBlock->GetBBNext(); + lastBlock = lastBlock->Next(); } BasicBlock* blockIfTrue = nullptr; @@ -338,11 +338,11 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* gtUpdateStmtSideEffects(firstBlock->lastStmt()); // Unlink and remove the whole chain of conditional blocks - BasicBlock* blockToRemove = firstBlock->GetBBNext(); + BasicBlock* blockToRemove = firstBlock->Next(); fgRemoveRefPred(blockToRemove, firstBlock); while (!lastBlock->NextIs(blockToRemove)) { - BasicBlock* nextBlock = blockToRemove->GetBBNext(); + BasicBlock* nextBlock = blockToRemove->Next(); fgRemoveBlock(blockToRemove, true); blockToRemove = nextBlock; } @@ -356,7 +356,7 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* firstBlock->bbJumpSwt->bbsCount = jumpCount + 1; firstBlock->bbJumpSwt->bbsHasDefault = true; firstBlock->bbJumpSwt->bbsDstTab = jmpTab; - firstBlock->SetBBNext(isReversed ? blockIfTrue : blockIfFalse); + firstBlock->SetNext(isReversed ? blockIfTrue : blockIfFalse); // Splitting doesn't work well with jump-tables currently opts.compProcedureSplitting = false; diff --git a/src/coreclr/jit/unwind.cpp b/src/coreclr/jit/unwind.cpp index 7348fa2ee53f6e..366f5ce4d72346 100644 --- a/src/coreclr/jit/unwind.cpp +++ b/src/coreclr/jit/unwind.cpp @@ -129,7 +129,7 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, *ppStartLoc = new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndBeg)); *ppEndLoc = HBtab->ebdHndLast->IsLast() ? 
nullptr - : new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndLast->GetBBNext())); + : new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndLast->Next())); } } } From 59e94556423407ac718b381fcdd62a903bfbfc3e Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Thu, 5 Oct 2023 13:35:30 -0400 Subject: [PATCH 11/14] Style --- src/coreclr/jit/codegenlinear.cpp | 3 +-- src/coreclr/jit/fgbasic.cpp | 6 ++---- src/coreclr/jit/fgdiagnostic.cpp | 5 ++--- src/coreclr/jit/fgehopt.cpp | 6 ++---- src/coreclr/jit/jiteh.cpp | 6 ++---- src/coreclr/jit/lsra.cpp | 3 +-- src/coreclr/jit/optimizer.cpp | 6 ++---- src/coreclr/jit/unwind.cpp | 6 +++--- 8 files changed, 15 insertions(+), 26 deletions(-) diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index bc9ce42fe7ae6e..325f7c4d05f460 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -330,8 +330,7 @@ void CodeGen::genCodeForBBlist() // // Note: We need to have set compCurBB before calling emitAddLabel // - if (!block->IsFirst() && block->Prev()->KindIs(BBJ_COND) && - (block->bbWeight != block->Prev()->bbWeight)) + if (!block->IsFirst() && block->Prev()->KindIs(BBJ_COND) && (block->bbWeight != block->Prev()->bbWeight)) { JITDUMP("Adding label due to BB weight difference: BBJ_COND " FMT_BB " with weight " FMT_WT " different from " FMT_BB " with weight " FMT_WT "\n", diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index ca80c75789f0f2..2ce02ee3bc1718 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -5060,8 +5060,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } else if (bPrev->KindIs(BBJ_ALWAYS) && block->NextIs(bPrev->bbJumpDest) && - !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) && - !block->IsLastHotBlock(this)) + !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) && !block->IsLastHotBlock(this)) { // previous block is a BBJ_ALWAYS to the next block: change to BBJ_NONE. // Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS), @@ -5455,8 +5454,7 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) // If bSrc is an unconditional branch to the next block // then change it to a BBJ_NONE block // - if (bSrc->KindIs(BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && - bSrc->NextIs(bSrc->bbJumpDest)) + if (bSrc->KindIs(BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && bSrc->NextIs(bSrc->bbJumpDest)) { bSrc->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); JITDUMP("Changed an unconditional jump from " FMT_BB " to the next block " FMT_BB diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index 953b003f036232..eddec9820a4f05 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -1685,7 +1685,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) childCount++; bbCur = child->m_bbEnd->Next(); // Next, output blocks after this child. - child = child->m_rgnNext; // Move to the next child, if any. + child = child->m_rgnNext; // Move to the next child, if any. childCurBB = (child == nullptr) ? 
nullptr : child->m_bbStart; } } @@ -1745,8 +1745,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) if (ehDsc->HasFilter()) { sprintf_s(name, sizeof(name), "EH#%u filter", XTnum); - rgnGraph.Insert(name, RegionGraph::RegionType::EH, ehDsc->ebdFilter, - ehDsc->ebdHndBeg->Prev()); + rgnGraph.Insert(name, RegionGraph::RegionType::EH, ehDsc->ebdFilter, ehDsc->ebdHndBeg->Prev()); } } } diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 5bd8d296df1f84..829a5ae4d9c98b 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -419,8 +419,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() // Try must be a callalways pair of blocks. if (!firstTryBlock->NextIs(lastTryBlock)) { - JITDUMP("EH#%u block " FMT_BB " not last block in try; skipping.\n", XTnum, - firstTryBlock->Next()->bbNum); + JITDUMP("EH#%u block " FMT_BB " not last block in try; skipping.\n", XTnum, firstTryBlock->Next()->bbNum); XTnum++; continue; } @@ -436,8 +435,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() bool verifiedSingleCallfinally = true; ehGetCallFinallyBlockRange(XTnum, &firstCallFinallyRangeBlock, &endCallFinallyRangeBlock); - for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock; - block = block->Next()) + for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock; block = block->Next()) { if (block->KindIs(BBJ_CALLFINALLY) && (block->bbJumpDest == firstHandlerBlock)) { diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index 6756329c995311..97ba02897703c2 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -3428,8 +3428,7 @@ void Compiler::fgVerifyHandlerTab() { BasicBlock* blockEnd; - for (block = HBtab->ebdTryBeg, blockEnd = HBtab->ebdTryLast->Next(); block != blockEnd; - block = block->Next()) + for (block = HBtab->ebdTryBeg, blockEnd = HBtab->ebdTryLast->Next(); block != blockEnd; block = block->Next()) { if (blockTryIndex[block->bbNum] == 0) { @@ -3437,8 +3436,7 @@ void Compiler::fgVerifyHandlerTab() } } - for (block = (HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg), - blockEnd = HBtab->ebdHndLast->Next(); + for (block = (HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg), blockEnd = HBtab->ebdHndLast->Next(); block != blockEnd; block = block->Next()) { if (blockHndIndex[block->bbNum] == 0) diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index ad91940777e86d..95120f5d285320 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -2547,8 +2547,7 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, if (predBlock->KindIs(BBJ_COND)) { // Special handling to improve matching on backedges. - BasicBlock* otherBlock = - predBlock->NextIs(block) ? predBlock->bbJumpDest : predBlock->Next(); + BasicBlock* otherBlock = predBlock->NextIs(block) ? predBlock->bbJumpDest : predBlock->Next(); noway_assert(otherBlock != nullptr); if (isBlockVisited(otherBlock) && !blockInfo[otherBlock->bbNum].hasEHBoundaryIn) { diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 8f48ba57a30102..ae16f3cc01cd40 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -1888,8 +1888,7 @@ class LoopSearch // otherwise the loop is still valid and this may be a (flow-wise) back-edge // of an outer loop. For the dominance test, if `predBlock` is a new block, use // its unique predecessor since the dominator tree has info for that. 
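                     // (Why the substitution is sound, sketched: the new block B
                     //  postdates the dominator tree, but B has a unique predecessor
                     //  P == B->Prev(), and every path to B runs through P, so
                     //  fgDominate(entry, P) answers the dominance question for B.)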
- BasicBlock* effectivePred = - (predBlock->bbNum > oldBlockMaxNum ? predBlock->Prev() : predBlock); + BasicBlock* effectivePred = (predBlock->bbNum > oldBlockMaxNum ? predBlock->Prev() : predBlock); if (comp->fgDominate(entry, effectivePred)) { // Outer loop back-edge @@ -1924,8 +1923,7 @@ class LoopSearch isFirstVisit = true; } - if (isFirstVisit && !predBlock->IsLast() && - (PositionNum(predBlock->Next()) == predBlock->bbNum)) + if (isFirstVisit && !predBlock->IsLast() && (PositionNum(predBlock->Next()) == predBlock->bbNum)) { // We've created a new block immediately after `predBlock` to // reconnect what was fall-through. Mark it as in-loop also; diff --git a/src/coreclr/jit/unwind.cpp b/src/coreclr/jit/unwind.cpp index 366f5ce4d72346..e035b1188a22f8 100644 --- a/src/coreclr/jit/unwind.cpp +++ b/src/coreclr/jit/unwind.cpp @@ -127,9 +127,9 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, { assert(func->funKind == FUNC_HANDLER); *ppStartLoc = new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndBeg)); - *ppEndLoc = HBtab->ebdHndLast->IsLast() - ? nullptr - : new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndLast->Next())); + *ppEndLoc = HBtab->ebdHndLast->IsLast() ? nullptr + : new (this, CMK_UnwindInfo) + emitLocation(ehEmitCookie(HBtab->ebdHndLast->Next())); } } } From 1fb10a8d3f84e464393cd01d11989e41931969e9 Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Thu, 5 Oct 2023 18:50:29 -0400 Subject: [PATCH 12/14] Add IsFirstColdBlock --- src/coreclr/jit/block.cpp | 15 +++++++++++++++ src/coreclr/jit/block.h | 2 ++ src/coreclr/jit/codegenlinear.cpp | 4 ++-- src/coreclr/jit/fgbasic.cpp | 8 ++++---- src/coreclr/jit/fgdiagnostic.cpp | 2 +- src/coreclr/jit/fgopt.cpp | 4 ++-- 6 files changed, 26 insertions(+), 9 deletions(-) diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 5b6d4b828342fd..34d1156b0c3c9c 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -201,6 +201,21 @@ bool BasicBlock::IsLastHotBlock(Compiler* compiler) const return (bbNext == compiler->fgFirstColdBlock); } +//------------------------------------------------------------------------ +// IsFirstColdBlock: see if this is the first block in the cold section +// +// Arguments: +// compiler - current compiler instance +// +// Returns: +// true if this is fgFirstColdBlock +// (fgFirstColdBlock is null if there is no cold code) +// +bool BasicBlock::IsFirstColdBlock(Compiler* compiler) const +{ + return (this == compiler->fgFirstColdBlock); +} + //------------------------------------------------------------------------ // checkPredListOrder: see if pred list is properly ordered // diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 098ae712e13b3d..1d85fb435dc39c 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -581,6 +581,8 @@ struct BasicBlock : private LIR::Range bool IsLastHotBlock(Compiler* compiler) const; + bool IsFirstColdBlock(Compiler* compiler) const; + /* The following union describes the jump target(s) of this block */ union { unsigned bbJumpOffs; // PC offset (temporary only) diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index 325f7c4d05f460..23a2acbec3a107 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -310,7 +310,7 @@ void CodeGen::genCodeForBBlist() // bool needLabel = (block->bbFlags & BBF_HAS_LABEL) != 0; - if (block == compiler->fgFirstColdBlock) + if (block->IsFirstColdBlock(compiler)) { #ifdef DEBUG if 
(compiler->verbose) @@ -354,7 +354,7 @@ void CodeGen::genCodeForBBlist() gcInfo.gcRegByrefSetCur, false DEBUG_ARG(block)); } - if (block == compiler->fgFirstColdBlock) + if (block->IsFirstColdBlock(compiler)) { // We require the block that starts the Cold section to have a label noway_assert(block->bbEmitCookie); diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 2ce02ee3bc1718..70fd0abb775366 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -4984,7 +4984,7 @@ void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd) } // If bEnd was the first Cold basic block update fgFirstColdBlock - if (fgFirstColdBlock == bEnd) + if (bEnd->IsFirstColdBlock(this)) { fgFirstColdBlock = bPrev->Next(); } @@ -5060,7 +5060,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } else if (bPrev->KindIs(BBJ_ALWAYS) && block->NextIs(bPrev->bbJumpDest) && - !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) && !block->IsLastHotBlock(this)) + !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && !block->IsFirstColdBlock(this) && !block->IsLastHotBlock(this)) { // previous block is a BBJ_ALWAYS to the next block: change to BBJ_NONE. // Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS), @@ -5070,7 +5070,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) } // If this is the first Cold basic block update fgFirstColdBlock - if (block == fgFirstColdBlock) + if (block->IsFirstColdBlock(this)) { fgFirstColdBlock = block->Next(); } @@ -5168,7 +5168,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) } // If this is the first Cold basic block update fgFirstColdBlock - if (block == fgFirstColdBlock) + if (block->IsFirstColdBlock(this)) { fgFirstColdBlock = block->Next(); } diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index eddec9820a4f05..c848f1c862faa8 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -2352,7 +2352,7 @@ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, printf("bad prev link!\n"); } - if (inDefaultOrder && (block == fgFirstColdBlock)) + if (inDefaultOrder && (block->IsFirstColdBlock(this))) { printf( "~~~~~~%*s~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%*s~~~~~~~~~~~~~~~~~~~~~~~~~~%*s~~~~~~~~~~~~~~~~~~~~~~~~" diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 48a6f66fd0d2a2..3d3c81172663b5 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -6171,7 +6171,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) bNext->KindIs(BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block bNext->isEmpty() && // and it is an empty block (bNext != bNext->bbJumpDest) && // special case for self jumps - (bDest != fgFirstColdBlock) && + !bDest->IsFirstColdBlock(this) && (!fgInDifferentRegions(block, bDest))) // do not cross hot/cold sections { // case (a) @@ -6331,7 +6331,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) bNext->unmarkLoopAlign(this DEBUG_ARG("Optimized jump")); // If this is the first Cold basic block update fgFirstColdBlock - if (bNext == fgFirstColdBlock) + if (bNext->IsFirstColdBlock(this)) { fgFirstColdBlock = bNext->Next(); } From 5bb73fee590bfd576d50af79f8db96ae15dcdfa0 Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Thu, 5 Oct 2023 19:20:35 -0400 Subject: [PATCH 13/14] Address feedback --- 
src/coreclr/jit/block.h | 4 ++-- src/coreclr/jit/codegenarmarch.cpp | 4 ++-- src/coreclr/jit/codegencommon.cpp | 11 +++++------ src/coreclr/jit/codegenlinear.cpp | 16 ++++++++++------ src/coreclr/jit/codegenloongarch64.cpp | 4 ++-- src/coreclr/jit/codegenriscv64.cpp | 4 ++-- src/coreclr/jit/codegenxarch.cpp | 2 +- src/coreclr/jit/compiler.cpp | 8 ++++---- src/coreclr/jit/emit.cpp | 2 +- src/coreclr/jit/emitxarch.cpp | 2 +- src/coreclr/jit/fgbasic.cpp | 5 +++-- src/coreclr/jit/fgopt.cpp | 2 +- src/coreclr/jit/gentree.cpp | 2 +- src/coreclr/jit/importercalls.cpp | 2 +- src/coreclr/jit/lclvars.cpp | 4 ++-- src/coreclr/jit/liveness.cpp | 8 ++++---- src/coreclr/jit/morph.cpp | 2 +- src/coreclr/jit/morphblock.cpp | 4 ++-- src/coreclr/jit/rangecheck.cpp | 2 +- 19 files changed, 46 insertions(+), 42 deletions(-) diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 1d85fb435dc39c..fb44614fec95eb 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -1478,8 +1478,8 @@ class BasicBlockIterator { assert(m_block != nullptr); // Check that we haven't been spliced out of the list. - assert((m_block->IsLast()) || m_block->Next()->PrevIs(m_block)); - assert((m_block->IsFirst()) || m_block->Prev()->NextIs(m_block)); + assert(m_block->IsLast() || m_block->Next()->PrevIs(m_block)); + assert(m_block->IsFirst() || m_block->Prev()->NextIs(m_block)); m_block = m_block->Next(); return *this; diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp index c203223d78f95d..fd0b2de289b613 100644 --- a/src/coreclr/jit/codegenarmarch.cpp +++ b/src/coreclr/jit/codegenarmarch.cpp @@ -3338,8 +3338,8 @@ void CodeGen::genCall(GenTreeCall* call) #ifdef FEATURE_READYTORUN else if (call->IsR2ROrVirtualStubRelativeIndir()) { - assert(((call->IsR2RRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_PVALUE)) || - ((call->IsVirtualStubRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_VALUE))); + assert((call->IsR2RRelativeIndir() && (call->gtEntryPoint.accessType == IAT_PVALUE)) || + (call->IsVirtualStubRelativeIndir() && (call->gtEntryPoint.accessType == IAT_VALUE))); assert(call->gtControlExpr == nullptr); regNumber tmpReg = call->GetSingleTempReg(); diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index bfd8db45ce0233..495af5eb27158b 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -3235,7 +3235,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere regArgTab[regArgNum + i].writeThru = (varDsc->lvIsInReg() && varDsc->lvLiveInOutOfHndlr); /* mark stack arguments since we will take care of those first */ - regArgTab[regArgNum + i].stackArg = (varDsc->lvIsInReg()) ? false : true; + regArgTab[regArgNum + i].stackArg = varDsc->lvIsInReg() ? 
false : true; /* If it goes on the stack or in a register that doesn't hold * an argument anymore -> CANNOT form a circular dependency */ @@ -5210,8 +5210,8 @@ void CodeGen::genReserveEpilog(BasicBlock* block) assert(block != nullptr); const VARSET_TP& gcrefVarsArg(GetEmitter()->emitThisGCrefVars); - bool last = (block->IsLast()); - GetEmitter()->emitCreatePlaceholderIG(IGPT_EPILOG, block, gcrefVarsArg, gcrefRegsArg, byrefRegsArg, last); + GetEmitter()->emitCreatePlaceholderIG(IGPT_EPILOG, block, gcrefVarsArg, gcrefRegsArg, byrefRegsArg, + block->IsLast()); } #if defined(FEATURE_EH_FUNCLETS) @@ -5257,9 +5257,8 @@ void CodeGen::genReserveFuncletEpilog(BasicBlock* block) JITDUMP("Reserving funclet epilog IG for block " FMT_BB "\n", block->bbNum); - bool last = (block->IsLast()); GetEmitter()->emitCreatePlaceholderIG(IGPT_FUNCLET_EPILOG, block, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, - gcInfo.gcRegByrefSetCur, last); + gcInfo.gcRegByrefSetCur, block->IsLast()); } #endif // FEATURE_EH_FUNCLETS @@ -5812,7 +5811,7 @@ void CodeGen::genFnProlog() { excludeMask |= RBM_PINVOKE_FRAME; - assert((!compiler->opts.ShouldUsePInvokeHelpers()) || (compiler->info.compLvFrameListRoot == BAD_VAR_NUM)); + assert(!compiler->opts.ShouldUsePInvokeHelpers() || (compiler->info.compLvFrameListRoot == BAD_VAR_NUM)); if (!compiler->opts.ShouldUsePInvokeHelpers()) { excludeMask |= (RBM_PINVOKE_TCB | RBM_PINVOKE_SCRATCH); diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index 23a2acbec3a107..5ff4d9cc1b6f17 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -546,10 +546,14 @@ void CodeGen::genCodeForBBlist() /* Is this the last block, and are there any open scopes left ? */ - bool isLastBlockProcessed = (block->IsLast()); + bool isLastBlockProcessed; if (block->isBBCallAlwaysPair()) { - isLastBlockProcessed = (block->Next()->IsLast()); + isLastBlockProcessed = block->Next()->IsLast(); + } + else + { + isLastBlockProcessed = block->IsLast(); } if (compiler->opts.compDbgInfo && isLastBlockProcessed) @@ -614,7 +618,7 @@ void CodeGen::genCodeForBBlist() // Note: we may be generating a few too many NOPs for the case of call preceding an epilog. Technically, // if the next block is a BBJ_RETURN, an epilog will be generated, but there may be some instructions // generated before the OS epilog starts, such as a GS cookie check. - if ((block->IsLast()) || !BasicBlock::sameEHRegion(block, block->Next())) + if (block->IsLast() || !BasicBlock::sameEHRegion(block, block->Next())) { // We only need the NOP if we're not going to generate any more code as part of the block end. @@ -678,7 +682,7 @@ void CodeGen::genCodeForBBlist() // 2. If this is this is the last block of the hot section. // 3. If the subsequent block is a special throw block. // 4. On AMD64, if the next block is in a different EH region. 
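        // (A sketch of how that list maps onto the condition below:
        //      block->IsLast()                                   -> end of method
        //      block->Next()->bbFlags & BBF_FUNCLET_BEG          -> a funclet begins next
        //      !BasicBlock::sameEHRegion(block, block->Next())   -> case 4, different EH region
        //      fgIsThrowHlpBlk(block->Next())                    -> case 3, special throw block
        //      block->IsLastHotBlock(compiler)                   -> case 2, end of hot section
        // )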
- if ((block->IsLast()) || (block->Next()->bbFlags & BBF_FUNCLET_BEG) || + if (block->IsLast() || (block->Next()->bbFlags & BBF_FUNCLET_BEG) || !BasicBlock::sameEHRegion(block, block->Next()) || (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->Next())) || block->IsLastHotBlock(compiler)) @@ -817,7 +821,7 @@ void CodeGen::genCodeForBBlist() GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->KindIs(BBJ_ALWAYS))); } - if (!block->IsLast() && (block->Next()->isLoopAlign())) + if (!block->IsLast() && block->Next()->isLoopAlign()) { if (compiler->opts.compJitHideAlignBehindJmp) { @@ -948,7 +952,7 @@ void CodeGen::genSpillVar(GenTree* tree) { // We only have 'GTF_SPILL' and 'GTF_SPILLED' on a def of a write-thru lclVar // or a single-def var that is to be spilled at its definition. - assert((varDsc->IsAlwaysAliveInMemory()) && ((tree->gtFlags & GTF_VAR_DEF) != 0)); + assert(varDsc->IsAlwaysAliveInMemory() && ((tree->gtFlags & GTF_VAR_DEF) != 0)); } if (needsSpill) diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index d57e4d18a0af9a..ee1bc3f0ac1d08 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -6480,8 +6480,8 @@ void CodeGen::genCall(GenTreeCall* call) #ifdef FEATURE_READYTORUN else if (call->IsR2ROrVirtualStubRelativeIndir()) { - assert(((call->IsR2RRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_PVALUE)) || - ((call->IsVirtualStubRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_VALUE))); + assert((call->IsR2RRelativeIndir() && (call->gtEntryPoint.accessType == IAT_PVALUE)) || + (call->IsVirtualStubRelativeIndir() && (call->gtEntryPoint.accessType == IAT_VALUE))); assert(call->gtControlExpr == nullptr); regNumber tmpReg = call->GetSingleTempReg(); diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp index 0fabf1daf8c190..a3af48b4a022e6 100644 --- a/src/coreclr/jit/codegenriscv64.cpp +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -6173,8 +6173,8 @@ void CodeGen::genCall(GenTreeCall* call) #ifdef FEATURE_READYTORUN else if (call->IsR2ROrVirtualStubRelativeIndir()) { - assert(((call->IsR2RRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_PVALUE)) || - ((call->IsVirtualStubRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_VALUE))); + assert((call->IsR2RRelativeIndir() && (call->gtEntryPoint.accessType == IAT_PVALUE)) || + (call->IsVirtualStubRelativeIndir() && (call->gtEntryPoint.accessType == IAT_VALUE))); assert(call->gtControlExpr == nullptr); regNumber tmpReg = call->GetSingleTempReg(); diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index b2d427959ae50d..869872e5062d16 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -8248,7 +8248,7 @@ void CodeGen::genPutArgStkFieldList(GenTreePutArgStk* putArgStk) // For now, we only support the "push" case; we will push a full slot for the first field of each slot // within the struct. - assert((putArgStk->isPushKind()) && !preAdjustedStack && m_pushStkArg); + assert(putArgStk->isPushKind() && !preAdjustedStack && m_pushStkArg); // If we have pre-adjusted the stack and are simply storing the fields in order, set the offset to 0. // (Note that this mode is not currently being used.) 
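A note on the convention applied throughout this commit, sketched with a line from
the lclvars.cpp hunk below: redundant parentheses around a bare predicate call are
dropped, while parentheses around binary comparisons are kept:

    assert(!opts.ShouldUsePInvokeHelpers() || (info.compLvFrameListRoot == BAD_VAR_NUM));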
diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index eeb03d955667c9..13f31ec559a26d 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -5291,7 +5291,7 @@ PhaseStatus Compiler::placeLoopAlignInstructions()
             }
         }
 
-        if (!block->IsLast() && (block->Next()->isLoopAlign()))
+        if (!block->IsLast() && block->Next()->isLoopAlign())
         {
             // Loop alignment is disabled for cold blocks
             assert((block->bbFlags & BBF_COLD) == 0);
@@ -6379,9 +6379,9 @@ void Compiler::compCompileFinish()
         // Small methods cannot meaningfully have a big number of locals
         // or arguments. We always track arguments at the start of
         // the prolog which requires memory
-        (info.compLocalsCount <= 32) && (!opts.MinOpts()) && // We may have too many local variables, etc
-        (getJitStressLevel() == 0) &&                        // We need extra memory for stress
-        !opts.optRepeat &&                                   // We need extra memory to repeat opts
+        (info.compLocalsCount <= 32) && !opts.MinOpts() && // We may have too many local variables, etc
+        (getJitStressLevel() == 0) &&                      // We need extra memory for stress
+        !opts.optRepeat &&                                 // We need extra memory to repeat opts
         !compArenaAllocator->bypassHostAllocator() && // ArenaAllocator::getDefaultPageSize() is artificially low for
                                                       // DirectAlloc
         // Factor of 2x is because data-structures are bigger under DEBUG
diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp
index 61c118eacc810b..b38a00fe22ff94 100644
--- a/src/coreclr/jit/emit.cpp
+++ b/src/coreclr/jit/emit.cpp
@@ -9881,7 +9881,7 @@ void emitter::emitStackPop(BYTE* addr, bool isCall, unsigned char callInstrSize,
     // recorded (when we're doing the ptr reg map for a non-fully-interruptible method).
     if (emitFullGCinfo
 #ifndef JIT32_GCENCODER
-        || (emitComp->IsFullPtrRegMapRequired() && (!emitComp->GetInterruptible()) && isCall)
+        || (emitComp->IsFullPtrRegMapRequired() && !emitComp->GetInterruptible() && isCall)
 #endif // JIT32_GCENCODER
         )
     {
diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp
index d03bb82ea9cc5a..7cd209b6e5e71f 100644
--- a/src/coreclr/jit/emitxarch.cpp
+++ b/src/coreclr/jit/emitxarch.cpp
@@ -10550,7 +10550,7 @@ void emitter::emitDispAddrMode(instrDesc* id, bool noDetail)
             nsep = true;
         }
 
-        if ((id->idIsDspReloc()) && (id->idIns() != INS_i_jmp))
+        if (id->idIsDspReloc() && (id->idIns() != INS_i_jmp))
         {
             if (nsep)
             {
diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp
index 70fd0abb775366..4165d101471259 100644
--- a/src/coreclr/jit/fgbasic.cpp
+++ b/src/coreclr/jit/fgbasic.cpp
@@ -5060,7 +5060,8 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
 #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
         }
         else if (bPrev->KindIs(BBJ_ALWAYS) && block->NextIs(bPrev->bbJumpDest) &&
-                 !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && !block->IsFirstColdBlock(this) && !block->IsLastHotBlock(this))
+                 !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && !block->IsFirstColdBlock(this) &&
+                 !block->IsLastHotBlock(this))
         {
             // previous block is a BBJ_ALWAYS to the next block: change to BBJ_NONE.
             // Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS),
@@ -5635,7 +5636,7 @@ void Compiler::fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBloc
     {
         printf("Relocated block%s [" FMT_BB ".." FMT_BB "] inserted after " FMT_BB "%s\n", (bStart == bEnd) ? "" : "s",
                bStart->bbNum, bEnd->bbNum, insertAfterBlk->bbNum,
-               (insertAfterBlk->IsLast()) ? " at the end of method" : "");
+               insertAfterBlk->IsLast() ? " at the end of method" : "");
" at the end of method" : ""); } #endif // DEBUG diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 3d3c81172663b5..22436d28c834ef 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -5666,7 +5666,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // or if bEnd->bbNext is in a different try region // then we cannot move the blocks // - if ((bEnd->IsLast()) || !BasicBlock::sameTryRegion(startBlk, bEnd->Next())) + if (bEnd->IsLast() || !BasicBlock::sameTryRegion(startBlk, bEnd->Next())) { goto CANNOT_MOVE; } diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index f0ffca4a5ef081..f3ab7185b41480 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -871,7 +871,7 @@ int GenTree::GetRegisterDstCount(Compiler* compiler) const assert(!isContained()); if (!IsMultiRegNode()) { - return (IsValue()) ? 1 : 0; + return IsValue() ? 1 : 0; } else if (IsMultiRegCall()) { diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index 02774119c82e16..9d92da20126b36 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -1912,7 +1912,7 @@ GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig) // be at most 64 arguments - 32 lengths and 32 lower bounds. // - if ((!numArgsArg->IsCnsIntOrI()) || (numArgsArg->AsIntCon()->IconValue() < 1) || + if (!numArgsArg->IsCnsIntOrI() || (numArgsArg->AsIntCon()->IconValue() < 1) || (numArgsArg->AsIntCon()->IconValue() > 64)) { return nullptr; diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 8af56fa167317d..c414a0cd36de2e 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -4014,7 +4014,7 @@ void Compiler::lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, /* Is this a call to unmanaged code ? */ if (tree->IsCall() && compMethodRequiresPInvokeFrame()) { - assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM)); + assert(!opts.ShouldUsePInvokeHelpers() || (info.compLvFrameListRoot == BAD_VAR_NUM)); if (!opts.ShouldUsePInvokeHelpers()) { /* Get the special variable descriptor */ @@ -4246,7 +4246,7 @@ PhaseStatus Compiler::lvaMarkLocalVars() // If we have direct pinvokes, verify the frame list root local was set up properly if (compMethodRequiresPInvokeFrame()) { - assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM)); + assert(!opts.ShouldUsePInvokeHelpers() || (info.compLvFrameListRoot == BAD_VAR_NUM)); if (!opts.ShouldUsePInvokeHelpers()) { noway_assert(info.compLvFrameListRoot >= info.compLocalsCount && info.compLvFrameListRoot < lvaCount); diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index 1ac0c42910de44..9447bf7a8dc540 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -288,7 +288,7 @@ void Compiler::fgPerNodeLocalVarLiveness(GenTree* tree) if ((call->IsUnmanaged() || call->IsTailCallViaJitHelper()) && compMethodRequiresPInvokeFrame()) { - assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM)); + assert(!opts.ShouldUsePInvokeHelpers() || (info.compLvFrameListRoot == BAD_VAR_NUM)); if (!opts.ShouldUsePInvokeHelpers() && !call->IsSuppressGCTransition()) { // Get the FrameRoot local and mark it as used. 
@@ -493,7 +493,7 @@ void Compiler::fgPerBlockLocalVarLiveness()
 
         if (block->KindIs(BBJ_RETURN) && compMethodRequiresPInvokeFrame())
         {
-            assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM));
+            assert(!opts.ShouldUsePInvokeHelpers() || (info.compLvFrameListRoot == BAD_VAR_NUM));
             if (!opts.ShouldUsePInvokeHelpers())
             {
                 // 32-bit targets always pop the frame in the epilog.
@@ -1391,7 +1391,7 @@ void Compiler::fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call)
     // This ensures that this variable is kept alive at the tail-call
     if (call->IsTailCallViaJitHelper() && compMethodRequiresPInvokeFrame())
     {
-        assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM));
+        assert(!opts.ShouldUsePInvokeHelpers() || (info.compLvFrameListRoot == BAD_VAR_NUM));
         if (!opts.ShouldUsePInvokeHelpers())
         {
             // Get the FrameListRoot local and make it live.
@@ -1412,7 +1412,7 @@ void Compiler::fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call)
     if (call->IsUnmanaged() && compMethodRequiresPInvokeFrame())
     {
         // Get the FrameListRoot local and make it live.
-        assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM));
+        assert(!opts.ShouldUsePInvokeHelpers() || (info.compLvFrameListRoot == BAD_VAR_NUM));
         if (!opts.ShouldUsePInvokeHelpers() && !call->IsSuppressGCTransition())
         {
             LclVarDsc* frameVarDsc = lvaGetDesc(info.compLvFrameListRoot);
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index 96133d2d4064a8..e07cf9d4e0dfe9 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -13188,7 +13188,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
                 /* Unmark the loop if we are removing a backwards branch */
                 /* dest block must also be marked as a loop head and */
                 /* We must be able to reach the backedge block */
-                if ((block->bbJumpDest->isLoopHead()) && (block->bbJumpDest->bbNum <= block->bbNum) &&
+                if (block->bbJumpDest->isLoopHead() && (block->bbJumpDest->bbNum <= block->bbNum) &&
                     fgReachable(block->bbJumpDest, block))
                 {
                     optUnmarkLoopBlocks(block->bbJumpDest, block);
diff --git a/src/coreclr/jit/morphblock.cpp b/src/coreclr/jit/morphblock.cpp
index 7e864a5eab25cb..75e19184cfcf90 100644
--- a/src/coreclr/jit/morphblock.cpp
+++ b/src/coreclr/jit/morphblock.cpp
@@ -792,13 +792,13 @@ void MorphCopyBlockHelper::MorphStructCases()
     }
 
 #if defined(TARGET_ARM)
-    if ((m_store->OperIsIndir()) && m_store->AsIndir()->IsUnaligned())
+    if (m_store->OperIsIndir() && m_store->AsIndir()->IsUnaligned())
     {
         JITDUMP(" store is unaligned");
         requiresCopyBlock = true;
     }
 
-    if ((m_src->OperIsIndir()) && m_src->AsIndir()->IsUnaligned())
+    if (m_src->OperIsIndir() && m_src->AsIndir()->IsUnaligned())
     {
         JITDUMP(" src is unaligned");
         requiresCopyBlock = true;
diff --git a/src/coreclr/jit/rangecheck.cpp b/src/coreclr/jit/rangecheck.cpp
index 29e8992fdbe59e..7dcae117530c0b 100644
--- a/src/coreclr/jit/rangecheck.cpp
+++ b/src/coreclr/jit/rangecheck.cpp
@@ -871,7 +871,7 @@ void RangeCheck::MergeEdgeAssertions(ValueNum normalLclVN, ASSERT_VALARG_TP asse
         }
 
         int curCns = pRange->uLimit.cns;
-        int limCns = (limit.IsBinOpArray()) ? limit.cns : 0;
+        int limCns = limit.IsBinOpArray() ? limit.cns : 0;
 
         // Incoming limit doesn't tighten the existing upper limit.
         if (limCns >= curCns)

From c03b7c9b022400d1adae414c56d5550663d611cf Mon Sep 17 00:00:00 2001
From: Aman Khalid
Date: Thu, 5 Oct 2023 19:25:25 -0400
Subject: [PATCH 14/14] Missed one

---
 src/coreclr/jit/codegenriscv64.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp
index a3af48b4a022e6..4f5fe303cf1011 100644
--- a/src/coreclr/jit/codegenriscv64.cpp
+++ b/src/coreclr/jit/codegenriscv64.cpp
@@ -1182,7 +1182,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
         BasicBlock* const jumpDest = nextBlock->bbJumpDest;
 
         // Now go to where the finally funclet needs to return to.
-        if ((jumpDest == nextBlock->Next()) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
+        if (nextBlock->NextIs(jumpDest) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
        {
             // Fall-through.
             // TODO-RISCV64-CQ: Can we get rid of this instruction, and just have the call return directly
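
Closing note on the last hunk: it swaps a raw pointer comparison for NextIs(), so the fall-through condition reads as a predicate on the block list rather than an equality test on bbNext. A minimal sketch of what the helper abstracts, with the same simplified BasicBlock assumption as the earlier sketch, fgInDifferentRegions reduced to a boolean parameter, and canFallThrough as an illustrative name rather than a JIT API:

struct BasicBlock
{
    BasicBlock* bbNext = nullptr;

    BasicBlock* Next() const
    {
        return bbNext;
    }

    // True when 'block' immediately follows this block in the block list.
    bool NextIs(const BasicBlock* block) const
    {
        return bbNext == block;
    }
};

// Shape of the decision in genCallFinally: fall through to the finally's
// continuation only when it is the lexically next block and the two blocks
// were not split across the hot/cold boundary; otherwise a jump is emitted.
bool canFallThrough(const BasicBlock* nextBlock, const BasicBlock* jumpDest, bool inDifferentRegions)
{
    return nextBlock->NextIs(jumpDest) && !inDifferentRegions;
}
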