diff --git a/src/coreclr/jit/rangecheckcloning.cpp b/src/coreclr/jit/rangecheckcloning.cpp index 15b864630f90f0..d49e57e8a2344c 100644 --- a/src/coreclr/jit/rangecheckcloning.cpp +++ b/src/coreclr/jit/rangecheckcloning.cpp @@ -151,6 +151,9 @@ static void RemoveBoundsChk(Compiler* comp, GenTree** treeUse, Statement* stmt) // nextBb: // ... // +// For BBJ_RETURN blocks where the bounds checks are in the return statement, +// both fastpathBb and fallbackBb are BBJ_RETURN blocks (no nextBb needed). +// // Arguments: // comp - The compiler instance // block - The block to clone @@ -190,7 +193,16 @@ static BasicBlock* optRangeCheckCloning_DoClone(Compiler* comp, // Now split the block at the last bounds check using fgSplitBlockAfterStatement: // TODO-RangeCheckCloning: call gtSplitTree for lastBndChkStmt as well, to cut off // the stuff we don't have to clone. - BasicBlock* lastBb = comp->fgSplitBlockAfterStatement(fastpathBb, lastStmt); + // + // For BBJ_RETURN blocks where lastStmt is the block's last statement (i.e., the return + // statement contains bounds checks), we don't need to split - both fast and slow paths + // will be BBJ_RETURN blocks directly. + bool isReturnBlock = fastpathBb->KindIs(BBJ_RETURN) && (lastStmt == fastpathBb->lastStmt()); + BasicBlock* lastBb = nullptr; + if (!isReturnBlock) + { + lastBb = comp->fgSplitBlockAfterStatement(fastpathBb, lastStmt); + } DebugInfo debugInfo = fastpathBb->firstStmt()->GetDebugInfo(); @@ -270,8 +282,9 @@ static BasicBlock* optRangeCheckCloning_DoClone(Compiler* comp, // 3) fallbackBb: // // For the fallback (slow path), we just entirely clone the fast path. + // For BBJ_RETURN blocks, the fallback is also a return block. // - BasicBlock* fallbackBb = comp->fgNewBBafter(BBJ_ALWAYS, upperBndBb, false); + BasicBlock* fallbackBb = comp->fgNewBBafter(isReturnBlock ? 
BBJ_RETURN : BBJ_ALWAYS, upperBndBb, false); BasicBlock::CloneBlockState(comp, fallbackBb, fastpathBb); // 4) fastBlockBb: @@ -281,12 +294,16 @@ static BasicBlock* optRangeCheckCloning_DoClone(Compiler* comp, // Wire up the edges // comp->fgRedirectEdge(prevBb->TargetEdgeRef(), lowerBndBb); - FlowEdge* fallbackToNextBb = comp->fgAddRefPred(lastBb, fallbackBb); FlowEdge* lowerBndToUpperBndEdge = comp->fgAddRefPred(upperBndBb, lowerBndBb); FlowEdge* lowerBndToFallbackEdge = comp->fgAddRefPred(fallbackBb, lowerBndBb); FlowEdge* upperBndToFastPathEdge = comp->fgAddRefPred(fastpathBb, upperBndBb); FlowEdge* upperBndToFallbackEdge = comp->fgAddRefPred(fallbackBb, upperBndBb); - fallbackBb->SetTargetEdge(fallbackToNextBb); + if (!isReturnBlock) + { + FlowEdge* fallbackToNextBb = comp->fgAddRefPred(lastBb, fallbackBb); + fallbackBb->SetTargetEdge(fallbackToNextBb); + fallbackToNextBb->setLikelihood(1.0f); + } lowerBndBb->SetTrueEdge(lowerBndToFallbackEdge); lowerBndBb->SetFalseEdge(lowerBndToUpperBndEdge); upperBndBb->SetTrueEdge(upperBndToFastPathEdge); @@ -298,7 +315,6 @@ static BasicBlock* optRangeCheckCloning_DoClone(Compiler* comp, upperBndBb->inheritWeight(prevBb); fastpathBb->inheritWeight(prevBb); fallbackBb->bbSetRunRarely(); - fallbackToNextBb->setLikelihood(1.0f); lowerBndToUpperBndEdge->setLikelihood(1.0f); lowerBndToFallbackEdge->setLikelihood(0.0f); upperBndToFastPathEdge->setLikelihood(1.0f); @@ -357,7 +373,7 @@ static BasicBlock* optRangeCheckCloning_DoClone(Compiler* comp, assert(BasicBlock::sameEHRegion(prevBb, upperBndBb)); assert(BasicBlock::sameEHRegion(prevBb, fastpathBb)); assert(BasicBlock::sameEHRegion(prevBb, fallbackBb)); - assert(BasicBlock::sameEHRegion(prevBb, lastBb)); + assert(isReturnBlock || BasicBlock::sameEHRegion(prevBb, lastBb)); return fastpathBb; } @@ -525,9 +541,21 @@ PhaseStatus Compiler::optRangeCheckCloning() stmtIdx++; if (block->HasTerminator() && (stmt == block->lastStmt())) { - // TODO-RangeCheckCloning: Splitting these 
blocks at the last statements - // require using gtSplitTree for the last bounds check. + // For BBJ_RETURN blocks, we can still process the last statement since + // we can duplicate return blocks without needing a diamond/join shape. + // However, we must skip if: + // - JIT32_GCENCODER is defined (hard limit on epilogue count) + // - The block is genReturnBB (shared return block) +#ifdef JIT32_GCENCODER break; +#else + if (!block->KindIs(BBJ_RETURN) || (block == genReturnBB)) + { + // TODO-RangeCheckCloning: Splitting these blocks at the last statements + // require using gtSplitTree for the last bounds check. + break; + } +#endif } // Now just record all the bounds checks in the block (in the execution order) diff --git a/src/tests/JIT/opt/RangeChecks/ReturnBlockRangeCheckCloning.cs b/src/tests/JIT/opt/RangeChecks/ReturnBlockRangeCheckCloning.cs new file mode 100644 index 00000000000000..cc149ec70ffdef --- /dev/null +++ b/src/tests/JIT/opt/RangeChecks/ReturnBlockRangeCheckCloning.cs @@ -0,0 +1,75 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System; +using System.Runtime.CompilerServices; +using Xunit; + +// Tests that consecutive range checks in return blocks are combined +// via range check cloning. 
+ +public class ReturnBlockRangeCheckCloning + { + [MethodImpl(MethodImplOptions.NoInlining)] + static int ArrayAccessReturn(int[] abcd) + { + return abcd[0] + abcd[1] + abcd[2] + abcd[3]; + } + + [MethodImpl(MethodImplOptions.NoInlining)] + static int ArrayAccessReturnWithOffset(int[] arr, int i) + { + return arr[i] + arr[i + 1] + arr[i + 2]; + } + + [MethodImpl(MethodImplOptions.NoInlining)] + static int SpanAccessReturn(ReadOnlySpan<int> span) + { + return span[0] + span[1] + span[2] + span[3]; + } + + [Fact] + public static int TestEntryPoint() + { + int[] arr = new int[] { 10, 20, 30, 40 }; + + if (ArrayAccessReturn(arr) != 100) + return 0; + + if (ArrayAccessReturnWithOffset(arr, 0) != 60) + return 0; + + if (ArrayAccessReturnWithOffset(arr, 1) != 90) + return 0; + + if (SpanAccessReturn(arr) != 100) + return 0; + + // Test that out-of-range still throws + bool threwArrayAccess = false; + try + { + ArrayAccessReturn(new int[] { 1, 2, 3 }); + } + catch (IndexOutOfRangeException) + { + threwArrayAccess = true; + } + if (!threwArrayAccess) + return 0; + + bool threwOffset = false; + try + { + ArrayAccessReturnWithOffset(arr, 2); + } + catch (IndexOutOfRangeException) + { + threwOffset = true; + } + if (!threwOffset) + return 0; + + return 100; + } +} diff --git a/src/tests/JIT/opt/RangeChecks/ReturnBlockRangeCheckCloning.csproj b/src/tests/JIT/opt/RangeChecks/ReturnBlockRangeCheckCloning.csproj new file mode 100644 index 00000000000000..de6d5e08882e86 --- /dev/null +++ b/src/tests/JIT/opt/RangeChecks/ReturnBlockRangeCheckCloning.csproj @@ -0,0 +1,8 @@ +<Project Sdk="Microsoft.NET.Sdk"> + <PropertyGroup> + <Optimize>True</Optimize> + </PropertyGroup> + <ItemGroup> + <Compile Include="$(MSBuildProjectName).cs" /> + </ItemGroup> +</Project>