From b8daad22f85e0e2d99fef5f9f4bd3b6e89363a40 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sat, 21 Feb 2026 16:50:37 +0000 Subject: [PATCH 1/5] Initial plan From bee5ec1719c236554dd8d12bd398b3830580b1bc Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sat, 21 Feb 2026 17:31:25 +0000 Subject: [PATCH 2/5] Allow range check cloning for BBJ_RETURN blocks and add test Co-authored-by: EgorBo <523221+EgorBo@users.noreply.github.com> --- src/coreclr/jit/rangecheckcloning.cpp | 72 +++++++++++++++--- .../ReturnBlockRangeCheckCloning.cs | 75 +++++++++++++++++++ .../ReturnBlockRangeCheckCloning.csproj | 8 ++ 3 files changed, 146 insertions(+), 9 deletions(-) create mode 100644 src/tests/JIT/opt/RangeChecks/ReturnBlockRangeCheckCloning.cs create mode 100644 src/tests/JIT/opt/RangeChecks/ReturnBlockRangeCheckCloning.csproj diff --git a/src/coreclr/jit/rangecheckcloning.cpp b/src/coreclr/jit/rangecheckcloning.cpp index 15b864630f90f0..717113534f5aa5 100644 --- a/src/coreclr/jit/rangecheckcloning.cpp +++ b/src/coreclr/jit/rangecheckcloning.cpp @@ -151,6 +151,31 @@ static void RemoveBoundsChk(Compiler* comp, GenTree** treeUse, Statement* stmt) // nextBb: // ... 
// +// For BBJ_RETURN blocks where the bounds checks are in the return statement, +// both fastpathBb and fallbackBb are BBJ_RETURN blocks (no nextBb needed): +// +// prevBb: +// goto lowerBndBb +// +// lowerBndBb: +// if (idx < 0) +// goto fallbackBb +// else +// goto upperBndBb +// +// upperBndBb: +// if (idx < len - maxConstOffset) +// goto fastpathBb +// else +// goto fallbackBb +// +// fallbackBb: +// [Original block with bounds checks, includes return] +// +// fastpathBb: +// [Cloned block with no bounds checks, includes return] +// +// // Arguments: // comp - The compiler instance // block - The block to clone @@ -190,7 +215,16 @@ static BasicBlock* optRangeCheckCloning_DoClone(Compiler* comp, // Now split the block at the last bounds check using fgSplitBlockAfterStatement: // TODO-RangeCheckCloning: call gtSplitTree for lastBndChkStmt as well, to cut off // the stuff we don't have to clone. - BasicBlock* lastBb = comp->fgSplitBlockAfterStatement(fastpathBb, lastStmt); + // + // For BBJ_RETURN blocks where lastStmt is the block's last statement (i.e., the return + // statement contains bounds checks), we don't need to split - both fast and slow paths + // will be BBJ_RETURN blocks directly. + bool isReturnBlock = fastpathBb->KindIs(BBJ_RETURN) && (lastStmt == fastpathBb->lastStmt()); + BasicBlock* lastBb = nullptr; + if (!isReturnBlock) + { + lastBb = comp->fgSplitBlockAfterStatement(fastpathBb, lastStmt); + } DebugInfo debugInfo = fastpathBb->firstStmt()->GetDebugInfo(); @@ -270,8 +304,9 @@ static BasicBlock* optRangeCheckCloning_DoClone(Compiler* comp, // 3) fallbackBb: // // For the fallback (slow path), we just entirely clone the fast path. + // For BBJ_RETURN blocks, the fallback is also a return block. // - BasicBlock* fallbackBb = comp->fgNewBBafter(BBJ_ALWAYS, upperBndBb, false); + BasicBlock* fallbackBb = comp->fgNewBBafter(isReturnBlock ? 
BBJ_RETURN : BBJ_ALWAYS, upperBndBb, false); BasicBlock::CloneBlockState(comp, fallbackBb, fastpathBb); // 4) fastBlockBb: @@ -281,12 +316,15 @@ static BasicBlock* optRangeCheckCloning_DoClone(Compiler* comp, // Wire up the edges // comp->fgRedirectEdge(prevBb->TargetEdgeRef(), lowerBndBb); - FlowEdge* fallbackToNextBb = comp->fgAddRefPred(lastBb, fallbackBb); FlowEdge* lowerBndToUpperBndEdge = comp->fgAddRefPred(upperBndBb, lowerBndBb); FlowEdge* lowerBndToFallbackEdge = comp->fgAddRefPred(fallbackBb, lowerBndBb); FlowEdge* upperBndToFastPathEdge = comp->fgAddRefPred(fastpathBb, upperBndBb); FlowEdge* upperBndToFallbackEdge = comp->fgAddRefPred(fallbackBb, upperBndBb); - fallbackBb->SetTargetEdge(fallbackToNextBb); + if (!isReturnBlock) + { + FlowEdge* fallbackToNextBb = comp->fgAddRefPred(lastBb, fallbackBb); + fallbackBb->SetTargetEdge(fallbackToNextBb); + } lowerBndBb->SetTrueEdge(lowerBndToFallbackEdge); lowerBndBb->SetFalseEdge(lowerBndToUpperBndEdge); upperBndBb->SetTrueEdge(upperBndToFastPathEdge); @@ -298,7 +336,6 @@ static BasicBlock* optRangeCheckCloning_DoClone(Compiler* comp, upperBndBb->inheritWeight(prevBb); fastpathBb->inheritWeight(prevBb); fallbackBb->bbSetRunRarely(); - fallbackToNextBb->setLikelihood(1.0f); lowerBndToUpperBndEdge->setLikelihood(1.0f); lowerBndToFallbackEdge->setLikelihood(0.0f); upperBndToFastPathEdge->setLikelihood(1.0f); @@ -357,7 +394,7 @@ static BasicBlock* optRangeCheckCloning_DoClone(Compiler* comp, assert(BasicBlock::sameEHRegion(prevBb, upperBndBb)); assert(BasicBlock::sameEHRegion(prevBb, fastpathBb)); assert(BasicBlock::sameEHRegion(prevBb, fallbackBb)); - assert(BasicBlock::sameEHRegion(prevBb, lastBb)); + assert(isReturnBlock || BasicBlock::sameEHRegion(prevBb, lastBb)); return fastpathBb; } @@ -525,9 +562,26 @@ PhaseStatus Compiler::optRangeCheckCloning() stmtIdx++; if (block->HasTerminator() && (stmt == block->lastStmt())) { - // TODO-RangeCheckCloning: Splitting these blocks at the last statements - // require 
using gtSplitTree for the last bounds check. - break; + // For BBJ_RETURN blocks, we can still process the last statement since + // we can duplicate return blocks without needing a diamond/join shape. + // However, we must skip if: + // - JIT32_GCENCODER is defined (hard limit on epilogue count) + // - The block is genReturnBB (shared return block) + if (block->KindIs(BBJ_RETURN) && (block != genReturnBB)) + { +#ifndef JIT32_GCENCODER + // Allow scanning the return statement for bounds checks. + // The DoClone path will handle BBJ_RETURN specially. +#else + break; +#endif + } + else + { + // TODO-RangeCheckCloning: Splitting these blocks at the last statements + // require using gtSplitTree for the last bounds check. + break; + } } // Now just record all the bounds checks in the block (in the execution order) diff --git a/src/tests/JIT/opt/RangeChecks/ReturnBlockRangeCheckCloning.cs b/src/tests/JIT/opt/RangeChecks/ReturnBlockRangeCheckCloning.cs new file mode 100644 index 00000000000000..cc149ec70ffdef --- /dev/null +++ b/src/tests/JIT/opt/RangeChecks/ReturnBlockRangeCheckCloning.cs @@ -0,0 +1,75 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System; +using System.Runtime.CompilerServices; +using Xunit; + +// Tests that consecutive range checks in return blocks are combined +// via range check cloning. 
+ +public class ReturnBlockRangeCheckCloning +{ + [MethodImpl(MethodImplOptions.NoInlining)] + static int ArrayAccessReturn(int[] abcd) + { + return abcd[0] + abcd[1] + abcd[2] + abcd[3]; + } + + [MethodImpl(MethodImplOptions.NoInlining)] + static int ArrayAccessReturnWithOffset(int[] arr, int i) + { + return arr[i] + arr[i + 1] + arr[i + 2]; + } + + [MethodImpl(MethodImplOptions.NoInlining)] + static int SpanAccessReturn(ReadOnlySpan<int> span) + { + return span[0] + span[1] + span[2] + span[3]; + } + + [Fact] + public static int TestEntryPoint() + { + int[] arr = new int[] { 10, 20, 30, 40 }; + + if (ArrayAccessReturn(arr) != 100) + return 0; + + if (ArrayAccessReturnWithOffset(arr, 0) != 60) + return 0; + + if (ArrayAccessReturnWithOffset(arr, 1) != 90) + return 0; + + if (SpanAccessReturn(arr) != 100) + return 0; + + // Test that out-of-range still throws + bool threwArrayAccess = false; + try + { + ArrayAccessReturn(new int[] { 1, 2, 3 }); + } + catch (IndexOutOfRangeException) + { + threwArrayAccess = true; + } + if (!threwArrayAccess) + return 0; + + bool threwOffset = false; + try + { + ArrayAccessReturnWithOffset(arr, 2); + } + catch (IndexOutOfRangeException) + { + threwOffset = true; + } + if (!threwOffset) + return 0; + + return 100; + } +} diff --git a/src/tests/JIT/opt/RangeChecks/ReturnBlockRangeCheckCloning.csproj b/src/tests/JIT/opt/RangeChecks/ReturnBlockRangeCheckCloning.csproj new file mode 100644 index 00000000000000..de6d5e08882e86 --- /dev/null +++ b/src/tests/JIT/opt/RangeChecks/ReturnBlockRangeCheckCloning.csproj @@ -0,0 +1,8 @@ +<Project Sdk="Microsoft.NET.Sdk"> + <PropertyGroup> + <Optimize>True</Optimize> + </PropertyGroup> + <ItemGroup> + <Compile Include="$(MSBuildProjectName).cs" /> + </ItemGroup> +</Project> From 97a9e5b27536a244552ac9b8e4d827a9e1f7b70d Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sat, 21 Feb 2026 17:34:21 +0000 Subject: [PATCH 3/5] Simplify scanning phase preprocessor pattern for BBJ_RETURN Co-authored-by: EgorBo <523221+EgorBo@users.noreply.github.com> --- src/coreclr/jit/rangecheckcloning.cpp | 9 +-------- 1 file changed, 1 
insertion(+), 8 deletions(-) diff --git a/src/coreclr/jit/rangecheckcloning.cpp b/src/coreclr/jit/rangecheckcloning.cpp index 717113534f5aa5..f3d58f259c4950 100644 --- a/src/coreclr/jit/rangecheckcloning.cpp +++ b/src/coreclr/jit/rangecheckcloning.cpp @@ -567,16 +567,9 @@ PhaseStatus Compiler::optRangeCheckCloning() // However, we must skip if: // - JIT32_GCENCODER is defined (hard limit on epilogue count) // - The block is genReturnBB (shared return block) - if (block->KindIs(BBJ_RETURN) && (block != genReturnBB)) - { #ifndef JIT32_GCENCODER - // Allow scanning the return statement for bounds checks. - // The DoClone path will handle BBJ_RETURN specially. -#else - break; + if (!block->KindIs(BBJ_RETURN) || (block == genReturnBB)) #endif - } - else { // TODO-RangeCheckCloning: Splitting these blocks at the last statements // require using gtSplitTree for the last bounds check. From 09169816ac71e86013fd0c3c2beacafd6cae9d72 Mon Sep 17 00:00:00 2001 From: Egor Bogatov Date: Sat, 21 Feb 2026 23:38:36 +0100 Subject: [PATCH 4/5] Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/coreclr/jit/rangecheckcloning.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/coreclr/jit/rangecheckcloning.cpp b/src/coreclr/jit/rangecheckcloning.cpp index f3d58f259c4950..3d2395490bcc57 100644 --- a/src/coreclr/jit/rangecheckcloning.cpp +++ b/src/coreclr/jit/rangecheckcloning.cpp @@ -324,6 +324,7 @@ static BasicBlock* optRangeCheckCloning_DoClone(Compiler* comp, { FlowEdge* fallbackToNextBb = comp->fgAddRefPred(lastBb, fallbackBb); fallbackBb->SetTargetEdge(fallbackToNextBb); + fallbackToNextBb->setLikelihood(1.0f); } lowerBndBb->SetTrueEdge(lowerBndToFallbackEdge); lowerBndBb->SetFalseEdge(lowerBndToUpperBndEdge); @@ -567,14 +568,16 @@ PhaseStatus Compiler::optRangeCheckCloning() // However, we must skip if: // - JIT32_GCENCODER is defined (hard limit on epilogue count) // - The block is genReturnBB 
(shared return block) -#ifndef JIT32_GCENCODER +#ifdef JIT32_GCENCODER + break; +#else if (!block->KindIs(BBJ_RETURN) || (block == genReturnBB)) -#endif { // TODO-RangeCheckCloning: Splitting these blocks at the last statements // require using gtSplitTree for the last bounds check. break; } +#endif } // Now just record all the bounds checks in the block (in the execution order) From 3e4addbfac22dc7e4ce1bc5013836ed65364af2f Mon Sep 17 00:00:00 2001 From: Egor Bogatov Date: Sat, 21 Feb 2026 23:39:17 +0100 Subject: [PATCH 5/5] Update rangecheckcloning.cpp --- src/coreclr/jit/rangecheckcloning.cpp | 24 +----------------------- 1 file changed, 1 insertion(+), 23 deletions(-) diff --git a/src/coreclr/jit/rangecheckcloning.cpp b/src/coreclr/jit/rangecheckcloning.cpp index 3d2395490bcc57..d49e57e8a2344c 100644 --- a/src/coreclr/jit/rangecheckcloning.cpp +++ b/src/coreclr/jit/rangecheckcloning.cpp @@ -152,29 +152,7 @@ static void RemoveBoundsChk(Compiler* comp, GenTree** treeUse, Statement* stmt) // ... // // For BBJ_RETURN blocks where the bounds checks are in the return statement, -// both fastpathBb and fallbackBb are BBJ_RETURN blocks (no nextBb needed): -// -// prevBb: -// goto lowerBndBb -// -// lowerBndBb: -// if (idx < 0) -// goto fallbackBb -// else -// goto upperBndBb -// -// upperBndBb: -// if (idx < len - maxConstOffset) -// goto fastpathBb -// else -// goto fallbackBb -// -// fallbackBb: -// [Original block with bounds checks, includes return] -// -// fastpathBb: -// [Cloned block with no bounds checks, includes return] -// +// both fastpathBb and fallbackBb are BBJ_RETURN blocks (no nextBb needed). // // Arguments: // comp - The compiler instance