From 76f7393e3638d672353092b02c53e5e261472854 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Tue, 10 Mar 2026 20:18:43 +0000
Subject: [PATCH 1/7] Initial plan
From ac80f9ee6d08064b911dbc89f226a2ac194d04b5 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Tue, 10 Mar 2026 20:50:54 +0000
Subject: [PATCH 2/7] Enable runtime-async for netcoreapp source projects
excluding OOB, pre-net11, browser/wasm, and mobile
Co-authored-by: agocke <515774+agocke@users.noreply.github.com>
---
src/libraries/Directory.Build.targets | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/src/libraries/Directory.Build.targets b/src/libraries/Directory.Build.targets
index 947dbfc3e71a66..674ffe7dd24617 100644
--- a/src/libraries/Directory.Build.targets
+++ b/src/libraries/Directory.Build.targets
@@ -127,8 +127,15 @@
'$(IsGeneratorProject)' != 'true'">true
-
-
+
+
true
$(Features);runtime-async=on
From 6ffe3ab43621d9681adc6f0fe81f8805ab925ef5 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Wed, 11 Mar 2026 11:24:15 +0000
Subject: [PATCH 3/7] Factor RuntimeAsyncSupported, remove TestReadyToRun gate,
delete UNSUPPORTED_RuntimeAsync config and DOTNET_RuntimeAsync env var
Co-authored-by: jkotas <6668460+jkotas@users.noreply.github.com>
---
eng/testing/tests.targets | 16 ++++++++++------
src/coreclr/inc/clrconfigvalues.h | 3 ---
src/coreclr/vm/eeconfig.cpp | 4 +---
src/libraries/Directory.Build.targets | 15 ++++++++++-----
.../Interop/COM/RuntimeAsync/RuntimeAsync.csproj | 5 -----
5 files changed, 21 insertions(+), 22 deletions(-)
diff --git a/eng/testing/tests.targets b/eng/testing/tests.targets
index 1dd5cdd9dfbdaa..b14cabb2b8d1c2 100644
--- a/eng/testing/tests.targets
+++ b/eng/testing/tests.targets
@@ -1,14 +1,18 @@
+
+
+ true
+
+
true
$(Features);runtime-async=on
diff --git a/src/coreclr/inc/clrconfigvalues.h b/src/coreclr/inc/clrconfigvalues.h
index fd64be3df1b59f..78d11c697a1e88 100644
--- a/src/coreclr/inc/clrconfigvalues.h
+++ b/src/coreclr/inc/clrconfigvalues.h
@@ -715,9 +715,6 @@ RETAIL_CONFIG_DWORD_INFO(EXTERNAL_EnableRiscV64Zbb, W("EnableRiscV64
RETAIL_CONFIG_DWORD_INFO(EXTERNAL_EnableRiscV64Zbs, W("EnableRiscV64Zbs"), 1, "Allows RiscV64 Zbs hardware intrinsics to be disabled")
#endif
-// Runtime-async
-RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_RuntimeAsync, W("RuntimeAsync"), 1, "Enables runtime async method support")
-
///
/// Uncategorized
///
diff --git a/src/coreclr/vm/eeconfig.cpp b/src/coreclr/vm/eeconfig.cpp
index 776febb4045381..7a1846f29aef73 100644
--- a/src/coreclr/vm/eeconfig.cpp
+++ b/src/coreclr/vm/eeconfig.cpp
@@ -233,7 +233,7 @@ HRESULT EEConfig::Init()
fGDBJitEmitDebugFrame = false;
#endif
- runtimeAsync = false;
+ runtimeAsync = true;
return S_OK;
}
@@ -848,8 +848,6 @@ HRESULT EEConfig::sync()
fUseCachedInterfaceDispatch = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_UseCachedInterfaceDispatch) != 0;
#endif // defined(FEATURE_CACHED_INTERFACE_DISPATCH) && defined(FEATURE_VIRTUAL_STUB_DISPATCH)
- runtimeAsync = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_RuntimeAsync) != 0;
-
return hr;
}
diff --git a/src/libraries/Directory.Build.targets b/src/libraries/Directory.Build.targets
index 674ffe7dd24617..32287b87d19ac7 100644
--- a/src/libraries/Directory.Build.targets
+++ b/src/libraries/Directory.Build.targets
@@ -127,15 +127,20 @@
'$(IsGeneratorProject)' != 'true'">true
+
+
+ true
+
+
+ and '$(RuntimeAsyncSupported)' == 'true'">
true
$(Features);runtime-async=on
diff --git a/src/tests/Interop/COM/RuntimeAsync/RuntimeAsync.csproj b/src/tests/Interop/COM/RuntimeAsync/RuntimeAsync.csproj
index 4a015bc375e61e..c3dcb6a823d350 100644
--- a/src/tests/Interop/COM/RuntimeAsync/RuntimeAsync.csproj
+++ b/src/tests/Interop/COM/RuntimeAsync/RuntimeAsync.csproj
@@ -1,7 +1,5 @@
-
- true
$(Features);runtime-async=on
@@ -13,7 +11,4 @@
-
-
-
From c10423f0026ec0fc925c5cc660a7dc573ed88ab5 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Wed, 11 Mar 2026 11:25:28 +0000
Subject: [PATCH 4/7] Add comment explaining runtimeAsync is always enabled
Co-authored-by: jkotas <6668460+jkotas@users.noreply.github.com>
---
src/coreclr/vm/eeconfig.cpp | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/coreclr/vm/eeconfig.cpp b/src/coreclr/vm/eeconfig.cpp
index 7a1846f29aef73..0650cd0748955f 100644
--- a/src/coreclr/vm/eeconfig.cpp
+++ b/src/coreclr/vm/eeconfig.cpp
@@ -233,6 +233,7 @@ HRESULT EEConfig::Init()
fGDBJitEmitDebugFrame = false;
#endif
+ // Runtime async is always enabled; the UNSUPPORTED_RuntimeAsync config knob has been removed.
runtimeAsync = true;
return S_OK;
From cffb194dca55895a81f4ce6b1e117f3b9e8eb256 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Wed, 11 Mar 2026 14:40:31 +0000
Subject: [PATCH 5/7] Remove runtimeAsync field, RuntimeAsync() method, and its
caller guard
Co-authored-by: jkotas <6668460+jkotas@users.noreply.github.com>
---
src/coreclr/vm/eeconfig.cpp | 3 ---
src/coreclr/vm/eeconfig.h | 4 ----
src/coreclr/vm/method.cpp | 7 -------
3 files changed, 14 deletions(-)
diff --git a/src/coreclr/vm/eeconfig.cpp b/src/coreclr/vm/eeconfig.cpp
index 0650cd0748955f..97cc3a04c58df2 100644
--- a/src/coreclr/vm/eeconfig.cpp
+++ b/src/coreclr/vm/eeconfig.cpp
@@ -233,9 +233,6 @@ HRESULT EEConfig::Init()
fGDBJitEmitDebugFrame = false;
#endif
- // Runtime async is always enabled; the UNSUPPORTED_RuntimeAsync config knob has been removed.
- runtimeAsync = true;
-
return S_OK;
}
diff --git a/src/coreclr/vm/eeconfig.h b/src/coreclr/vm/eeconfig.h
index fecb76eb69fb41..d57ea26c4bc1d5 100644
--- a/src/coreclr/vm/eeconfig.h
+++ b/src/coreclr/vm/eeconfig.h
@@ -449,8 +449,6 @@ class EEConfig
#endif
- bool RuntimeAsync() const { LIMITED_METHOD_CONTRACT; return runtimeAsync; }
-
#ifdef FEATURE_INTERPRETER
bool EnableInterpreter() const { LIMITED_METHOD_CONTRACT; return enableInterpreter; }
#endif
@@ -654,8 +652,6 @@ class EEConfig
bool fUseCachedInterfaceDispatch;
#endif // defined(FEATURE_CACHED_INTERFACE_DISPATCH) && defined(FEATURE_VIRTUAL_STUB_DISPATCH)
- bool runtimeAsync; // True if the runtime supports async methods
-
public:
enum BitForMask {
diff --git a/src/coreclr/vm/method.cpp b/src/coreclr/vm/method.cpp
index 32d9a717280647..b11a65865c1bda 100644
--- a/src/coreclr/vm/method.cpp
+++ b/src/coreclr/vm/method.cpp
@@ -2392,13 +2392,6 @@ bool IsTypeDefOrRefImplementedInSystemModule(Module* pModule, mdToken tk)
MethodReturnKind ClassifyMethodReturnKind(SigPointer sig, Module* pModule, ULONG* offsetOfAsyncDetails, bool *isValueTask)
{
- // Without runtime async, every declared method is classified as a NormalMethod.
- // Thus code that handles runtime async scenarios becomes unreachable.
- if (!g_pConfig->RuntimeAsync())
- {
- return MethodReturnKind::NormalMethod;
- }
-
PCCOR_SIGNATURE initialSig = sig.GetPtr();
uint32_t data;
IfFailThrow(sig.GetCallingConvInfo(&data));
From 55a587604283f4df2a9674fcb77cad0c232250fb Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Wed, 11 Mar 2026 18:33:15 +0000
Subject: [PATCH 6/7] Exclude OOB assemblies (IsPackable=true) from
runtime-async enablement
Co-authored-by: agocke <515774+agocke@users.noreply.github.com>
---
src/libraries/Directory.Build.targets | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/src/libraries/Directory.Build.targets b/src/libraries/Directory.Build.targets
index 32287b87d19ac7..87342ee832c6f6 100644
--- a/src/libraries/Directory.Build.targets
+++ b/src/libraries/Directory.Build.targets
@@ -136,8 +136,9 @@
and '$(RuntimeFlavor)' != 'Mono'">true
-
+
From cd8b1e4194ef67d023367f58ab38cfcfd4af7f44 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Wed, 11 Mar 2026 22:56:28 +0000
Subject: [PATCH 7/7] Merge main and resolve conflict in
eng/testing/tests.targets
Co-authored-by: agocke <515774+agocke@users.noreply.github.com>
---
.config/dotnet-tools.json | 2 +-
.github/copilot-instructions.md | 11 +-
THIRD-PARTY-NOTICES.TXT | 33 +
docs/design/coreclr/botr/clr-abi.md | 1 +
docs/design/datacontracts/BuiltInCOM.md | 56 +-
docs/design/datacontracts/Loader.md | 8 +-
eng/Version.Details.props | 6 +-
eng/Version.Details.xml | 12 +-
.../runtime-extra-platforms-ioslike.yml | 2 +-
...ntime-extra-platforms-ioslikesimulator.yml | 2 +-
.../runtime-extra-platforms-maccatalyst.yml | 4 +-
eng/testing/BrowserVersions.props | 2 +-
src/coreclr/interpreter/compiler.cpp | 2 -
src/coreclr/jit/codegen.h | 7 +-
src/coreclr/jit/codegenlinear.cpp | 10 +
src/coreclr/jit/codegenloongarch64.cpp | 96 +--
src/coreclr/jit/codegenwasm.cpp | 25 +-
src/coreclr/jit/compiler.h | 13 +
src/coreclr/jit/compiler.hpp | 61 +-
src/coreclr/jit/emit.cpp | 5 +-
src/coreclr/jit/emitloongarch64.cpp | 31 +-
src/coreclr/jit/gentree.cpp | 38 +-
src/coreclr/jit/hwintrinsic.cpp | 16 +-
src/coreclr/jit/hwintrinsiccodegenarm64.cpp | 73 ++
src/coreclr/jit/hwintrinsiclistarm64sve.h | 20 +-
src/coreclr/jit/ifconversion.cpp | 100 +--
src/coreclr/jit/importer.cpp | 5 +-
src/coreclr/jit/lclmorph.cpp | 46 +-
src/coreclr/jit/lclvars.cpp | 8 +-
src/coreclr/jit/lower.cpp | 4 +-
src/coreclr/jit/lsraarm64.cpp | 5 +
src/coreclr/jit/morph.cpp | 70 +-
src/coreclr/jit/utils.h | 3 +-
.../nativeaot/Runtime/loongarch64/GcProbe.S | 37 +-
.../Runtime/unix/unixasmmacrosloongarch64.inc | 1 +
src/coreclr/scripts/superpmi.py | 3 +-
.../DependencyAnalysis/AssemblyStubNode.cs | 3 +
.../INodeWithTypeSignature.cs | 18 +
.../Compiler/DependencyAnalysis/ObjectNode.cs | 8 +
.../Compiler/DependencyAnalysis/Relocation.cs | 6 +-
.../Target_Wasm/WasmTypes.cs | 1 +
.../{Target_Wasm => }/WasmTypeNode.cs | 5 +-
.../Compiler/ObjectWriter/ObjectWriter.cs | 31 +-
.../Compiler/ObjectWriter/PEObjectWriter.cs | 7 +-
.../Compiler/ObjectWriter/WasmNative.cs | 1 +
.../Compiler/ObjectWriter/WasmObjectWriter.cs | 13 +-
.../tools/Common/JitInterface/WasmLowering.cs | 2 +
.../DependencyAnalysis/NodeFactory.cs | 17 +
.../ILCompiler.Compiler.csproj | 3 +-
.../ReadyToRun/DelayLoadHelperMethodImport.cs | 2 +-
.../ReadyToRun/DelayLoadMethodImport.cs | 2 +-
.../ReadyToRun/MethodWithGCInfo.cs | 2 +-
.../ReadyToRun/Target_Wasm/ImportThunk.cs | 1 +
.../ReadyToRunCodegenNodeFactory.cs | 8 +
.../ILCompiler.ReadyToRun.csproj | 3 +-
.../JitInterface/CorInfoImpl.ReadyToRun.cs | 1 +
.../DependencyAnalysis/MethodCodeNode.cs | 2 +-
src/coreclr/utilcode/util.cpp | 4 +
src/coreclr/vm/appdomain.cpp | 6 +-
src/coreclr/vm/comcache.h | 9 +
.../vm/datadescriptor/datadescriptor.inc | 8 +
src/coreclr/vm/loongarch64/asmhelpers.S | 12 +-
src/coreclr/vm/loongarch64/cgencpu.h | 5 +
src/coreclr/vm/loongarch64/stubs.cpp | 2 +
src/coreclr/vm/prestub.cpp | 5 +
src/coreclr/vm/runtimecallablewrapper.h | 2 +-
.../src/SourceGenerators/DiagnosticInfo.cs | 60 --
.../Cryptography/Asn1Reader/AsnValueReader.cs | 258 -------
.../AsnWriter/AsnWriterExtensions.cs | 38 +
...y.Cryptography.AsnWriter.Shared.projitems} | 4 +-
.../Security/Cryptography/RSAOpenSsl.cs | 47 +-
.../CompositeMLDsaTestHelpers.cs | 4 +-
.../src/Microsoft.Bcl.Cryptography.csproj | 2 +-
.../Microsoft.Bcl.Cryptography.Tests.csproj | 2 +-
.../ConfigurationBindingGenerator.Parser.cs | 10 +-
.../gen/ConfigurationBindingGenerator.cs | 45 +-
...nfiguration.Binder.SourceGeneration.csproj | 1 -
.../SourceGenerationTests/GeneratorTests.cs | 125 ++++
.../gen/LoggerMessageGenerator.Parser.cs | 10 +-
.../gen/LoggerMessageGenerator.Roslyn4.0.cs | 127 ++--
...soft.Extensions.Logging.Generators.targets | 1 -
.../LoggerMessageGeneratorParserTests.cs | 31 +
.../Net/NetworkInformation/Ping.Windows.cs | 12 +-
.../tests/FunctionalTests/PingTest.cs | 56 ++
.../src/System.Net.Security.csproj | 2 -
...egotiateAuthenticationPal.ManagedSpnego.cs | 12 +-
.../Arm/Sve.PlatformNotSupported.cs | 124 ----
.../src/System/Runtime/Intrinsics/Arm/Sve.cs | 124 ----
.../Arm/Sve2.PlatformNotSupported.cs | 578 +++++++++++++++
.../src/System/Runtime/Intrinsics/Arm/Sve2.cs | 578 +++++++++++++++
.../ComClassGeneratorDiagnosticsAnalyzer.cs | 106 +++
.../ComClassGenerator.cs | 26 +-
.../gen/ComInterfaceGenerator/ComClassInfo.cs | 36 +-
.../DiagnosticOr.cs | 26 -
...eneratorInitializationContextExtensions.cs | 20 -
.../ComClassGeneratorDiagnostics.cs | 110 ++-
.../ComClassGeneratorOutputShape.cs | 10 +-
.../ref/System.Runtime.Intrinsics.cs | 109 ++-
.../Directory/Delete_MountVolume.cs | 28 +-
.../System.Security.Cryptography.Pkcs.csproj | 2 +-
.../src/System.Security.Cryptography.csproj | 2 +-
.../System.Security.Cryptography.Tests.csproj | 2 +-
.../gen/JsonSourceGenerator.Parser.cs | 4 +-
.../gen/JsonSourceGenerator.Roslyn3.11.cs | 4 +-
.../gen/JsonSourceGenerator.Roslyn4.0.cs | 48 +-
.../System.Text.Json.SourceGeneration.targets | 1 -
.../JsonSourceGeneratorDiagnosticsTests.cs | 29 +
.../JsonSourceGeneratorIncrementalTests.cs | 1 -
.../gen/RegexGenerator.Parser.cs | 40 +-
.../gen/RegexGenerator.cs | 132 ++--
.../RegexGeneratorHelper.netcoreapp.cs | 2 +-
.../RegexGeneratorParserTests.cs | 22 +
...iCompatBaseline.NetCoreAppLatestStable.xml | 96 +++
.../App/Layout/NavMenu.razor | 2 +-
.../Contracts/IBuiltInCOM.cs | 2 +
.../DataType.cs | 1 +
.../Constants.cs | 1 +
.../Contracts/BuiltInCOM_1.cs | 19 +
.../Contracts/Loader_1.cs | 3 +-
.../Data/InterfaceEntry.cs | 19 +
.../Data/RCW.cs | 14 +
.../ClrDataAppDomain.cs | 130 +++-
.../ClrDataFrame.cs | 92 ++-
.../ClrDataTask.cs | 2 +-
.../ISOSDacInterface.cs | 16 +-
.../IXCLRData.cs | 4 +-
.../SOSDacImpl.cs | 91 ++-
.../managed/cdac/tests/BuiltInCOMTests.cs | 211 ++++++
.../Debuggees/RCWInterfaces/Program.cs | 119 +++
.../RCWInterfaces/RCWInterfaces.csproj | 7 +
.../DumpTests/Debuggees/StackWalk/Program.cs | 18 +-
.../cdac/tests/DumpTests/DumpTestBase.cs | 16 +-
.../DumpTests/IXCLRDataAppDomainDumpTests.cs | 218 ++++++
.../DumpTests/IXCLRDataFrameDumpTests.cs | 308 ++++++++
.../tests/DumpTests/RCWInterfacesDumpTests.cs | 75 ++
.../GenerateHWIntrinsicTests/Arm/Sve2Tests.cs | 101 +++
.../GenerateHWIntrinsicTests/Arm/SveTests.cs | 18 -
.../Github/Runtime_76219/Runtime_76219.csproj | 3 +
.../JIT/Methodical/Boxing/morph/sin3double.il | 6 -
.../flowgraph/bug619534/moduleHandleCache.cs | 2 -
src/tests/JIT/Methodical/switch/switch6.il | 7 -
.../JIT/opt/InstructionCombining/Casts.cs | 694 ++++++++++++++++++
.../JIT/opt/InstructionCombining/Casts.csproj | 17 +
src/tests/nativeaot/Directory.Build.props | 6 -
src/tests/nativeaot/Directory.Build.targets | 13 -
.../MobileSmokeTest/MobileSmokeTest.cs | 28 +
.../MobileSmokeTest/MobileSmokeTest.csproj | 14 +
.../nativeaot/StartupHook/StartupHook.csproj | 2 +-
src/tests/nativeaot/nativeaot.csproj | 7 +
.../TestFrameworkTests.g.cs | 12 +
.../Assertions/KeptAttributeAttribute.cs | 13 +
.../Reflection/TypeMap.cs | 25 +-
.../VerifyKeptAttributeAttributeWorks.cs | 310 ++++++++
.../TestFramework/VerifyLocalsAreChanged.cs | 44 ++
.../TestFramework/VerifyLocalsAreChanged.xml | 11 +
.../TestCasesRunner/AssemblyChecker.cs | 170 ++++-
156 files changed, 5630 insertions(+), 1355 deletions(-)
create mode 100644 src/coreclr/tools/Common/Compiler/DependencyAnalysis/INodeWithTypeSignature.cs
rename src/coreclr/tools/Common/Compiler/DependencyAnalysis/{Target_Wasm => }/WasmTypeNode.cs (96%)
delete mode 100644 src/libraries/Common/src/SourceGenerators/DiagnosticInfo.cs
delete mode 100644 src/libraries/Common/src/System/Security/Cryptography/Asn1Reader/AsnValueReader.cs
create mode 100644 src/libraries/Common/src/System/Security/Cryptography/AsnWriter/AsnWriterExtensions.cs
rename src/libraries/Common/src/System/Security/Cryptography/{Asn1Reader/System.Security.Cryptography.Asn1Reader.Shared.projitems => AsnWriter/System.Security.Cryptography.AsnWriter.Shared.projitems} (66%)
create mode 100644 src/libraries/System.Runtime.InteropServices/gen/ComInterfaceGenerator/Analyzers/ComClassGeneratorDiagnosticsAnalyzer.cs
create mode 100644 src/native/managed/cdac/Microsoft.Diagnostics.DataContractReader.Contracts/Data/InterfaceEntry.cs
create mode 100644 src/native/managed/cdac/tests/DumpTests/Debuggees/RCWInterfaces/Program.cs
create mode 100644 src/native/managed/cdac/tests/DumpTests/Debuggees/RCWInterfaces/RCWInterfaces.csproj
create mode 100644 src/native/managed/cdac/tests/DumpTests/IXCLRDataAppDomainDumpTests.cs
create mode 100644 src/native/managed/cdac/tests/DumpTests/IXCLRDataFrameDumpTests.cs
create mode 100644 src/native/managed/cdac/tests/DumpTests/RCWInterfacesDumpTests.cs
create mode 100644 src/tests/JIT/opt/InstructionCombining/Casts.cs
create mode 100644 src/tests/JIT/opt/InstructionCombining/Casts.csproj
delete mode 100644 src/tests/nativeaot/Directory.Build.targets
create mode 100644 src/tests/nativeaot/MobileSmokeTest/MobileSmokeTest.cs
create mode 100644 src/tests/nativeaot/MobileSmokeTest/MobileSmokeTest.csproj
create mode 100644 src/tests/nativeaot/nativeaot.csproj
create mode 100644 src/tools/illink/test/Mono.Linker.Tests.Cases/TestFramework/VerifyKeptAttributeAttributeWorks.cs
create mode 100644 src/tools/illink/test/Mono.Linker.Tests.Cases/TestFramework/VerifyLocalsAreChanged.cs
create mode 100644 src/tools/illink/test/Mono.Linker.Tests.Cases/TestFramework/VerifyLocalsAreChanged.xml
diff --git a/.config/dotnet-tools.json b/.config/dotnet-tools.json
index 380e3141ecebc9..6b46169cb36d04 100644
--- a/.config/dotnet-tools.json
+++ b/.config/dotnet-tools.json
@@ -15,7 +15,7 @@
]
},
"microsoft.dotnet.xharness.cli": {
- "version": "11.0.0-prerelease.26064.3",
+ "version": "11.0.0-prerelease.26160.2",
"commands": [
"xharness"
]
diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
index f405371e14de23..d3c1221e713383 100644
--- a/.github/copilot-instructions.md
+++ b/.github/copilot-instructions.md
@@ -8,7 +8,9 @@ You MUST make your best effort to ensure any code changes satisfy those criteria
If you make code changes, do not complete without checking the relevant code builds and relevant tests still pass after the last edits you make. Do not simply assume that your changes fix test failures you see, actually build and run those tests again to confirm.
-Before completing, use the `code-review` skill to review your code changes. Any issues flagged as errors or warnings should be addressed before completing.
+When running under CCA and before completing, use the `code-review` skill to review your code changes. Any issues flagged as errors or warnings should be addressed before the task is considered complete.
+
+When NOT running under CCA, skip the `code-review` skill if the user has stated they will review the changes themselves.
Before making changes to a directory, search for `README.md` files in that directory and its parent directories up to the repository root. Read any you find — they contain conventions, patterns, and architectural context relevant to your work.
@@ -38,6 +40,13 @@ In addition to the rules enforced by `.editorconfig`, you SHOULD:
- For markdown (`.md`) files, ensure there is no trailing whitespace at the end of any line.
- When adding XML documentation to APIs, follow the guidelines at [`docs.prompt.md`](/.github/prompts/docs.prompt.md).
+When NOT running under CCA, guidance for creating commits and pushing changes:
+
+- Never squash and force push unless explicitly instructed. Always push incremental commits on top of previous PR changes.
+- Never push to an active PR without being explicitly asked, even in autopilot/yolo mode. Always wait for explicit instruction to push.
+- Never chain commit and push in the same command. Always commit first, report what was committed, then wait for an explicit push instruction. This creates a mandatory decision point.
+- Prefer creating a new commit rather than amending an existing one. Exceptions: (1) explicitly asked to amend, or (2) the existing commit is obviously broken with something minor (e.g., typo or comment fix) and hasn't been pushed yet.
+
---
# Building & Testing in dotnet/runtime
diff --git a/THIRD-PARTY-NOTICES.TXT b/THIRD-PARTY-NOTICES.TXT
index 033db7e5e46e5d..7f020cf8e67da5 100644
--- a/THIRD-PARTY-NOTICES.TXT
+++ b/THIRD-PARTY-NOTICES.TXT
@@ -1424,3 +1424,36 @@ NIST-developed software is expressly provided "AS IS." NIST MAKES NO WARRANTY OF
You are solely responsible for determining the appropriateness of using and distributing the software and you assume all risks associated with its use, including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and the unavailability or interruption of operation. This software is not intended to be used in any situation where a failure could cause risk of injury or damage to property. The software developed by NIST employees is not subject to copyright protection within the United States.
+License notice for ANTLR 4
+-------------------------------
+
+https://github.com/antlr/antlr4
+
+Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither name of copyright holders nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/docs/design/coreclr/botr/clr-abi.md b/docs/design/coreclr/botr/clr-abi.md
index 889a4c9cde656b..3f1cf8dd3fa090 100644
--- a/docs/design/coreclr/botr/clr-abi.md
+++ b/docs/design/coreclr/botr/clr-abi.md
@@ -116,6 +116,7 @@ To return `Continuation` we use a volatile/calee-trash register that cannot be u
| arm | r2 |
| arm64 | x2 |
| risc-v | a2 |
+| loongarch64 | a2 |
### Passing `Continuation` argument
The `Continuation` parameter is passed at the same position as generic instantiation parameter or immediately after, if both present. For x86 the argument order is reversed.
diff --git a/docs/design/datacontracts/BuiltInCOM.md b/docs/design/datacontracts/BuiltInCOM.md
index 4e7cae7200443b..c9e28915501d52 100644
--- a/docs/design/datacontracts/BuiltInCOM.md
+++ b/docs/design/datacontracts/BuiltInCOM.md
@@ -5,6 +5,20 @@ This contract is for getting information related to built-in COM.
## APIs of contract
``` csharp
+public struct COMInterfacePointerData
+{
+ // Address of the slot in ComCallWrapper that holds the COM interface pointer.
+ public TargetPointer InterfacePointerAddress;
+ // MethodTable for this interface, or TargetPointer.Null for slot 0 (IUnknown/IDispatch).
+ public TargetPointer MethodTable;
+}
+
+public record struct RCWCleanupInfo(
+ TargetPointer RCW,
+ TargetPointer Context,
+ TargetPointer STAThread,
+ bool IsFreeThreaded);
+
public ulong GetRefCount(TargetPointer ccw);
// Check whether the COM wrappers handle is weak.
public bool IsHandleWeak(TargetPointer ccw);
@@ -14,20 +28,13 @@ public TargetPointer GetCCWFromInterfacePointer(TargetPointer interfacePointer);
// Enumerate the COM interfaces exposed by the ComCallWrapper chain.
// ccw may be any ComCallWrapper in the chain; the implementation navigates to the start.
public IEnumerable GetCCWInterfaces(TargetPointer ccw);
-```
-
-where `COMInterfacePointerData` is:
-``` csharp
-public struct COMInterfacePointerData
-{
- // Address of the slot in ComCallWrapper that holds the COM interface pointer.
- public TargetPointer InterfacePointerAddress;
- // MethodTable for this interface, or TargetPointer.Null for slot 0 (IUnknown/IDispatch).
- public TargetPointer MethodTable;
-}
// Enumerate entries in the RCW cleanup list.
// If cleanupListPtr is Null, the global g_pRCWCleanupList is used.
public IEnumerable GetRCWCleanupList(TargetPointer cleanupListPtr);
+// Enumerate the interface entries cached in an RCW.
+public IEnumerable<(TargetPointer MethodTable, TargetPointer Unknown)> GetRCWInterfaces(TargetPointer rcw);
+// Get the COM context cookie for an RCW.
+public TargetPointer GetRCWContext(TargetPointer rcw);
```
## Version 1
@@ -51,6 +58,9 @@ Data descriptors used:
| `RCW` | `CtxCookie` | COM context cookie for the RCW |
| `RCW` | `CtxEntry` | Pointer to `CtxEntry` (bit 0 is a synchronization flag; must be masked off before use) |
| `CtxEntry` | `STAThread` | STA thread pointer for the context entry |
+| `RCW` | `InterfaceEntries` | Offset of the inline interface entry cache array within the RCW struct |
+| `InterfaceEntry` | `MethodTable` | MethodTable pointer for the cached COM interface |
+| `InterfaceEntry` | `Unknown` | `IUnknown*` pointer for the cached COM interface |
Global variables used:
| Global Name | Type | Purpose |
@@ -62,6 +72,7 @@ Global variables used:
| `TearOffAddRefSimple` | pointer | Address of `Unknown_AddRefSpecial`; identifies `SimpleComCallWrapper` interface pointers |
| `TearOffAddRefSimpleInner` | pointer | Address of `Unknown_AddRefInner`; identifies inner `SimpleComCallWrapper` interface pointers |
| `RCWCleanupList` | `pointer` | Pointer to the global `g_pRCWCleanupList` instance |
+| `RCWInterfaceCacheSize` | `uint32` | Number of entries in the inline interface entry cache (`INTERFACE_ENTRY_CACHE_SIZE`) |
### Contract Constants:
| Name | Type | Purpose | Value |
@@ -156,5 +167,28 @@ public IEnumerable GetRCWCleanupList(TargetPointer cleanupListPt
bucketPtr = _target.ReadPointer(bucketPtr + /* RCW::NextCleanupBucket offset */);
}
}
+
+public IEnumerable<(TargetPointer MethodTable, TargetPointer Unknown)> GetRCWInterfaces(TargetPointer rcw)
+{
+ // InterfaceEntries is an inline array — the offset gives the address of the first element.
+ TargetPointer interfaceEntriesAddr = rcw + /* RCW::InterfaceEntries offset */;
+ uint cacheSize = _target.ReadGlobal("RCWInterfaceCacheSize");
+ uint entrySize = /* size of InterfaceEntry */;
+
+ for (uint i = 0; i < cacheSize; i++)
+ {
+ TargetPointer entryAddress = interfaceEntriesAddr + i * entrySize;
+ TargetPointer methodTable = _target.ReadPointer(entryAddress + /* InterfaceEntry::MethodTable offset */);
+ TargetPointer unknown = _target.ReadPointer(entryAddress + /* InterfaceEntry::Unknown offset */);
+ // An entry is free if Unknown == null (matches InterfaceEntry::IsFree())
+ if (unknown != TargetPointer.Null)
+ yield return (methodTable, unknown);
+ }
+}
+
+public TargetPointer GetRCWContext(TargetPointer rcw)
+{
+ return _target.ReadPointer(rcw + /* RCW::CtxCookie offset */);
+}
```
diff --git a/docs/design/datacontracts/Loader.md b/docs/design/datacontracts/Loader.md
index 0be000bb750063..95f1652cf1b805 100644
--- a/docs/design/datacontracts/Loader.md
+++ b/docs/design/datacontracts/Loader.md
@@ -182,6 +182,7 @@ IReadOnlyDictionary GetLoaderAllocatorHeaps(TargetPointer
| Name | Type | Purpose | Value |
| --- | --- | --- | --- |
| `ASSEMBLY_NOTIFYFLAGS_PROFILER_NOTIFIED` | uint | Flag in Assembly NotifyFlags indicating the Assembly will notify profilers. | `0x1` |
+| `DefaultDomainFriendlyName` | string | Friendly name returned when `AppDomain.FriendlyName` is null (matches native `DEFAULT_DOMAIN_FRIENDLY_NAME`) | `"DefaultDomain"` |
Contracts used:
| Contract Name |
@@ -327,8 +328,11 @@ string ILoader.GetAppDomainFriendlyName()
{
TargetPointer appDomainPointer = target.ReadGlobalPointer("AppDomain");
TargetPointer appDomain = target.ReadPointer(appDomainPointer)
- TargetPointer pathStart = appDomain + /* AppDomain::FriendlyName offset */;
- char[] name = // Read from target starting at pathStart until null terminator
+ TargetPointer namePtr = appDomain + /* AppDomain::FriendlyName offset */;
+ // Match native AppDomain::GetFriendlyName(): return "DefaultDomain" when pointer is null.
+ if (namePtr == TargetPointer.Null)
+ return "DefaultDomain";
+ char[] name = // Read from target starting at namePtr until null terminator
return new string(name);
}
diff --git a/eng/Version.Details.props b/eng/Version.Details.props
index b4f5bc132e70e3..a2269cd6d6c096 100644
--- a/eng/Version.Details.props
+++ b/eng/Version.Details.props
@@ -113,9 +113,9 @@ This file should be imported by eng/Versions.props
11.0.0-beta.26159.1
11.0.0-beta.26159.1
- 11.0.0-prerelease.26064.3
- 11.0.0-prerelease.26064.3
- 11.0.0-prerelease.26064.3
+ 11.0.0-prerelease.26160.2
+ 11.0.0-prerelease.26160.2
+ 11.0.0-prerelease.26160.2
diff --git a/eng/Version.Details.xml b/eng/Version.Details.xml
index d87a8ed73c30a3..e8afb73f0d2a56 100644
--- a/eng/Version.Details.xml
+++ b/eng/Version.Details.xml
@@ -287,17 +287,17 @@
https://github.com/dotnet/dotnet
5507d7a2f05bb6c073a055ead6ce1c4bbe396cda
-
+
https://github.com/dotnet/xharness
- 31e0b8e08f57890f7b7004b93361d69cd4b21079
+ c32a7777a0f8f7a4fc8d9920d445f5f4b5658d38
-
+
https://github.com/dotnet/xharness
- 31e0b8e08f57890f7b7004b93361d69cd4b21079
+ c32a7777a0f8f7a4fc8d9920d445f5f4b5658d38
-
+
https://github.com/dotnet/xharness
- 31e0b8e08f57890f7b7004b93361d69cd4b21079
+ c32a7777a0f8f7a4fc8d9920d445f5f4b5658d38
https://github.com/dotnet/dotnet
diff --git a/eng/pipelines/extra-platforms/runtime-extra-platforms-ioslike.yml b/eng/pipelines/extra-platforms/runtime-extra-platforms-ioslike.yml
index da7fff53452b34..d5a9d77ac0b842 100644
--- a/eng/pipelines/extra-platforms/runtime-extra-platforms-ioslike.yml
+++ b/eng/pipelines/extra-platforms/runtime-extra-platforms-ioslike.yml
@@ -163,7 +163,7 @@ jobs:
- template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml
parameters:
creator: dotnet-bot
- testBuildArgs: tree nativeaot/SmokeTests /p:BuildNativeAOTRuntimePack=true
+ testBuildArgs: tree nativeaot/MobileSmokeTest /p:BuildNativeAOTRuntimePack=true
testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig)
buildAllTestsAsStandalone: true
diff --git a/eng/pipelines/extra-platforms/runtime-extra-platforms-ioslikesimulator.yml b/eng/pipelines/extra-platforms/runtime-extra-platforms-ioslikesimulator.yml
index 9a5d8939d1f06a..f5044b5d50630f 100644
--- a/eng/pipelines/extra-platforms/runtime-extra-platforms-ioslikesimulator.yml
+++ b/eng/pipelines/extra-platforms/runtime-extra-platforms-ioslikesimulator.yml
@@ -130,7 +130,7 @@ jobs:
- template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml
parameters:
creator: dotnet-bot
- testBuildArgs: tree nativeaot/SmokeTests /p:BuildNativeAOTRuntimePack=true
+ testBuildArgs: tree nativeaot/MobileSmokeTest /p:BuildNativeAOTRuntimePack=true
testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig)
buildAllTestsAsStandalone: true
diff --git a/eng/pipelines/extra-platforms/runtime-extra-platforms-maccatalyst.yml b/eng/pipelines/extra-platforms/runtime-extra-platforms-maccatalyst.yml
index cd2bba5421b151..92cba45048e616 100644
--- a/eng/pipelines/extra-platforms/runtime-extra-platforms-maccatalyst.yml
+++ b/eng/pipelines/extra-platforms/runtime-extra-platforms-maccatalyst.yml
@@ -162,7 +162,7 @@ jobs:
parameters:
creator: dotnet-bot
buildAllTestsAsStandalone: true
- testBuildArgs: tree nativeaot/SmokeTests /p:BuildNativeAOTRuntimePack=true
+ testBuildArgs: tree nativeaot/MobileSmokeTest /p:BuildNativeAOTRuntimePack=true
testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig)
#
@@ -201,7 +201,7 @@ jobs:
parameters:
creator: dotnet-bot
buildAllTestsAsStandalone: true
- testBuildArgs: tree nativeaot/SmokeTests /p:BuildNativeAOTRuntimePack=true /p:DevTeamProvisioning=adhoc /p:EnableAppSandbox=true
+ testBuildArgs: tree nativeaot/MobileSmokeTest /p:BuildNativeAOTRuntimePack=true /p:DevTeamProvisioning=adhoc /p:EnableAppSandbox=true
testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig)
#
diff --git a/eng/testing/BrowserVersions.props b/eng/testing/BrowserVersions.props
index dd5f54e162bf27..0725d73db6ce2b 100644
--- a/eng/testing/BrowserVersions.props
+++ b/eng/testing/BrowserVersions.props
@@ -8,7 +8,7 @@
1536371
https://storage.googleapis.com/chromium-browser-snapshots/Mac_Arm/1536376
14.3.127
- 146.0.7680.31
+ 146.0.7680.66
1582197
https://storage.googleapis.com/chromium-browser-snapshots/Win_x64/1582218
14.6.202
diff --git a/src/coreclr/interpreter/compiler.cpp b/src/coreclr/interpreter/compiler.cpp
index aa7282f0cbb3d4..10c649fa958f13 100644
--- a/src/coreclr/interpreter/compiler.cpp
+++ b/src/coreclr/interpreter/compiler.cpp
@@ -3096,7 +3096,6 @@ void InterpCompiler::EmitBinaryArithmeticOp(int32_t opBase)
}
else
{
-#if TARGET_64BIT
if (type1 == StackTypeI8 && type2 == StackTypeI4)
{
EmitConv(m_pStackPointer - 1, StackTypeI8, InterpOpForWideningArgForImplicitUpcast((InterpOpcode)opBase));
@@ -3107,7 +3106,6 @@ void InterpCompiler::EmitBinaryArithmeticOp(int32_t opBase)
EmitConv(m_pStackPointer - 2, StackTypeI8, InterpOpForWideningArgForImplicitUpcast((InterpOpcode)opBase));
type1 = StackTypeI8;
}
-#endif
if (type1 == StackTypeR8 && type2 == StackTypeR4)
{
EmitConv(m_pStackPointer - 1, StackTypeR8, INTOP_CONV_R8_R4);
diff --git a/src/coreclr/jit/codegen.h b/src/coreclr/jit/codegen.h
index 8472dfe8c0233d..6b2ca6af8484d4 100644
--- a/src/coreclr/jit/codegen.h
+++ b/src/coreclr/jit/codegen.h
@@ -332,8 +332,11 @@ class CodeGen final : public CodeGenInterface
// Prolog functions and data (there are a few exceptions for more generally used things)
//
- void genEstablishFramePointer(int delta, bool reportUnwindData);
- void genHomeRegisterParams(regNumber initReg, bool* initRegStillZeroed);
+ void genEstablishFramePointer(int delta, bool reportUnwindData);
+ void genHomeRegisterParams(regNumber initReg, bool* initRegStillZeroed);
+#ifdef TARGET_WASM
+ void genHomeRegisterParamsOutsideProlog();
+#endif
regMaskTP genGetParameterHomingTempRegisterCandidates();
var_types genParamStackType(LclVarDsc* dsc, const ABIPassingSegment& seg);
diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp
index 7823547e9edbe2..2be7f9c272edad 100644
--- a/src/coreclr/jit/codegenlinear.cpp
+++ b/src/coreclr/jit/codegenlinear.cpp
@@ -410,6 +410,16 @@ void CodeGen::genCodeForBBlist()
}
#endif
+#ifdef TARGET_WASM
+ // genHomeRegisterParams can generate arbitrary amounts of code on Wasm, so
+ // we have moved it out of the prolog to the first basic block in order to
+ // work around the restriction that the prolog can only be one insGroup.
+ if (block->IsFirst())
+ {
+ genHomeRegisterParamsOutsideProlog();
+ }
+#endif
+
#ifndef TARGET_WASM // TODO-WASM: enable genPoisonFrame
// Emit poisoning into the init BB that comes right after prolog.
// We cannot emit this code in the prolog as it might make the prolog too large.
diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp
index feb4d28396a082..d64235083b03e9 100644
--- a/src/coreclr/jit/codegenloongarch64.cpp
+++ b/src/coreclr/jit/codegenloongarch64.cpp
@@ -557,14 +557,6 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
delta_PSP -= TARGET_POINTER_SIZE;
}
- if ((m_compiler->lvaAsyncExecutionContextVar != BAD_VAR_NUM) && !m_compiler->opts.IsOSR())
- {
- delta_PSP -= TARGET_POINTER_SIZE;
- }
- if ((m_compiler->lvaAsyncSynchronizationContextVar != BAD_VAR_NUM) && !m_compiler->opts.IsOSR())
- {
- delta_PSP -= TARGET_POINTER_SIZE;
- }
funcletFrameSize = funcletFrameSize - delta_PSP;
funcletFrameSize = roundUp((unsigned)funcletFrameSize, STACK_ALIGN);
@@ -2279,7 +2271,7 @@ void CodeGen::genJumpTable(GenTree* treeNode)
// Access to inline data is 'abstracted' by a special type of static member
// (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
// to constant data, not a real static field.
- GetEmitter()->emitIns_R_C(INS_bl, emitActualTypeSize(TYP_I_IMPL), treeNode->GetRegNum(), REG_NA,
+ GetEmitter()->emitIns_R_C(INS_bl, EA_PTRSIZE, treeNode->GetRegNum(), REG_NA,
m_compiler->eeFindJitDataOffs(jmpTabBase), 0);
genProduceReg(treeNode);
}
@@ -2292,7 +2284,16 @@ void CodeGen::genJumpTable(GenTree* treeNode)
//
void CodeGen::genAsyncResumeInfo(GenTreeVal* treeNode)
{
- GetEmitter()->emitIns_R_C(INS_bl, emitActualTypeSize(TYP_I_IMPL), treeNode->GetRegNum(), REG_NA,
+ // INS_bl/b are placeholders; neither is the final instruction.
+ instruction ins = INS_bl;
+ emitAttr attr = EA_PTRSIZE;
+ if (m_compiler->eeDataWithCodePointersNeedsRelocs())
+ {
+ ins = INS_b;
+ attr = EA_SET_FLG(EA_PTRSIZE, EA_CNS_RELOC_FLG);
+ }
+
+ GetEmitter()->emitIns_R_C(ins, attr, treeNode->GetRegNum(), REG_NA,
genEmitAsyncResumeInfo((unsigned)treeNode->gtVal1), 0);
genProduceReg(treeNode);
}
@@ -3341,8 +3342,17 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree)
}
else
{
- emit->emitIns_I_la(EA_PTRSIZE, REG_RA, imm + 1);
- emit->emitIns_R_R_R(IsUnsigned ? INS_sltu : INS_slt, EA_PTRSIZE, targetReg, regOp1, REG_RA);
+ assert(!(!IsUnsigned && (imm == INT64_MAX)));
+ if (IsUnsigned && (imm == ~0))
+ {
+ // unsigned (a <= ~0) is always true.
+ emit->emitIns_R_R_I(INS_addi_d, EA_PTRSIZE, targetReg, REG_R0, 1);
+ }
+ else
+ {
+ emit->emitIns_I_la(EA_PTRSIZE, REG_RA, imm + 1);
+ emit->emitIns_R_R_R(IsUnsigned ? INS_sltu : INS_slt, EA_PTRSIZE, targetReg, regOp1, REG_RA);
+ }
}
}
else if (tree->OperIs(GT_GT))
@@ -3711,14 +3721,6 @@ int CodeGenInterface::genSPtoFPdelta() const
{
delta -= TARGET_POINTER_SIZE;
}
- if ((m_compiler->lvaAsyncExecutionContextVar != BAD_VAR_NUM) && !m_compiler->opts.IsOSR())
- {
- delta -= TARGET_POINTER_SIZE;
- }
- if ((m_compiler->lvaAsyncSynchronizationContextVar != BAD_VAR_NUM) && !m_compiler->opts.IsOSR())
- {
- delta -= TARGET_POINTER_SIZE;
- }
assert(delta >= 0);
return delta;
@@ -6089,41 +6091,15 @@ void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize,
// Now we can actually use those slot ID's to declare live ranges.
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_DO_WORK, &callCnt);
+#ifdef FEATURE_REMAP_FUNCTION
if (m_compiler->opts.compDbgEnC)
{
- // what we have to preserve is called the "frame header" (see comments in VM\eetwain.cpp)
- // which is:
- // -return address
- // -saved off RBP
- // -saved 'this' pointer and bool for synchronized methods
-
- // 4 slots for RBP + return address + RSI + RDI
- int preservedAreaSize = 4 * REGSIZE_BYTES;
-
- if (m_compiler->info.compFlags & CORINFO_FLG_SYNCH)
- {
- if (!(m_compiler->info.compFlags & CORINFO_FLG_STATIC))
- {
- preservedAreaSize += REGSIZE_BYTES;
- }
-
- preservedAreaSize += 1; // bool for synchronized methods
- }
-
- if (m_compiler->lvaAsyncExecutionContextVar != BAD_VAR_NUM)
- {
- preservedAreaSize += TARGET_POINTER_SIZE;
- }
-
- if (m_compiler->lvaAsyncSynchronizationContextVar != BAD_VAR_NUM)
- {
- preservedAreaSize += TARGET_POINTER_SIZE;
- }
-
- // Used to signal both that the method is compiled for EnC, and also the size of the block at the top of the
- // frame
- gcInfoEncoder->SetSizeOfEditAndContinuePreservedArea(preservedAreaSize);
+ // TODO: the lvaMonAcquired, lvaAsyncExecutionContextVar and lvaAsyncSynchronizationContextVar locals are special
+ // in that they must be allocated at the top of the stack frame and included as part of the EnC frame header
+ // for EnC to work.
+ NYI_LOONGARCH64("compDbgEnc in CodeGen::genCreateAndStoreGCInfo() ---unimplemented/unused on LA64 yet---");
}
+#endif // FEATURE_REMAP_FUNCTION
if (m_compiler->opts.IsReversePInvoke())
{
@@ -6765,14 +6741,6 @@ void CodeGen::genPushCalleeSavedRegisters(regNumber initReg, bool* pInitRegZeroe
{
localFrameSize -= TARGET_POINTER_SIZE;
}
- if ((m_compiler->lvaAsyncExecutionContextVar != BAD_VAR_NUM) && !m_compiler->opts.IsOSR())
- {
- localFrameSize -= TARGET_POINTER_SIZE;
- }
- if ((m_compiler->lvaAsyncSynchronizationContextVar != BAD_VAR_NUM) && !m_compiler->opts.IsOSR())
- {
- localFrameSize -= TARGET_POINTER_SIZE;
- }
#ifdef DEBUG
if (m_compiler->opts.disAsm)
@@ -6839,14 +6807,6 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
{
localFrameSize -= TARGET_POINTER_SIZE;
}
- if ((m_compiler->lvaAsyncExecutionContextVar != BAD_VAR_NUM) && !m_compiler->opts.IsOSR())
- {
- localFrameSize -= TARGET_POINTER_SIZE;
- }
- if ((m_compiler->lvaAsyncSynchronizationContextVar != BAD_VAR_NUM) && !m_compiler->opts.IsOSR())
- {
- localFrameSize -= TARGET_POINTER_SIZE;
- }
JITDUMP("Frame type. #outsz=%d; #framesz=%d; #calleeSaveRegsPushed:%d; "
"localloc? %s\n",
diff --git a/src/coreclr/jit/codegenwasm.cpp b/src/coreclr/jit/codegenwasm.cpp
index 4dc8af8418ac9b..32f5ef4cdb950b 100644
--- a/src/coreclr/jit/codegenwasm.cpp
+++ b/src/coreclr/jit/codegenwasm.cpp
@@ -121,6 +121,22 @@ void CodeGen::genEnregisterOSRArgsAndLocals()
//------------------------------------------------------------------------
// genHomeRegisterParams: place register arguments into their RA-assigned locations.
//
+// We can't actually do this task here because the prolog will overflow. Instead, we
+// do this later on and inject all the relevant code into the first basic block.
+// See genHomeRegisterParamsOutsideProlog, below.
+//
+// Arguments:
+// initReg - Unused
+// initRegStillZeroed - Unused
+//
+void CodeGen::genHomeRegisterParams(regNumber initReg, bool* initRegStillZeroed)
+{
+ // Intentionally empty
+}
+
+//------------------------------------------------------------------------
+// genHomeRegisterParamsOutsideProlog: place register arguments into their RA-assigned locations.
+//
// For the WASM RA, we have a much simplified (compared to LSRA) contract of:
// - If an argument is live on entry in a set of registers, then the RA will
// assign those registers to that argument on entry.
@@ -129,14 +145,9 @@ void CodeGen::genEnregisterOSRArgsAndLocals()
// The main motivation for this (along with the obvious CQ implications) is
// obviating the need to adapt the general "RegGraph"-based algorithm to
// !HAS_FIXED_REGISTER_SET constraints (no reg masks).
-//
-// Arguments:
-// initReg - Unused
-// initRegStillZeroed - Unused
-//
-void CodeGen::genHomeRegisterParams(regNumber initReg, bool* initRegStillZeroed)
+void CodeGen::genHomeRegisterParamsOutsideProlog()
{
- JITDUMP("*************** In genHomeRegisterParams()\n");
+ JITDUMP("*************** In genHomeRegisterParamsOutsideProlog()\n");
auto spillParam = [this](unsigned lclNum, unsigned offset, unsigned paramLclNum, const ABIPassingSegment& segment) {
assert(segment.IsPassedInRegister());
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index d9e256c164e345..cedc714d270ec9 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -4210,6 +4210,17 @@ class Compiler
unsigned lvaLclExactSize(unsigned varNum);
ValueSize lvaLclValueSize(unsigned varNum);
+ //-----------------------------------------------------------------------------
+ // lvaIsUnknownSizeLocal: Does the local have an unknown size at compile-time?
+ //
+ // Returns:
+ // True if the local does not have an exact size, else false.
+ //
+ bool lvaIsUnknownSizeLocal(unsigned varNum)
+ {
+ return !lvaLclValueSize(varNum).IsExact();
+ }
+
bool lvaHaveManyLocals(float percent = 1.0f) const;
unsigned lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason));
@@ -6878,6 +6889,8 @@ class Compiler
public:
bool fgIsBigOffset(size_t offset);
bool IsValidLclAddr(unsigned lclNum, unsigned offset);
+ bool IsEntireAccess(unsigned lclNum, unsigned offset, ValueSize accessSize);
+ bool IsWideAccess(unsigned lclNum, unsigned offset, ValueSize accessSize);
bool IsPotentialGCSafePoint(GenTree* tree) const;
private:
diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp
index 2a618c5c2f1232..4484a8ae95075c 100644
--- a/src/coreclr/jit/compiler.hpp
+++ b/src/coreclr/jit/compiler.hpp
@@ -3324,7 +3324,7 @@ inline bool Compiler::fgIsBigOffset(size_t offset)
}
//------------------------------------------------------------------------
-// IsValidLclAddr: Can the given local address be represented as "LCL_FLD_ADDR"?
+// IsValidLclAddr: Can the given local address be represented as "LCL_ADDR"?
//
// Local address nodes cannot point beyond the local and can only store
// 16 bits worth of offset.
@@ -3334,19 +3334,72 @@ inline bool Compiler::fgIsBigOffset(size_t offset)
// offset - The address' offset
//
// Return Value:
-// Whether "LCL_FLD_ADDR [+offset]" would be valid IR.
+// Whether "LCL_ADDR [+offset]" would be valid IR.
//
inline bool Compiler::IsValidLclAddr(unsigned lclNum, unsigned offset)
{
#ifdef TARGET_ARM64
- if (varTypeHasUnknownSize(lvaGetDesc(lclNum)))
+ if (lvaIsUnknownSizeLocal(lclNum))
{
- return false;
+ return (offset == 0);
}
#endif
return (offset < UINT16_MAX) && (offset < lvaLclExactSize(lclNum));
}
+//------------------------------------------------------------------------
+// IsEntireAccess: Is the access to a local entire?
+//
+// The access is entire when the size of the access is equivalent to the size
+// of the local, and the access address is equivalent to the local address
+// (offset == 0).
+//
+// Arguments:
+// lclNum - The local's number
+// offset - The access offset
+// accessSize - The size of the access (may be unknown at compile-time)
+//
+// Return Value:
+// True if the access is entire by the definition above, else false.
+//
+inline bool Compiler::IsEntireAccess(unsigned lclNum, unsigned offset, ValueSize accessSize)
+{
+ return (lvaLclValueSize(lclNum) == accessSize) && (offset == 0);
+}
+
+//------------------------------------------------------------------------
+// IsWideAccess: Is the access to a local wide?
+//
+// An access is wide when the access overflows the end of the local. If the
+// access size is unknown, the access is assumed wide if it is not entire.
+//
+// Arguments:
+// lclNum - The local's number
+// offset - The access offset
+// accessSize - The size of the access (may be unknown at compile-time)
+//
+// Return Value:
+// True if the access is wide by the definition above, else false.
+//
+inline bool Compiler::IsWideAccess(unsigned lclNum, unsigned offset, ValueSize accessSize)
+{
+ assert(!accessSize.IsNull());
+ if (accessSize.IsExact())
+ {
+ ClrSafeInt extent = ClrSafeInt(offset) + ClrSafeInt(accessSize.GetExact());
+ // The access is wide if:
+ // * The offset computation overflows uint16_t.
+ // * The address at `offset + accessSize - 1`, (the last byte of the access) is out of bounds of the local.
+ return extent.IsOverflow() || !FitsIn(extent.Value()) || !IsValidLclAddr(lclNum, extent.Value() - 1);
+ }
+ else
+ {
+ // If we don't know the size of the access or the local at compile time, we assume any access overflows if
+ // it is not an entire access to the local.
+ return !IsEntireAccess(lclNum, offset, accessSize);
+ }
+}
+
//------------------------------------------------------------------------
// IsPotentialGCSafePoint: Can the given tree be effectively a gc safe point?
//
diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp
index 3eafb5c5bc9344..e620d149ca10b6 100644
--- a/src/coreclr/jit/emit.cpp
+++ b/src/coreclr/jit/emit.cpp
@@ -9765,7 +9765,8 @@ insGroup* emitter::emitAllocIG()
ig = (insGroup*)emitGetMem(sz);
#ifdef DEBUG
- ig->igSelf = ig;
+ ig->igSelf = ig;
+ ig->igDataSize = 0;
#endif
#if EMITTER_STATS
@@ -9832,6 +9833,8 @@ void emitter::emitInitIG(insGroup* ig)
// Explicitly call init, since IGs don't actually have a constructor.
ig->igBlocks.jitstd::list::init(m_compiler->getAllocator(CMK_DebugOnly));
#endif
+
+ ig->igData = nullptr;
}
/*****************************************************************************
diff --git a/src/coreclr/jit/emitloongarch64.cpp b/src/coreclr/jit/emitloongarch64.cpp
index 83d6425480678e..6f752c830ba97f 100644
--- a/src/coreclr/jit/emitloongarch64.cpp
+++ b/src/coreclr/jit/emitloongarch64.cpp
@@ -1972,8 +1972,8 @@ void emitter::emitIns_R_R_R_R(
/*****************************************************************************
*
* Add an instruction with a register + static member operands.
- * Constant is stored into JIT data which is adjacent to code.
- * For LOONGARCH64, maybe not the best, here just supports the func-interface.
+ * Usually constants are stored into JIT data adjacent to code, in which case no
+ * relocation is needed. PC-relative offset will be encoded directly into instruction.
*
*/
void emitter::emitIns_R_C(
@@ -1982,10 +1982,13 @@ void emitter::emitIns_R_C(
assert(offs >= 0);
assert(instrDesc::fitsInSmallCns(offs)); // can optimize.
- // when id->idIns == bl, for reloc! 4-ins.
+ // when id->idIns == b, for AsyncResumeInfo reloc.
+ // pcalau12i reg, off-hi-20bits
+ // addi_d reg, offs_lo-12bits(reg)
+ // when id->idIns == bl, for reloc! 2-ins.
// pcaddu12i reg, off-hi-20bits
// addi_d reg, reg, off-lo-12bits
- // when id->idIns == load-ins, for reloc! 4-ins.
+ // when id->idIns == load-ins, for reloc! 2-ins.
// pcaddu12i reg, off-hi-20bits
// load reg, offs_lo-12bits(reg)
//
@@ -2000,6 +2003,7 @@ void emitter::emitIns_R_C(
// load reg, r21 + addr_bits[11:0]
instrDesc* id = emitNewInstr(attr);
+ id->idSetRelocFlags(attr);
id->idIns(ins);
assert(reg != REG_R0); // for special. reg Must not be R0.
@@ -2007,7 +2011,7 @@ void emitter::emitIns_R_C(
id->idSmallCns(offs); // usually is 0.
id->idInsOpt(INS_OPTS_RC);
- if (m_compiler->opts.compReloc)
+ if (m_compiler->opts.compReloc || id->idIsReloc())
{
id->idSetIsDspReloc();
id->idCodeSize(8);
@@ -2030,7 +2034,6 @@ void emitter::emitIns_R_C(
id->idOpSize(EA_PTRSIZE);
}
- // TODO-LoongArch64: this maybe deleted.
id->idSetIsBound(); // We won't patch address since we will know the exact distance
// once JIT code and data are allocated together.
@@ -3376,6 +3379,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
{
// Reference to JIT data
+ // when id->idIns == b, for AsyncResumeInfo reloc.
+ // pcalau12i reg, off-hi-20bits
+ // addi_d reg, offs_lo-12bits(reg)
// when id->idIns == bl, for reloc!
// pcaddu12i r21, off-hi-20bits
// addi_d reg, r21, off-lo-12bits
@@ -3408,7 +3414,18 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
ins = id->idIns();
regNumber reg1 = id->idReg1();
- if (id->idIsReloc())
+ if (ins == INS_b)
+ {
+ // relocation for AsyncResumeInfo.
+ *(code_t*)dstRW = 0x1a000000 | (code_t)reg1;
+ dstRW += 4;
+ ins = INS_addi_d;
+ *(code_t*)dstRW = 0x02c00000 | (code_t)reg1 | (code_t)(reg1 << 5);
+ dstRW += 4;
+ emitRecordRelocation(dstRW - 8 - writeableOffset, emitDataOffsetToPtr(dataOffs),
+ CorInfoReloc::LOONGARCH64_PC);
+ }
+ else if (id->idIsReloc())
{
// get the addr-offset of the data.
imm = (ssize_t)emitDataOffsetToPtr(dataOffs) - (ssize_t)(dstRW - writeableOffset);
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index 5a65ddcadf0943..3dfca0ba725a0a 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -7358,9 +7358,6 @@ ExceptionSetFlags GenTree::OperExceptions(Compiler* comp)
case GT_CKFINITE:
return ExceptionSetFlags::ArithmeticException;
- case GT_LCLHEAP:
- return ExceptionSetFlags::StackOverflowException;
-
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
{
@@ -28370,8 +28367,8 @@ bool GenTreeHWIntrinsic::OperIsMemoryLoad(GenTree** pAddr) const
case NI_Sve_GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting:
case NI_Sve_GatherVectorUInt32ZeroExtend:
case NI_Sve_GatherVectorUInt32ZeroExtendFirstFaulting:
- case NI_Sve_GatherVectorWithByteOffsetFirstFaulting:
case NI_Sve_GatherVectorWithByteOffsets:
+ case NI_Sve_GatherVectorWithByteOffsetFirstFaulting:
case NI_Sve_LoadVector:
case NI_Sve_LoadVectorNonTemporal:
case NI_Sve_LoadVector128AndReplicateToVector:
@@ -28434,6 +28431,18 @@ bool GenTreeHWIntrinsic::OperIsMemoryLoad(GenTree** pAddr) const
case NI_Sve_LoadVectorUInt16NonFaultingZeroExtendToUInt64:
case NI_Sve_LoadVectorUInt32NonFaultingZeroExtendToInt64:
case NI_Sve_LoadVectorUInt32NonFaultingZeroExtendToUInt64:
+ case NI_Sve2_GatherVectorByteZeroExtendNonTemporal:
+ case NI_Sve2_GatherVectorInt16SignExtendNonTemporal:
+ case NI_Sve2_GatherVectorInt16WithByteOffsetsSignExtendNonTemporal:
+ case NI_Sve2_GatherVectorInt32SignExtendNonTemporal:
+ case NI_Sve2_GatherVectorInt32WithByteOffsetsSignExtendNonTemporal:
+ case NI_Sve2_GatherVectorNonTemporal:
+ case NI_Sve2_GatherVectorSByteSignExtendNonTemporal:
+ case NI_Sve2_GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal:
+ case NI_Sve2_GatherVectorUInt16ZeroExtendNonTemporal:
+ case NI_Sve2_GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal:
+ case NI_Sve2_GatherVectorUInt32ZeroExtendNonTemporal:
+ case NI_Sve2_GatherVectorWithByteOffsetsNonTemporal:
addr = Op(2);
break;
@@ -28526,9 +28535,24 @@ bool GenTreeHWIntrinsic::OperIsMemoryLoad(GenTree** pAddr) const
NI_Sve_GatherVectorUInt32WithByteOffsetsZeroExtend,
NI_Sve_GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting,
NI_Sve_GatherVectorUInt32ZeroExtend, NI_Sve_GatherVectorUInt32ZeroExtendFirstFaulting));
- assert(varTypeIsI(addr) ||
- (varTypeIsSIMD(addr) && ((intrinsicId >= NI_Sve_GatherVector) &&
- (intrinsicId <= NI_Sve_GatherVectorUInt32ZeroExtendFirstFaulting))));
+
+ static_assert(AreContiguous(NI_Sve2_GatherVectorByteZeroExtendNonTemporal,
+ NI_Sve2_GatherVectorInt16SignExtendNonTemporal,
+ NI_Sve2_GatherVectorInt16WithByteOffsetsSignExtendNonTemporal,
+ NI_Sve2_GatherVectorInt32SignExtendNonTemporal,
+ NI_Sve2_GatherVectorInt32WithByteOffsetsSignExtendNonTemporal,
+ NI_Sve2_GatherVectorNonTemporal, NI_Sve2_GatherVectorSByteSignExtendNonTemporal,
+ NI_Sve2_GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal,
+ NI_Sve2_GatherVectorUInt16ZeroExtendNonTemporal,
+ NI_Sve2_GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal,
+ NI_Sve2_GatherVectorUInt32ZeroExtendNonTemporal,
+ NI_Sve2_GatherVectorWithByteOffsetsNonTemporal));
+
+ bool isSveGatherLoad =
+ (intrinsicId >= NI_Sve_GatherVector) && (intrinsicId <= NI_Sve_GatherVectorUInt32ZeroExtendFirstFaulting);
+ bool isSve2GatherLoad = (intrinsicId >= NI_Sve2_GatherVectorByteZeroExtendNonTemporal) &&
+ (intrinsicId <= NI_Sve2_GatherVectorWithByteOffsetsNonTemporal);
+ assert(varTypeIsI(addr) || (varTypeIsSIMD(addr) && (isSveGatherLoad || isSve2GatherLoad)));
#else
assert(varTypeIsI(addr));
#endif
diff --git a/src/coreclr/jit/hwintrinsic.cpp b/src/coreclr/jit/hwintrinsic.cpp
index 406e6a1369aeeb..1a59fbc869bab2 100644
--- a/src/coreclr/jit/hwintrinsic.cpp
+++ b/src/coreclr/jit/hwintrinsic.cpp
@@ -2490,9 +2490,21 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
case NI_Sve_GatherVectorUInt32WithByteOffsetsZeroExtend:
case NI_Sve_GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting:
case NI_Sve_GatherVectorUInt32ZeroExtend:
- case NI_Sve_GatherVectorWithByteOffsetFirstFaulting:
- case NI_Sve_GatherVectorWithByteOffsets:
case NI_Sve_GatherVectorUInt32ZeroExtendFirstFaulting:
+ case NI_Sve_GatherVectorWithByteOffsets:
+ case NI_Sve_GatherVectorWithByteOffsetFirstFaulting:
+ case NI_Sve2_GatherVectorByteZeroExtendNonTemporal:
+ case NI_Sve2_GatherVectorInt16SignExtendNonTemporal:
+ case NI_Sve2_GatherVectorInt16WithByteOffsetsSignExtendNonTemporal:
+ case NI_Sve2_GatherVectorInt32SignExtendNonTemporal:
+ case NI_Sve2_GatherVectorInt32WithByteOffsetsSignExtendNonTemporal:
+ case NI_Sve2_GatherVectorNonTemporal:
+ case NI_Sve2_GatherVectorSByteSignExtendNonTemporal:
+ case NI_Sve2_GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal:
+ case NI_Sve2_GatherVectorUInt16ZeroExtendNonTemporal:
+ case NI_Sve2_GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal:
+ case NI_Sve2_GatherVectorUInt32ZeroExtendNonTemporal:
+ case NI_Sve2_GatherVectorWithByteOffsetsNonTemporal:
assert(varTypeIsSIMD(op3->TypeGet()));
if (numArgs == 3)
{
diff --git a/src/coreclr/jit/hwintrinsiccodegenarm64.cpp b/src/coreclr/jit/hwintrinsiccodegenarm64.cpp
index 9016b8a91b2fd3..cb446cba8fcdc0 100644
--- a/src/coreclr/jit/hwintrinsiccodegenarm64.cpp
+++ b/src/coreclr/jit/hwintrinsiccodegenarm64.cpp
@@ -2368,6 +2368,79 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
break;
}
+ case NI_Sve2_GatherVectorInt16SignExtendNonTemporal:
+ case NI_Sve2_GatherVectorInt32SignExtendNonTemporal:
+ case NI_Sve2_GatherVectorNonTemporal:
+ case NI_Sve2_GatherVectorUInt16ZeroExtendNonTemporal:
+ case NI_Sve2_GatherVectorUInt32ZeroExtendNonTemporal:
+ {
+ if (!varTypeIsSIMD(intrin.op2->gtType))
+ {
+ // GatherVector...(Vector mask, T* address, Vector indices)
+
+ assert(intrin.numOperands == 3);
+
+ ssize_t shift = 0;
+ regNumber tempReg = internalRegisters.GetSingle(node, RBM_ALLFLOAT);
+
+ if ((intrin.id == NI_Sve2_GatherVectorInt16SignExtendNonTemporal) ||
+ (intrin.id == NI_Sve2_GatherVectorUInt16ZeroExtendNonTemporal))
+ {
+ shift = 1;
+ }
+ else if ((intrin.id == NI_Sve2_GatherVectorInt32SignExtendNonTemporal) ||
+ (intrin.id == NI_Sve2_GatherVectorUInt32ZeroExtendNonTemporal))
+ {
+ shift = 2;
+ }
+ else
+ {
+ assert(intrin.id == NI_Sve2_GatherVectorNonTemporal);
+ assert(emitActualTypeSize(intrin.baseType) == EA_8BYTE);
+ shift = 3;
+ }
+
+ // The SVE2 instructions only support byte offsets. Convert indices to bytes.
+ GetEmitter()->emitIns_R_R_I(INS_sve_lsl, emitSize, tempReg, op3Reg, shift, opt);
+
+ GetEmitter()->emitIns_R_R_R_R(ins, emitSize, targetReg, op1Reg, tempReg, op2Reg, opt);
+ }
+ else
+ {
+ // GatherVector...(Vector mask, Vector addresses)
+ assert(intrin.numOperands == 2);
+ GetEmitter()->emitIns_R_R_R_R(ins, emitSize, targetReg, op1Reg, op2Reg, REG_ZR, opt);
+ }
+ break;
+ }
+
+ case NI_Sve2_GatherVectorByteZeroExtendNonTemporal:
+ case NI_Sve2_GatherVectorSByteSignExtendNonTemporal:
+ if (!varTypeIsSIMD(intrin.op2->gtType))
+ {
+ // GatherVector...(Vector mask, T* address, Vector offsets)
+ assert(intrin.numOperands == 3);
+ GetEmitter()->emitIns_R_R_R_R(ins, emitSize, targetReg, op1Reg, op3Reg, op2Reg, opt);
+ }
+ else
+ {
+ // GatherVector...(Vector mask, Vector addresses)
+ assert(intrin.numOperands == 2);
+ GetEmitter()->emitIns_R_R_R_R(ins, emitSize, targetReg, op1Reg, op2Reg, REG_ZR, opt);
+ }
+ break;
+
+ case NI_Sve2_GatherVectorInt16WithByteOffsetsSignExtendNonTemporal:
+ case NI_Sve2_GatherVectorInt32WithByteOffsetsSignExtendNonTemporal:
+ case NI_Sve2_GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal:
+ case NI_Sve2_GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal:
+ case NI_Sve2_GatherVectorWithByteOffsetsNonTemporal:
+ // GatherVector...(Vector mask, T* address, Vector offsets)
+ assert(!varTypeIsSIMD(intrin.op2->gtType));
+ assert(intrin.numOperands == 3);
+ GetEmitter()->emitIns_R_R_R_R(ins, emitSize, targetReg, op1Reg, op3Reg, op2Reg, opt);
+ break;
+
case NI_Sve_ReverseElement:
// Use non-predicated version explicitly
GetEmitter()->emitIns_R_R(ins, emitSize, targetReg, op1Reg, opt);
diff --git a/src/coreclr/jit/hwintrinsiclistarm64sve.h b/src/coreclr/jit/hwintrinsiclistarm64sve.h
index 2bd17aab0f61da..06ddd231b5ed74 100644
--- a/src/coreclr/jit/hwintrinsiclistarm64sve.h
+++ b/src/coreclr/jit/hwintrinsiclistarm64sve.h
@@ -131,10 +131,10 @@ HARDWARE_INTRINSIC(Sve, GatherVectorUInt16WithByteOffsetsZeroExtend,
HARDWARE_INTRINSIC(Sve, GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1h, INS_sve_ldff1h, INS_sve_ldff1h, INS_sve_ldff1h, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation|HW_Flag_SpecialSideEffect_Other)
HARDWARE_INTRINSIC(Sve, GatherVectorUInt16ZeroExtend, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1h, INS_sve_ld1h, INS_sve_ld1h, INS_sve_ld1h, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
HARDWARE_INTRINSIC(Sve, GatherVectorUInt16ZeroExtendFirstFaulting, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1h, INS_sve_ldff1h, INS_sve_ldff1h, INS_sve_ldff1h, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation|HW_Flag_SpecialSideEffect_Other)
-HARDWARE_INTRINSIC(Sve, GatherVectorUInt32WithByteOffsetsZeroExtend, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1w, INS_sve_ld1w, INS_sve_ld1w, INS_sve_ld1w, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
-HARDWARE_INTRINSIC(Sve, GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1w, INS_sve_ldff1w, INS_sve_ldff1w, INS_sve_ldff1w, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation|HW_Flag_SpecialSideEffect_Other)
-HARDWARE_INTRINSIC(Sve, GatherVectorUInt32ZeroExtend, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1w, INS_sve_ld1w, INS_sve_ld1w, INS_sve_ld1w, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
-HARDWARE_INTRINSIC(Sve, GatherVectorUInt32ZeroExtendFirstFaulting, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1w, INS_sve_ldff1w, INS_sve_ldff1w, INS_sve_ldff1w, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation|HW_Flag_SpecialSideEffect_Other)
+HARDWARE_INTRINSIC(Sve, GatherVectorUInt32WithByteOffsetsZeroExtend, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1w, INS_sve_ld1w, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
+HARDWARE_INTRINSIC(Sve, GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1w, INS_sve_ldff1w, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation|HW_Flag_SpecialSideEffect_Other)
+HARDWARE_INTRINSIC(Sve, GatherVectorUInt32ZeroExtend, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1w, INS_sve_ld1w, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
+HARDWARE_INTRINSIC(Sve, GatherVectorUInt32ZeroExtendFirstFaulting, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1w, INS_sve_ldff1w, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation|HW_Flag_SpecialSideEffect_Other)
HARDWARE_INTRINSIC(Sve, GatherVectorWithByteOffsetFirstFaulting, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1w, INS_sve_ldff1w, INS_sve_ldff1d, INS_sve_ldff1d, INS_sve_ldff1w, INS_sve_ldff1d}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation|HW_Flag_SpecialSideEffect_Other)
HARDWARE_INTRINSIC(Sve, GatherVectorWithByteOffsets, -1, 3, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1w, INS_sve_ld1w, INS_sve_ld1d, INS_sve_ld1d, INS_sve_ld1w, INS_sve_ld1d}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
HARDWARE_INTRINSIC(Sve, GetActiveElementCount, -1, 2, {INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_BaseTypeFromFirstArg|HW_Flag_ExplicitMaskedOperation)
@@ -351,6 +351,18 @@ HARDWARE_INTRINSIC(Sve2, DotProductRotateComplexBySelectedIndex,
HARDWARE_INTRINSIC(Sve2, FusedAddHalving, -1, -1, {INS_sve_shadd, INS_sve_uhadd, INS_sve_shadd, INS_sve_uhadd, INS_sve_shadd, INS_sve_uhadd, INS_sve_shadd, INS_sve_uhadd, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_EmbeddedMaskedOperation|HW_Flag_HasRMWSemantics|HW_Flag_LowMaskedOperation)
HARDWARE_INTRINSIC(Sve2, FusedAddRoundedHalving, -1, -1, {INS_sve_srhadd, INS_sve_urhadd, INS_sve_srhadd, INS_sve_urhadd, INS_sve_srhadd, INS_sve_urhadd, INS_sve_srhadd, INS_sve_urhadd, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_EmbeddedMaskedOperation|HW_Flag_HasRMWSemantics|HW_Flag_LowMaskedOperation)
HARDWARE_INTRINSIC(Sve2, FusedSubtractHalving, -1, -1, {INS_sve_shsub, INS_sve_uhsub, INS_sve_shsub, INS_sve_uhsub, INS_sve_shsub, INS_sve_uhsub, INS_sve_shsub, INS_sve_uhsub, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_EmbeddedMaskedOperation|HW_Flag_HasRMWSemantics|HW_Flag_LowMaskedOperation)
+HARDWARE_INTRINSIC(Sve2, GatherVectorByteZeroExtendNonTemporal, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1b, INS_sve_ldnt1b, INS_sve_ldnt1b, INS_sve_ldnt1b, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
+HARDWARE_INTRINSIC(Sve2, GatherVectorInt16SignExtendNonTemporal, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1sh, INS_sve_ldnt1sh, INS_sve_ldnt1sh, INS_sve_ldnt1sh, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
+HARDWARE_INTRINSIC(Sve2, GatherVectorInt16WithByteOffsetsSignExtendNonTemporal, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1sh, INS_sve_ldnt1sh, INS_sve_ldnt1sh, INS_sve_ldnt1sh, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
+HARDWARE_INTRINSIC(Sve2, GatherVectorInt32SignExtendNonTemporal, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1sw, INS_sve_ldnt1sw, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
+HARDWARE_INTRINSIC(Sve2, GatherVectorInt32WithByteOffsetsSignExtendNonTemporal, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1sw, INS_sve_ldnt1sw, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
+HARDWARE_INTRINSIC(Sve2, GatherVectorNonTemporal, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1w, INS_sve_ldnt1w, INS_sve_ldnt1d, INS_sve_ldnt1d, INS_sve_ldnt1w, INS_sve_ldnt1d}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
+HARDWARE_INTRINSIC(Sve2, GatherVectorSByteSignExtendNonTemporal, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1sb, INS_sve_ldnt1sb, INS_sve_ldnt1sb, INS_sve_ldnt1sb, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
+HARDWARE_INTRINSIC(Sve2, GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1h, INS_sve_ldnt1h, INS_sve_ldnt1h, INS_sve_ldnt1h, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
+HARDWARE_INTRINSIC(Sve2, GatherVectorUInt16ZeroExtendNonTemporal, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1h, INS_sve_ldnt1h, INS_sve_ldnt1h, INS_sve_ldnt1h, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
+HARDWARE_INTRINSIC(Sve2, GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1w, INS_sve_ldnt1w, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
+HARDWARE_INTRINSIC(Sve2, GatherVectorUInt32ZeroExtendNonTemporal, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1w, INS_sve_ldnt1w, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
+HARDWARE_INTRINSIC(Sve2, GatherVectorWithByteOffsetsNonTemporal, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1w, INS_sve_ldnt1w, INS_sve_ldnt1d, INS_sve_ldnt1d, INS_sve_ldnt1w, INS_sve_ldnt1d}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ZeroingMaskedOperation)
HARDWARE_INTRINSIC(Sve2, InterleavingXorEvenOdd, -1, 3, {INS_sve_eorbt, INS_sve_eorbt, INS_sve_eorbt, INS_sve_eorbt, INS_sve_eorbt, INS_sve_eorbt, INS_sve_eorbt, INS_sve_eorbt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_HasRMWSemantics)
HARDWARE_INTRINSIC(Sve2, InterleavingXorOddEven, -1, 3, {INS_sve_eortb, INS_sve_eortb, INS_sve_eortb, INS_sve_eortb, INS_sve_eortb, INS_sve_eortb, INS_sve_eortb, INS_sve_eortb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_HasRMWSemantics)
HARDWARE_INTRINSIC(Sve2, Log2, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_flogb, INS_invalid, INS_sve_flogb, INS_invalid, INS_sve_flogb, INS_sve_flogb}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_BaseTypeFromFirstArg|HW_Flag_EmbeddedMaskedOperation|HW_Flag_LowMaskedOperation)
diff --git a/src/coreclr/jit/ifconversion.cpp b/src/coreclr/jit/ifconversion.cpp
index 1b49c438aab9ad..0b9e11c962e8fb 100644
--- a/src/coreclr/jit/ifconversion.cpp
+++ b/src/coreclr/jit/ifconversion.cpp
@@ -55,7 +55,6 @@ class OptIfConversionDsc
bool IfConvertCheckThenFlow();
void IfConvertFindFlow();
bool IfConvertCheckStmts(BasicBlock* fromBlock, IfConvertOperation* foundOperation);
- void IfConvertJoinStmts(BasicBlock* fromBlock);
GenTree* TryTransformSelectOperOrLocal(GenTree* oper, GenTree* lcl);
GenTree* TryTransformSelectOperOrZero(GenTree* oper, GenTree* lcl);
@@ -356,26 +355,6 @@ bool OptIfConversionDsc::IfConvertCheckStmts(BasicBlock* fromBlock, IfConvertOpe
return found;
}
-//-----------------------------------------------------------------------------
-// IfConvertJoinStmts
-//
-// Move all the statements from a block onto the end of the start block.
-//
-// Arguments:
-// fromBlock -- Source block
-//
-void OptIfConversionDsc::IfConvertJoinStmts(BasicBlock* fromBlock)
-{
- Statement* stmtList1 = m_startBlock->firstStmt();
- Statement* stmtList2 = fromBlock->firstStmt();
- Statement* stmtLast1 = m_startBlock->lastStmt();
- Statement* stmtLast2 = fromBlock->lastStmt();
- stmtLast1->SetNextStmt(stmtList2);
- stmtList2->SetPrevStmt(stmtLast1);
- stmtList1->SetPrevStmt(stmtLast2);
- fromBlock->SetFirstStmt(nullptr);
-}
-
//-----------------------------------------------------------------------------
// IfConvertDump
//
@@ -384,19 +363,24 @@ void OptIfConversionDsc::IfConvertJoinStmts(BasicBlock* fromBlock)
#ifdef DEBUG
void OptIfConversionDsc::IfConvertDump()
{
- assert(m_startBlock != nullptr);
m_compiler->fgDumpBlock(m_startBlock);
- BasicBlock* dumpBlock = m_startBlock->KindIs(BBJ_COND) ? m_startBlock->GetFalseTarget() : m_startBlock->GetTarget();
- for (; dumpBlock != m_finalBlock; dumpBlock = dumpBlock->GetUniqueSucc())
- {
- m_compiler->fgDumpBlock(dumpBlock);
- }
- if (m_doElseConversion)
+
+ bool beforeTransformation = m_startBlock->KindIs(BBJ_COND);
+ if (beforeTransformation)
{
- dumpBlock = m_startBlock->KindIs(BBJ_COND) ? m_startBlock->GetTrueTarget() : m_startBlock->GetTarget();
- for (; dumpBlock != m_finalBlock; dumpBlock = dumpBlock->GetUniqueSucc())
+ // Dump all Then blocks
+ for (BasicBlock* bb = m_startBlock->GetFalseTarget(); bb != m_finalBlock; bb = bb->GetUniqueSucc())
+ {
+ m_compiler->fgDumpBlock(bb);
+ }
+
+ if (m_doElseConversion)
{
- m_compiler->fgDumpBlock(dumpBlock);
+ // Dump all Else blocks
+ for (BasicBlock* bb = m_startBlock->GetTrueTarget(); bb != m_finalBlock; bb = bb->GetUniqueSucc())
+ {
+ m_compiler->fgDumpBlock(bb);
+ }
}
}
}
@@ -731,9 +715,8 @@ bool OptIfConversionDsc::optIfConvert(int* pReachabilityBudget)
select = m_compiler->gtNewConditionalNode(GT_SELECT, m_cond, selectTrueInput, selectFalseInput, selectType);
}
+ // Use the SELECT as the source of the Then STORE/RETURN.
m_thenOperation.node->AddAllEffectsFlags(select);
-
- // Use the select as the source of the Then operation.
if (m_mainOper == GT_STORE_LCL_VAR)
{
m_thenOperation.node->AsLclVar()->Data() = select;
@@ -745,30 +728,47 @@ bool OptIfConversionDsc::optIfConvert(int* pReachabilityBudget)
m_compiler->gtSetEvalOrder(m_thenOperation.node);
m_compiler->fgSetStmtSeq(m_thenOperation.stmt);
- // Remove statements.
- last->gtBashToNOP();
- m_compiler->gtSetEvalOrder(last);
- m_compiler->fgSetStmtSeq(m_startBlock->lastStmt());
- if (m_doElseConversion)
+ // Replace JTRUE with STORE(SELECT)/RETURN(SELECT) statement
+ m_compiler->fgInsertStmtBefore(m_startBlock, m_startBlock->lastStmt(), m_thenOperation.stmt);
+ m_compiler->fgRemoveStmt(m_startBlock, m_startBlock->lastStmt());
+ m_thenOperation.block->SetFirstStmt(nullptr);
+
+ BasicBlock* falseBb = m_startBlock->GetFalseTarget();
+ BasicBlock* trueBb = m_startBlock->GetTrueTarget();
+
+    // JTRUE block now contains SELECT. Change its kind and make it flow
+ // directly into block where flows merge, which is null in case of GT_RETURN.
+ if (m_mainOper == GT_RETURN)
{
- m_elseOperation.node->gtBashToNOP();
- m_compiler->gtSetEvalOrder(m_elseOperation.node);
- m_compiler->fgSetStmtSeq(m_elseOperation.stmt);
+ m_startBlock->SetKindAndTargetEdge(BBJ_RETURN);
}
+ else
+ {
+ FlowEdge* newEdge =
+ m_doElseConversion ? m_compiler->fgAddRefPred(m_finalBlock, m_startBlock) : m_startBlock->GetTrueEdge();
+ m_startBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
+ }
+ assert(m_startBlock->GetUniqueSucc() == m_finalBlock);
- // Merge all the blocks.
- IfConvertJoinStmts(m_thenOperation.block);
+ // Remove all Then/Else blocks
+ auto removeBlocks = [&](BasicBlock* start) {
+ m_compiler->fgRemoveAllRefPreds(start, m_startBlock);
+ start->bbWeight = BB_ZERO_WEIGHT;
+ assert(start->bbPreds == nullptr);
+
+ for (BasicBlock* bb = start; bb != m_finalBlock;)
+ {
+ BasicBlock* next = bb->GetUniqueSucc();
+ m_compiler->fgRemoveBlock(bb, true);
+ bb = next;
+ }
+ };
+ removeBlocks(falseBb);
if (m_doElseConversion)
{
- IfConvertJoinStmts(m_elseOperation.block);
+ removeBlocks(trueBb);
}
- // Update the flow from the original block.
- FlowEdge* const removedEdge = m_compiler->fgRemoveAllRefPreds(m_startBlock->GetFalseTarget(), m_startBlock);
- FlowEdge* const retainedEdge = m_startBlock->GetTrueEdge();
- m_startBlock->SetKindAndTargetEdge(BBJ_ALWAYS, retainedEdge);
- m_compiler->fgRepairProfileCondToUncond(m_startBlock, retainedEdge, removedEdge);
-
#ifdef DEBUG
if (m_compiler->verbose)
{
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 18670bb36147ee..7513b590f4cde4 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -10077,8 +10077,9 @@ void Compiler::impImportBlockCode(BasicBlock* block)
}
op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
- // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
- op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
+ // We do not model stack overflow from localloc as an exception side effect.
+ // Obviously, we don't want locallocs to be CSE'd.
+ op1->gtFlags |= GTF_DONT_CSE;
// Request stack security for this method.
setNeedsGSSecurityCookie();
diff --git a/src/coreclr/jit/lclmorph.cpp b/src/coreclr/jit/lclmorph.cpp
index 8675ae841ed90f..61749ab4a8f947 100644
--- a/src/coreclr/jit/lclmorph.cpp
+++ b/src/coreclr/jit/lclmorph.cpp
@@ -1010,7 +1010,7 @@ class LocalAddressVisitor final : public GenTreeVisitor
break;
case GT_FIELD_ADDR:
- if (MorphStructFieldAddress(node, 0) != BAD_VAR_NUM)
+ if (MorphStructFieldAddress(node, ValueSize(0)) != BAD_VAR_NUM)
{
goto LOCAL_NODE;
}
@@ -1595,32 +1595,10 @@ class LocalAddressVisitor final : public GenTreeVisitor
unsigned lclNum = val.LclNum();
unsigned offset = val.Offset();
LclVarDsc* varDsc = m_compiler->lvaGetDesc(lclNum);
- unsigned indirSize = node->AsIndir()->Size();
- bool isWide;
+ ValueSize lclSize = m_compiler->lvaLclValueSize(lclNum);
+ ValueSize indirSize = node->AsIndir()->ValueSize();
- // TODO-Cleanup: delete "indirSize == 0", use "Compiler::IsValidLclAddr".
- if ((indirSize == 0) || ((offset + indirSize) > UINT16_MAX))
- {
- // If we can't figure out the indirection size then treat it as a wide indirection.
- // Additionally, treat indirections with large offsets as wide: local field nodes
- // and the emitter do not support them.
- isWide = true;
- }
- else
- {
- ClrSafeInt endOffset = ClrSafeInt(offset) + ClrSafeInt(indirSize);
-
- if (endOffset.IsOverflow())
- {
- isWide = true;
- }
- else
- {
- isWide = endOffset.Value() > m_compiler->lvaLclExactSize(lclNum);
- }
- }
-
- if (isWide)
+ if (indirSize.IsNull() || m_compiler->IsWideAccess(lclNum, offset, indirSize))
{
unsigned exposedLclNum = varDsc->lvIsStructField ? varDsc->lvParentLcl : lclNum;
if (m_lclAddrAssertions != nullptr)
@@ -2065,7 +2043,7 @@ class LocalAddressVisitor final : public GenTreeVisitor
return false;
}
- unsigned fieldLclNum = MorphStructFieldAddress(addr, node->Size());
+ unsigned fieldLclNum = MorphStructFieldAddress(addr, node->ValueSize());
if (fieldLclNum == BAD_VAR_NUM)
{
return false;
@@ -2104,13 +2082,13 @@ class LocalAddressVisitor final : public GenTreeVisitor
//
// Arguments:
// node - the address node
- // accessSize - load/store size if known, zero otherwise
+ // accessSize - load/store value size
//
// Return Value:
// Local number for the promoted field if the replacement was successful,
// BAD_VAR_NUM otherwise.
//
- unsigned MorphStructFieldAddress(GenTree* node, unsigned accessSize)
+ unsigned MorphStructFieldAddress(GenTree* node, ValueSize accessSize)
{
unsigned offset = 0;
bool isSpanLength = false;
@@ -2138,16 +2116,16 @@ class LocalAddressVisitor final : public GenTreeVisitor
}
LclVarDsc* fieldVarDsc = m_compiler->lvaGetDesc(fieldLclNum);
+ ValueSize fieldSize = fieldVarDsc->lvValueSize();
// Span's Length is never negative unconditionally
- if (isSpanLength && (accessSize == genTypeSize(TYP_INT)))
+ if (isSpanLength && (accessSize.GetExact() == genTypeSize(TYP_INT)))
{
- fieldVarDsc->SetIsNeverNegative(true);
+ unsigned exactSize = accessSize.GetExact();
+ unsigned exactFieldSize = fieldSize.GetExact();
}
- // Retargeting the indirection to reference the promoted field would make it "wide", exposing
- // the whole parent struct (with all of its fields).
- if (accessSize > genTypeSize(fieldVarDsc))
+ if (!accessSize.IsNull() && m_compiler->IsWideAccess(fieldLclNum, 0, accessSize))
{
return BAD_VAR_NUM;
}
diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp
index 49e339fcbf479a..acd4be1edf083e 100644
--- a/src/coreclr/jit/lclvars.cpp
+++ b/src/coreclr/jit/lclvars.cpp
@@ -4352,24 +4352,26 @@ void Compiler::lvaFixVirtualFrameOffsets()
if ((lvaMonAcquired != BAD_VAR_NUM) && !opts.IsOSR())
{
- int offset = lvaTable[lvaMonAcquired].GetStackOffset() + delta;
+ int offset = lvaTable[lvaMonAcquired].GetStackOffset() + (compCalleeRegsPushed << 3);
lvaTable[lvaMonAcquired].SetStackOffset(offset);
delta += lvaLclStackHomeSize(lvaMonAcquired);
}
+#ifndef TARGET_LOONGARCH64
if ((lvaAsyncExecutionContextVar != BAD_VAR_NUM) && !opts.IsOSR())
{
- int offset = lvaTable[lvaAsyncExecutionContextVar].GetStackOffset() + delta;
+ int offset = lvaTable[lvaAsyncExecutionContextVar].GetStackOffset() + (compCalleeRegsPushed << 3);
lvaTable[lvaAsyncExecutionContextVar].SetStackOffset(offset);
delta += lvaLclStackHomeSize(lvaAsyncExecutionContextVar);
}
if ((lvaAsyncSynchronizationContextVar != BAD_VAR_NUM) && !opts.IsOSR())
{
- int offset = lvaTable[lvaAsyncSynchronizationContextVar].GetStackOffset() + delta;
+ int offset = lvaTable[lvaAsyncSynchronizationContextVar].GetStackOffset() + (compCalleeRegsPushed << 3);
lvaTable[lvaAsyncSynchronizationContextVar].SetStackOffset(offset);
delta += lvaLclStackHomeSize(lvaAsyncSynchronizationContextVar);
}
+#endif
JITDUMP("--- delta bump %d for FP frame\n", delta);
}
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 898539684c994a..7ad423c51ab822 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -5705,7 +5705,7 @@ GenTree* Lowering::LowerStoreLocCommon(GenTreeLclVarCommon* lclStore)
{
if (slotCount > 1)
{
-#if !defined(TARGET_RISCV64) && !defined(TARGET_LOONGARCH64)
+#if !defined(TARGET_RISCV64) && !defined(TARGET_LOONGARCH64) && !defined(TARGET_WASM)
assert(call->HasMultiRegRetVal());
#endif
}
@@ -11703,7 +11703,7 @@ void Lowering::TransformUnusedIndirection(GenTreeIndir* ind, Compiler* m_compile
// LowerLclHeap: a common logic to lower LCLHEAP.
//
// Arguments:
-// blkNode - the LCLHEAP node we are lowering.
+// node - the LCLHEAP node we are lowering.
//
void Lowering::LowerLclHeap(GenTree* node)
{
diff --git a/src/coreclr/jit/lsraarm64.cpp b/src/coreclr/jit/lsraarm64.cpp
index cf2164fc70b831..3236db623472cd 100644
--- a/src/coreclr/jit/lsraarm64.cpp
+++ b/src/coreclr/jit/lsraarm64.cpp
@@ -1389,6 +1389,11 @@ int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree, int* pDstCou
// Build any additional special cases
switch (intrin.id)
{
+ case NI_Sve2_GatherVectorInt16SignExtendNonTemporal:
+ case NI_Sve2_GatherVectorInt32SignExtendNonTemporal:
+ case NI_Sve2_GatherVectorNonTemporal:
+ case NI_Sve2_GatherVectorUInt16ZeroExtendNonTemporal:
+ case NI_Sve2_GatherVectorUInt32ZeroExtendNonTemporal:
case NI_Sve2_Scatter16BitNarrowingNonTemporal:
case NI_Sve2_Scatter32BitNarrowingNonTemporal:
case NI_Sve2_ScatterNonTemporal:
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index 0add5dd01529e4..0531aef7d53e6c 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -948,11 +948,12 @@ void CallArgs::ArgsComplete(Compiler* comp, GenTreeCall* call)
#if !FEATURE_FIXED_OUT_ARGS
// On x86 we previously recorded a stack depth of zero when
// morphing the register arguments of any GT_IND with a GTF_IND_RNGCHK flag
- // Thus we can not reorder the argument after any stack based argument
- // (Note that GT_LCLHEAP sets the GTF_EXCEPT flag so we don't need to
- // check for it explicitly.)
+ // Thus we can not reorder the argument after any stack based argument.
+ // GT_LCLHEAP has the same stack depth constraint, but it no longer sets
+ // GTF_EXCEPT, so it must be checked explicitly here.
//
- if (argx->gtFlags & GTF_EXCEPT)
+ if (((argx->gtFlags & GTF_EXCEPT) != 0) ||
+ (comp->compLocallocUsed && comp->gtTreeContainsOper(argx, GT_LCLHEAP)))
{
SetNeedsTemp(&arg);
continue;
@@ -960,15 +961,11 @@ void CallArgs::ArgsComplete(Compiler* comp, GenTreeCall* call)
#else
// For Arm/X64 we can't reorder a register argument that uses a GT_LCLHEAP
//
- if (argx->gtFlags & GTF_EXCEPT)
+ if (comp->compLocallocUsed && comp->gtTreeContainsOper(argx, GT_LCLHEAP))
{
assert(comp->compLocallocUsed);
-
- if (comp->gtTreeContainsOper(argx, GT_LCLHEAP))
- {
- SetNeedsTemp(&arg);
- continue;
- }
+ SetNeedsTemp(&arg);
+ continue;
}
#endif
}
@@ -8496,12 +8493,11 @@ GenTree* Compiler::fgMorphFinalizeIndir(GenTreeIndir* indir)
if (!indir->IsVolatile() && !indir->TypeIs(TYP_STRUCT) && addr->OperIs(GT_LCL_ADDR))
{
- unsigned size = indir->Size();
- unsigned offset = addr->AsLclVarCommon()->GetLclOffs();
- unsigned extent = offset + size;
- unsigned lclSize = lvaLclExactSize(addr->AsLclVarCommon()->GetLclNum());
+ int lclNum = addr->AsLclVarCommon()->GetLclNum();
+ unsigned offset = addr->AsLclVarCommon()->GetLclOffs();
+ ValueSize indirSize = indir->ValueSize();
- if ((extent <= lclSize) && (extent < UINT16_MAX))
+ if (!IsWideAccess(lclNum, offset, indirSize))
{
addr->ChangeType(indir->TypeGet());
if (indir->OperIs(GT_STOREIND))
@@ -8582,6 +8578,19 @@ GenTree* Compiler::fgOptimizeCast(GenTreeCast* cast)
var_types castToType = cast->CastToType();
+ // For small-int casts fed by a widening int->long, remove the widening so we truncate directly
+ // from the original int value.
+ if (varTypeIsSmall(castToType) && src->OperIs(GT_CAST) && !src->gtOverflow())
+ {
+ GenTreeCast* widening = src->AsCast();
+ if (varTypeIsLong(widening->CastToType()) && (genActualType(widening->CastFromType()) == TYP_INT))
+ {
+ cast->CastOp() = widening->CastOp();
+ DEBUG_DESTROY_NODE(widening);
+ src = cast->CastOp();
+ }
+ }
+
// For indir-like nodes, we may be able to change their type to satisfy (and discard) the cast.
if (varTypeIsSmall(castToType) && (genTypeSize(castToType) == genTypeSize(src)) &&
src->OperIs(GT_IND, GT_LCL_FLD))
@@ -10400,30 +10409,31 @@ GenTree* Compiler::fgOptimizeAddition(GenTreeOp* add)
if (opts.OptimizationEnabled())
{
- // Reduce local addresses: "ADD(LCL_ADDR, OFFSET)" => "LCL_FLD_ADDR".
+ // Reduce local addresses: "ADD(LCL_ADDR(BASE), OFFSET)" => "LCL_ADDR(BASE+OFFSET)".
//
if (op1->OperIs(GT_LCL_ADDR) && op2->IsCnsIntOrI())
{
GenTreeLclVarCommon* lclAddrNode = op1->AsLclVarCommon();
GenTreeIntCon* offsetNode = op2->AsIntCon();
- if (FitsIn(offsetNode->IconValue()))
+ ssize_t consVal = offsetNode->IconValue();
+
+ // Note: the emitter does not expect out-of-bounds access for LCL_ADDR.
+ if (FitsIn(consVal) && IsValidLclAddr(lclAddrNode->GetLclNum(), (uint32_t)consVal))
{
- unsigned offset = lclAddrNode->GetLclOffs() + static_cast(offsetNode->IconValue());
+ ClrSafeInt newOffset =
+ ClrSafeInt(lclAddrNode->GetLclOffs()) + ClrSafeInt(consVal);
+ assert(!newOffset.IsOverflow());
- // Note: the emitter does not expect out-of-bounds access for LCL_FLD_ADDR.
- if (FitsIn(offset) && (offset < lvaLclExactSize(lclAddrNode->GetLclNum())))
- {
- lclAddrNode->SetOper(GT_LCL_ADDR);
- lclAddrNode->AsLclFld()->SetLclOffs(offset);
- assert(lvaGetDesc(lclAddrNode)->lvDoNotEnregister);
+ lclAddrNode->SetOper(GT_LCL_ADDR);
+ lclAddrNode->AsLclFld()->SetLclOffs(newOffset.Value());
+ assert(lvaGetDesc(lclAddrNode)->lvDoNotEnregister);
- lclAddrNode->SetVNsFromNode(add);
+ lclAddrNode->SetVNsFromNode(add);
- DEBUG_DESTROY_NODE(offsetNode);
- DEBUG_DESTROY_NODE(add);
+ DEBUG_DESTROY_NODE(offsetNode);
+ DEBUG_DESTROY_NODE(add);
- return lclAddrNode;
- }
+ return lclAddrNode;
}
}
diff --git a/src/coreclr/jit/utils.h b/src/coreclr/jit/utils.h
index b410c5ce6abb67..38c819e0672896 100644
--- a/src/coreclr/jit/utils.h
+++ b/src/coreclr/jit/utils.h
@@ -588,8 +588,7 @@ enum class ExceptionSetFlags : uint32_t
ArithmeticException = 0x4,
NullReferenceException = 0x8,
IndexOutOfRangeException = 0x10,
- StackOverflowException = 0x20,
- UnknownException = 0x40,
+ UnknownException = 0x20,
};
class HelperCallProperties
diff --git a/src/coreclr/nativeaot/Runtime/loongarch64/GcProbe.S b/src/coreclr/nativeaot/Runtime/loongarch64/GcProbe.S
index 118af7c53ca1ab..e022a907caa1c0 100644
--- a/src/coreclr/nativeaot/Runtime/loongarch64/GcProbe.S
+++ b/src/coreclr/nativeaot/Runtime/loongarch64/GcProbe.S
@@ -4,10 +4,11 @@
#include
#include "AsmOffsets.inc"
-#define PROBE_FRAME_SIZE 0x90 // 4 * 8 for fixed part of PInvokeTransitionFrame (fp, ra, m_pThread, m_Flags) +
+#define PROBE_FRAME_SIZE 0xA0 // 4 * 8 for fixed part of PInvokeTransitionFrame (fp, ra, m_pThread, m_Flags) +
// 9 * 8 for callee saved registers +
// 1 * 8 for caller SP +
- // 2 * 8 for int returns +
+ // 3 * 8 for int returns (a0, a1, a2) +
+ // 1 * 8 for alignment padding +
// 2 * 8 for FP returns
// See PUSH_COOP_PINVOKE_FRAME, this macro is very similar, but also saves return registers
@@ -37,13 +38,15 @@
// Slot at $sp+0x68 is reserved for caller sp
- // Save the integer return registers
+ // Save the integer return registers, a2 might contain an objectref (async continuation)
st.d $a0, $sp, 0x70
st.d $a1, $sp, 0x78
+ st.d $a2, $sp, 0x80
+    // Slot at $sp+0x88 is alignment padding
// Save the FP return registers
- fst.d $f0, $sp, 0x80
- fst.d $f1, $sp, 0x88
+ fst.d $f0, $sp, 0x90
+ fst.d $f1, $sp, 0x98
// Perform the rest of the PInvokeTransitionFrame initialization.
st.d \threadReg, $sp, OFFSETOF__PInvokeTransitionFrame__m_pThread // Thread * (unused by stackwalker)
@@ -66,10 +69,11 @@
// Restore the integer return registers
ld.d $a0, $sp, 0x70
ld.d $a1, $sp, 0x78
+ ld.d $a2, $sp, 0x80
// Restore the FP return registers
- fld.d $f0, $sp, 0x80
- fld.d $f1, $sp, 0x88
+ fld.d $f0, $sp, 0x90
+ fld.d $f1, $sp, 0x98
// Restore callee saved registers
EPILOG_RESTORE_REG_PAIR 23, 24, 0x20
@@ -89,25 +93,26 @@
// All registers correct for return to the original return address.
//
// Register state on exit:
-// a2: thread pointer
+// a4: thread pointer
+// a0, a1, a2: preserved
//
.macro FixupHijackedCallstack
- // a2 <- GetThread()
- INLINE_GETTHREAD $a2
+ // a4 <- GetThread()
+ INLINE_GETTHREAD $a4
//
// Fix the stack by restoring the original return address
//
// Load m_pvHijackedReturnAddress
- ld.d $ra, $a2, OFFSETOF__Thread__m_pvHijackedReturnAddress
+ ld.d $ra, $a4, OFFSETOF__Thread__m_pvHijackedReturnAddress
//
// Clear hijack state
//
// Clear m_ppvHijackedReturnAddressLocation and m_pvHijackedReturnAddress
- st.d $zero, $a2, OFFSETOF__Thread__m_ppvHijackedReturnAddressLocation
- st.d $zero, $a2, OFFSETOF__Thread__m_ppvHijackedReturnAddressLocation + 8
+ st.d $zero, $a4, OFFSETOF__Thread__m_ppvHijackedReturnAddressLocation
+ st.d $zero, $a4, OFFSETOF__Thread__m_ppvHijackedReturnAddressLocation + 8
.endm
//
@@ -122,16 +127,16 @@ NESTED_ENTRY RhpGcProbeHijack, _TEXT, NoHandler
jirl $r0, $ra, 0
LOCAL_LABEL(WaitForGC):
- li.d $t3, (DEFAULT_FRAME_SAVE_FLAGS + PTFF_SAVE_R4 + PTFF_SAVE_R5 + PTFF_THREAD_HIJACK)
+ li.d $t3, (DEFAULT_FRAME_SAVE_FLAGS + PTFF_SAVE_R4 + PTFF_SAVE_R5 + PTFF_SAVE_R6 + PTFF_THREAD_HIJACK)
b C_FUNC(RhpWaitForGC)
NESTED_END RhpGcProbeHijack
.global C_FUNC(RhpThrowHwEx)
NESTED_ENTRY RhpWaitForGC, _TEXT, NoHandler
- PUSH_PROBE_FRAME $a2, $a3, $t3
+ PUSH_PROBE_FRAME $a4, $a3, $t3
- ld.d $a0, $a2, OFFSETOF__Thread__m_pDeferredTransitionFrame
+ ld.d $a0, $a4, OFFSETOF__Thread__m_pDeferredTransitionFrame
bl C_FUNC(RhpWaitForGC2)
POP_PROBE_FRAME
diff --git a/src/coreclr/nativeaot/Runtime/unix/unixasmmacrosloongarch64.inc b/src/coreclr/nativeaot/Runtime/unix/unixasmmacrosloongarch64.inc
index 265a188f82eb4a..cf3583aae5ba87 100644
--- a/src/coreclr/nativeaot/Runtime/unix/unixasmmacrosloongarch64.inc
+++ b/src/coreclr/nativeaot/Runtime/unix/unixasmmacrosloongarch64.inc
@@ -198,6 +198,7 @@ C_FUNC(\Name):
#define PTFF_SAVE_SP 0x00000200
#define PTFF_SAVE_R4 0x00000800
#define PTFF_SAVE_R5 0x00001000
+#define PTFF_SAVE_R6 0x00002000
#define PTFF_SAVE_ALL_PRESERVED 0x000001FF // NOTE: r23-r31
#define PTFF_THREAD_HIJACK 0x80000000
diff --git a/src/coreclr/scripts/superpmi.py b/src/coreclr/scripts/superpmi.py
index 057a130765a2b3..6c9740406ef036 100644
--- a/src/coreclr/scripts/superpmi.py
+++ b/src/coreclr/scripts/superpmi.py
@@ -20,6 +20,7 @@
import asyncio
import csv
import datetime
+import html
import json
import locale
import logging
@@ -2997,7 +2998,7 @@ def write_example_diffs_to_markdown_summary(write_fh, asm_diffs):
for (func_name, diff, diff_text) in examples_to_put_in_summary:
base_size = int(diff["Base ActualCodeBytes"])
diff_size = int(diff["Diff ActualCodeBytes"])
- with DetailsSection(write_fh, "{} ({}) : {}".format(format_delta(base_size, diff_size), compute_and_format_pct(base_size, diff_size), func_name)):
+ with DetailsSection(write_fh, "{} ({}) : {}".format(format_delta(base_size, diff_size), compute_and_format_pct(base_size, diff_size), html.escape(func_name))):
write_fh.write(diff_text)
################################################################################
diff --git a/src/coreclr/tools/Common/Compiler/DependencyAnalysis/AssemblyStubNode.cs b/src/coreclr/tools/Common/Compiler/DependencyAnalysis/AssemblyStubNode.cs
index e31f93bb39bb47..9fdb87be78def2 100644
--- a/src/coreclr/tools/Common/Compiler/DependencyAnalysis/AssemblyStubNode.cs
+++ b/src/coreclr/tools/Common/Compiler/DependencyAnalysis/AssemblyStubNode.cs
@@ -7,6 +7,9 @@
namespace ILCompiler.DependencyAnalysis
{
+ // TODO-Wasm: Some instances of AssemblyStubNode will need to implement INodeWithTypeSignature
+ // if they need to be callable from Wasm, though it may not make sense for the base
+ // class to implement INodeWithTypeSignature.
public abstract class AssemblyStubNode : ObjectNode, ISymbolDefinitionNode
{
public AssemblyStubNode()
diff --git a/src/coreclr/tools/Common/Compiler/DependencyAnalysis/INodeWithTypeSignature.cs b/src/coreclr/tools/Common/Compiler/DependencyAnalysis/INodeWithTypeSignature.cs
new file mode 100644
index 00000000000000..0471d3ca296a63
--- /dev/null
+++ b/src/coreclr/tools/Common/Compiler/DependencyAnalysis/INodeWithTypeSignature.cs
@@ -0,0 +1,18 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+using Internal.TypeSystem;
+
+namespace ILCompiler.DependencyAnalysis
+{
+ public interface INodeWithTypeSignature : ISymbolDefinitionNode
+ {
+ MethodSignature Signature { get; }
+ bool IsUnmanagedCallersOnly { get; }
+ }
+
+ public interface IMethodCodeNodeWithTypeSignature : IMethodNode, INodeWithTypeSignature
+ {
+ MethodSignature INodeWithTypeSignature.Signature => Method.Signature;
+ bool INodeWithTypeSignature.IsUnmanagedCallersOnly => Method.IsUnmanagedCallersOnly;
+ }
+}
diff --git a/src/coreclr/tools/Common/Compiler/DependencyAnalysis/ObjectNode.cs b/src/coreclr/tools/Common/Compiler/DependencyAnalysis/ObjectNode.cs
index 327447571686fa..151fa1e8f587bf 100644
--- a/src/coreclr/tools/Common/Compiler/DependencyAnalysis/ObjectNode.cs
+++ b/src/coreclr/tools/Common/Compiler/DependencyAnalysis/ObjectNode.cs
@@ -68,6 +68,14 @@ public sealed override IEnumerable GetStaticDependencies(No
}
}
+ if (factory.Target.IsWasm && this is IMethodCodeNodeWithTypeSignature wasmMethodCodeNode)
+ {
+ dependencies ??= new DependencyList();
+
+ WasmTypeNode wasmTypeNode = factory.WasmTypeNode(wasmMethodCodeNode.Method);
+ dependencies.Add(wasmTypeNode, "Wasm Method Code Nodes Require Signature");
+ }
+
if (dependencies == null)
return Array.Empty();
else
diff --git a/src/coreclr/tools/Common/Compiler/DependencyAnalysis/Relocation.cs b/src/coreclr/tools/Common/Compiler/DependencyAnalysis/Relocation.cs
index fa68031839b1d4..d8f541b8f88d03 100644
--- a/src/coreclr/tools/Common/Compiler/DependencyAnalysis/Relocation.cs
+++ b/src/coreclr/tools/Common/Compiler/DependencyAnalysis/Relocation.cs
@@ -449,6 +449,7 @@ private static unsafe void PutLoongArch64PC12(uint* pCode, long imm)
Debug.Assert((pcInstr & 0xFE000000) == 0x1a000000); // Must be pcalau12i
+ pcInstr &= 0xFE00001F; // keep bits 31-25, 4-0
// Assemble the pc-relative high 20 bits of 'imm' into the pcalau12i instruction
pcInstr |= (uint)((imm >> 7) & 0x1FFFFE0);
@@ -456,7 +457,8 @@ private static unsafe void PutLoongArch64PC12(uint* pCode, long imm)
pcInstr = *(pCode + 1);
- // Assemble the pc-relative low 12 bits of 'imm' into the addid or ld instruction
+ pcInstr &= 0xFFC003FF; // keep bits 31-22, 9-0
+ // Assemble the pc-relative low 12 bits of 'imm' into the addi.d or ld instruction
pcInstr |= (uint)((imm & 0xFFF) << 10);
*(pCode + 1) = pcInstr; // write the assembled instruction
@@ -493,6 +495,7 @@ private static unsafe void PutLoongArch64JIR(uint* pCode, long imm38)
long imm = imm38 + relOff;
relOff = (((imm & 0x1ffff) - relOff) >> 2) & 0xffff;
+ pcInstr &= 0xFE00001F; // keep bits 31-25, 4-0
// Assemble the pc-relative high 20 bits of 'imm38' into the pcaddu18i instruction
pcInstr |= (uint)(((imm >> 18) & 0xFFFFF) << 5);
@@ -500,6 +503,7 @@ private static unsafe void PutLoongArch64JIR(uint* pCode, long imm38)
pcInstr = *(pCode + 1);
+ pcInstr &= 0xFC0003FF; // keep bits 31-26, 9-0
// Assemble the pc-relative low 18 bits of 'imm38' into the jirl instruction
pcInstr |= (uint)(relOff << 10);
diff --git a/src/coreclr/tools/Common/Compiler/DependencyAnalysis/Target_Wasm/WasmTypes.cs b/src/coreclr/tools/Common/Compiler/DependencyAnalysis/Target_Wasm/WasmTypes.cs
index da37129e4b501e..f226eed7b825fe 100644
--- a/src/coreclr/tools/Common/Compiler/DependencyAnalysis/Target_Wasm/WasmTypes.cs
+++ b/src/coreclr/tools/Common/Compiler/DependencyAnalysis/Target_Wasm/WasmTypes.cs
@@ -4,6 +4,7 @@
using System;
using System.Diagnostics;
using System.Linq;
+
using ILCompiler.ObjectWriter;
using Internal.JitInterface;
diff --git a/src/coreclr/tools/Common/Compiler/DependencyAnalysis/Target_Wasm/WasmTypeNode.cs b/src/coreclr/tools/Common/Compiler/DependencyAnalysis/WasmTypeNode.cs
similarity index 96%
rename from src/coreclr/tools/Common/Compiler/DependencyAnalysis/Target_Wasm/WasmTypeNode.cs
rename to src/coreclr/tools/Common/Compiler/DependencyAnalysis/WasmTypeNode.cs
index 4ef0659e166c1a..20deff75fe2e83 100644
--- a/src/coreclr/tools/Common/Compiler/DependencyAnalysis/Target_Wasm/WasmTypeNode.cs
+++ b/src/coreclr/tools/Common/Compiler/DependencyAnalysis/WasmTypeNode.cs
@@ -2,10 +2,11 @@
// The .NET Foundation licenses this file to you under the MIT license.
using System;
-using ILCompiler.DependencyAnalysis;
+
using ILCompiler.ObjectWriter;
+using ILCompiler.DependencyAnalysis.Wasm;
-namespace ILCompiler.DependencyAnalysis.Wasm
+namespace ILCompiler.DependencyAnalysis
{
//
// Represents a WASM type signature, e.g. "(i32, i32) -> (i64)". Used as a relocation target for things like 'call_indirect'.
diff --git a/src/coreclr/tools/Common/Compiler/ObjectWriter/ObjectWriter.cs b/src/coreclr/tools/Common/Compiler/ObjectWriter/ObjectWriter.cs
index 9f0f397a364a07..a4eca181141b60 100644
--- a/src/coreclr/tools/Common/Compiler/ObjectWriter/ObjectWriter.cs
+++ b/src/coreclr/tools/Common/Compiler/ObjectWriter/ObjectWriter.cs
@@ -12,11 +12,12 @@
using ILCompiler.DependencyAnalysisFramework;
using Internal.Text;
using Internal.TypeSystem;
+
using static ILCompiler.DependencyAnalysis.ObjectNode;
using static ILCompiler.DependencyAnalysis.RelocType;
using ObjectData = ILCompiler.DependencyAnalysis.ObjectNode.ObjectData;
+
using CodeDataLayout = CodeDataLayoutMode.CodeDataLayout;
-using ILCompiler.DependencyAnalysis.Wasm;
namespace ILCompiler.ObjectWriter
{
@@ -441,17 +442,27 @@ public virtual void EmitObject(Stream outputFileStream, IReadOnlyCollection _uniqueSignatures = new();
private Dictionary _uniqueSymbols = new();
- private int _signatureCount = 0;
private int _methodCount = 0;
private protected override void RecordMethodSignature(WasmTypeNode signature)
{
- int signatureIndex = _signatureCount;
var mangledNameBuilder = new Utf8StringBuilder();
signature.AppendMangledName(_nodeFactory.NameMangler, mangledNameBuilder);
Utf8String mangledName = mangledNameBuilder.ToUtf8String();
- // Note that we do not expect duplicates here, since crossgen's node cache should handle this and all nodes representing
- // identical signatures in a module should point to the same node instance
- _uniqueSignatures.Add(mangledName, signatureIndex);
- _signatureCount++;
+            // Note that we do not expect duplicates here; crossgen should deduplicate signatures already
+ // using the node cache, so we can simply add the new signature with the next available index.
+ _uniqueSignatures.Add(mangledName, _uniqueSignatures.Count);
}
- private protected override void RecordMethodDeclaration(ISymbolDefinitionNode symbol, MethodDesc desc)
+ private protected override void RecordMethodDeclaration(INodeWithTypeSignature node, MethodDesc desc)
{
WriteSignatureIndexForFunction(desc);
- _uniqueSymbols.Add(symbol.GetMangledName(_nodeFactory.NameMangler), _methodCount);
+ _uniqueSymbols.Add(node.GetMangledName(_nodeFactory.NameMangler), _methodCount);
_methodCount++;
}
diff --git a/src/coreclr/tools/Common/JitInterface/WasmLowering.cs b/src/coreclr/tools/Common/JitInterface/WasmLowering.cs
index 0db56ce99bd138..570f2d03c731f3 100644
--- a/src/coreclr/tools/Common/JitInterface/WasmLowering.cs
+++ b/src/coreclr/tools/Common/JitInterface/WasmLowering.cs
@@ -4,7 +4,9 @@
using System;
using System.Collections.Generic;
using System.Diagnostics;
+
using ILCompiler.DependencyAnalysis.Wasm;
+
using Internal.TypeSystem;
namespace Internal.JitInterface
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/NodeFactory.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/NodeFactory.cs
index 820cf40f5813be..44f752a62fc035 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/NodeFactory.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/NodeFactory.cs
@@ -7,6 +7,7 @@
using System.Diagnostics;
using System.Text;
+using ILCompiler.DependencyAnalysis.Wasm;
using ILCompiler.DependencyAnalysisFramework;
using Internal.IL;
@@ -613,6 +614,11 @@ private void CreateNodeCaches()
return new AnalysisCharacteristicNode(c);
});
+ _wasmTypeNodes = new NodeCache(key =>
+ {
+ return new WasmTypeNode(key);
+ });
+
NativeLayout = new NativeLayoutHelper(this);
}
@@ -1569,6 +1575,17 @@ public AnalysisCharacteristicNode AnalysisCharacteristic(string ch)
return _analysisCharacteristics.GetOrAdd(ch);
}
+ private NodeCache _wasmTypeNodes;
+
+ // TODO-Wasm: Do not use WasmFuncType directly as the key for better
+ // memory efficiency on lookup
+ public WasmTypeNode WasmTypeNode(MethodDesc desc)
+ {
+ // TODO-Wasm: Construct proper function type based on the passed in MethodDesc
+ // once we have defined lowering rules for signatures in NativeAOT.
+ throw new NotImplementedException("NAOT wasm type signature lowering not yet implemented");
+ }
+
///
/// Returns alternative symbol name that object writer should produce for given symbols
/// in addition to the regular one.
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/ILCompiler.Compiler.csproj b/src/coreclr/tools/aot/ILCompiler.Compiler/ILCompiler.Compiler.csproj
index fecf02a45a3c46..c4061f8c4af147 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/ILCompiler.Compiler.csproj
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/ILCompiler.Compiler.csproj
@@ -309,6 +309,8 @@
+
+
@@ -325,7 +327,6 @@
-
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/DelayLoadHelperMethodImport.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/DelayLoadHelperMethodImport.cs
index 4c73a0ab08bcab..980dfc535ceb88 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/DelayLoadHelperMethodImport.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/DelayLoadHelperMethodImport.cs
@@ -15,7 +15,7 @@ namespace ILCompiler.DependencyAnalysis.ReadyToRun
/// In addition to PrecodeHelperImport instances of this import type emit GC ref map
/// entries into the R2R executable.
///
- public class DelayLoadHelperMethodImport : DelayLoadHelperImport, IMethodNode
+ public class DelayLoadHelperMethodImport : DelayLoadHelperImport, IMethodCodeNodeWithTypeSignature
{
private readonly MethodWithToken _method;
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/DelayLoadMethodImport.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/DelayLoadMethodImport.cs
index dd0d49c8db19c2..3bdef8fb2f8be4 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/DelayLoadMethodImport.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/DelayLoadMethodImport.cs
@@ -9,7 +9,7 @@
namespace ILCompiler.DependencyAnalysis.ReadyToRun
{
- public class DelayLoadMethodImport : DelayLoadHelperImport, IMethodNode
+ public class DelayLoadMethodImport : DelayLoadHelperImport, IMethodCodeNodeWithTypeSignature
{
private readonly MethodWithGCInfo _localMethod;
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/MethodWithGCInfo.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/MethodWithGCInfo.cs
index aa3b9efa2f1526..0f65826a2c2d0e 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/MethodWithGCInfo.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/MethodWithGCInfo.cs
@@ -13,7 +13,7 @@
namespace ILCompiler.DependencyAnalysis.ReadyToRun
{
- public class MethodWithGCInfo : ObjectNode, IMethodBodyNode, ISymbolDefinitionNode
+ public class MethodWithGCInfo : ObjectNode, IMethodBodyNode, IMethodCodeNodeWithTypeSignature
{
public readonly MethodGCInfoNode GCInfoNode;
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/Target_Wasm/ImportThunk.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/Target_Wasm/ImportThunk.cs
index e84b9e8a4f381e..1f606488623bfa 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/Target_Wasm/ImportThunk.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/Target_Wasm/ImportThunk.cs
@@ -3,6 +3,7 @@
using System;
using System.Diagnostics;
+
using ILCompiler.DependencyAnalysis.Wasm;
namespace ILCompiler.DependencyAnalysis.ReadyToRun
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRunCodegenNodeFactory.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRunCodegenNodeFactory.cs
index b8fe7c2466f5f6..8a9602d24f51ef 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRunCodegenNodeFactory.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRunCodegenNodeFactory.cs
@@ -1096,5 +1096,13 @@ public WasmTypeNode WasmTypeNode(CorInfoWasmType[] types)
WasmFuncType funcType = WasmFuncType.FromCorInfoSignature(types);
return _wasmTypeNodes.GetOrAdd(funcType);
}
+
+ // TODO-Wasm: Do not use WasmFuncType directly as the key for better
+ // memory efficiency on lookup
+ public WasmTypeNode WasmTypeNode(MethodDesc method)
+ {
+ WasmFuncType funcType = WasmLowering.GetSignature(method);
+ return _wasmTypeNodes.GetOrAdd(funcType);
+ }
}
}
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/ILCompiler.ReadyToRun.csproj b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/ILCompiler.ReadyToRun.csproj
index 1d78e70b1f47a4..e57807fa2c0c1c 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/ILCompiler.ReadyToRun.csproj
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/ILCompiler.ReadyToRun.csproj
@@ -83,6 +83,7 @@
+
@@ -96,6 +97,7 @@
+
@@ -113,7 +115,6 @@
-
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs
index 3b9259043aec10..c5e2d1f8ca40c3 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs
@@ -24,6 +24,7 @@
using ILCompiler.DependencyAnalysis;
using ILCompiler.DependencyAnalysis.ReadyToRun;
using ILCompiler.DependencyAnalysis.Wasm;
+
using System.Text;
using System.Runtime.CompilerServices;
using ILCompiler.ReadyToRun.TypeSystem;
diff --git a/src/coreclr/tools/aot/ILCompiler.RyuJit/Compiler/DependencyAnalysis/MethodCodeNode.cs b/src/coreclr/tools/aot/ILCompiler.RyuJit/Compiler/DependencyAnalysis/MethodCodeNode.cs
index 7681e8c062dc27..64920d89a0b951 100644
--- a/src/coreclr/tools/aot/ILCompiler.RyuJit/Compiler/DependencyAnalysis/MethodCodeNode.cs
+++ b/src/coreclr/tools/aot/ILCompiler.RyuJit/Compiler/DependencyAnalysis/MethodCodeNode.cs
@@ -15,7 +15,7 @@
namespace ILCompiler.DependencyAnalysis
{
[DebuggerTypeProxy(typeof(MethodCodeNodeDebugView))]
- public class MethodCodeNode : ObjectNode, IMethodBodyNode, INodeWithCodeInfo, INodeWithDebugInfo, ISymbolDefinitionNode, ISpecialUnboxThunkNode
+ public class MethodCodeNode : ObjectNode, IMethodBodyNode, INodeWithCodeInfo, INodeWithDebugInfo, ISpecialUnboxThunkNode, IMethodCodeNodeWithTypeSignature
{
private MethodDesc _method;
private ObjectData _methodCode;
diff --git a/src/coreclr/utilcode/util.cpp b/src/coreclr/utilcode/util.cpp
index 4efc5bd767c77c..b85a788096c56b 100644
--- a/src/coreclr/utilcode/util.cpp
+++ b/src/coreclr/utilcode/util.cpp
@@ -2185,6 +2185,7 @@ void PutLoongArch64PC12(UINT32 * pCode, INT64 imm)
_ASSERTE((pcInstr & 0xFE000000) == 0x1a000000); // Must be pcalau12i
+ pcInstr &= 0xFE00001F; // keep bits 31-25, 4-0
// Assemble the pc-relative high 20 bits of 'imm' into the pcalau12i instruction
pcInstr |= (UINT32)((imm >> 7) & 0x1FFFFE0);
@@ -2192,6 +2193,7 @@ void PutLoongArch64PC12(UINT32 * pCode, INT64 imm)
pcInstr = *(pCode + 1);
+ pcInstr &= 0xFFC003FF; // keep bits 31-22, 9-0
// Assemble the pc-relative low 12 bits of 'imm' into the addid or ld instruction
pcInstr |= (UINT32)((imm & 0xFFF) << 10);
@@ -2218,6 +2220,7 @@ void PutLoongArch64JIR(UINT32 * pCode, INT64 imm38)
INT64 imm = imm38 + relOff;
relOff = (((imm & 0x1ffff) - relOff) >> 2) & 0xffff;
+ pcInstr &= 0xFE00001F; // keep bits 31-25, 4-0
// Assemble the pc-relative high 20 bits of 'imm38' into the pcaddu18i instruction
pcInstr |= (UINT32)(((imm >> 18) & 0xFFFFF) << 5);
@@ -2225,6 +2228,7 @@ void PutLoongArch64JIR(UINT32 * pCode, INT64 imm38)
pcInstr = *(pCode + 1);
+ pcInstr &= 0xFC0003FF; // keep bits 31-26, 9-0
// Assemble the pc-relative low 18 bits of 'imm38' into the jirl instruction
pcInstr |= (UINT32)(relOff << 10);
diff --git a/src/coreclr/vm/appdomain.cpp b/src/coreclr/vm/appdomain.cpp
index 88d6b1642b0f24..4b519cb48aa8e9 100644
--- a/src/coreclr/vm/appdomain.cpp
+++ b/src/coreclr/vm/appdomain.cpp
@@ -2537,15 +2537,15 @@ Assembly *AppDomain::LoadAssembly(FileLoadLock *pLock, FileLoadLevel targetLevel
}
CONTRACT_END;
- Assembly *pAssembly = pLock->GetAssembly();
-
// Make sure we release the lock on exit
FileLoadLockRefHolder lockRef(pLock);
// Do a quick out check for the already loaded case.
if (pLock->GetLoadLevel() >= targetLevel)
{
+ Assembly* pAssembly = pLock->GetAssembly();
_ASSERTE(pAssembly != nullptr);
+
pAssembly->ThrowIfError(targetLevel);
RETURN pAssembly;
@@ -2616,7 +2616,7 @@ Assembly *AppDomain::LoadAssembly(FileLoadLock *pLock, FileLoadLevel targetLevel
fileLoadLevelName[pLock->GetLoadLevel()]));
}
- pAssembly = pLock->GetAssembly();
+ Assembly* pAssembly = pLock->GetAssembly();
_ASSERTE(pAssembly != nullptr); // We should always be loading to at least FILE_LOAD_ALLOCATE, so the assembly should be created
// There may have been an error stored on the domain file by another thread, or from a previous load
diff --git a/src/coreclr/vm/comcache.h b/src/coreclr/vm/comcache.h
index 61059b9c9cec4b..2f02827f3879a2 100644
--- a/src/coreclr/vm/comcache.h
+++ b/src/coreclr/vm/comcache.h
@@ -262,6 +262,15 @@ struct InterfaceEntry
// will not try and optimize reads and writes to them.
Volatile m_pMT; // Interface asked for
Volatile m_pUnknown; // Result of query
+
+ friend struct ::cdac_data;
+};
+
+template<>
+struct cdac_data
+{
+ static constexpr size_t MethodTable = offsetof(InterfaceEntry, m_pMT);
+ static constexpr size_t Unknown = offsetof(InterfaceEntry, m_pUnknown);
};
class CtxEntryCacheTraits : public DefaultSHashTraits
diff --git a/src/coreclr/vm/datadescriptor/datadescriptor.inc b/src/coreclr/vm/datadescriptor/datadescriptor.inc
index 48a72a4040c40f..61b49340a39c2d 100644
--- a/src/coreclr/vm/datadescriptor/datadescriptor.inc
+++ b/src/coreclr/vm/datadescriptor/datadescriptor.inc
@@ -1115,12 +1115,19 @@ CDAC_TYPE_FIELD(RCW, /*pointer*/, NextRCW, cdac_data::NextRCW)
CDAC_TYPE_FIELD(RCW, /*uint32*/, Flags, cdac_data::Flags)
CDAC_TYPE_FIELD(RCW, /*pointer*/, CtxCookie, cdac_data::CtxCookie)
CDAC_TYPE_FIELD(RCW, /*pointer*/, CtxEntry, cdac_data::CtxEntry)
+CDAC_TYPE_FIELD(RCW, /*inline array*/, InterfaceEntries, cdac_data::InterfaceEntries)
CDAC_TYPE_END(RCW)
CDAC_TYPE_BEGIN(CtxEntry)
CDAC_TYPE_INDETERMINATE(CtxEntry)
CDAC_TYPE_FIELD(CtxEntry, /*pointer*/, STAThread, cdac_data::STAThread)
CDAC_TYPE_END(CtxEntry)
+
+CDAC_TYPE_BEGIN(InterfaceEntry)
+CDAC_TYPE_SIZE(sizeof(InterfaceEntry))
+CDAC_TYPE_FIELD(InterfaceEntry, /*pointer*/, MethodTable, cdac_data::MethodTable)
+CDAC_TYPE_FIELD(InterfaceEntry, /*pointer*/, Unknown, cdac_data::Unknown)
+CDAC_TYPE_END(InterfaceEntry)
#endif // FEATURE_COMINTEROP
#ifdef FEATURE_COMWRAPPERS
@@ -1306,6 +1313,7 @@ CDAC_GLOBAL_POINTER(TearOffAddRef, &g_cdacTearOffAddRef)
CDAC_GLOBAL_POINTER(TearOffAddRefSimple, &g_cdacTearOffAddRefSimple)
CDAC_GLOBAL_POINTER(TearOffAddRefSimpleInner, &g_cdacTearOffAddRefSimpleInner)
CDAC_GLOBAL_POINTER(RCWCleanupList, &g_pRCWCleanupList)
+CDAC_GLOBAL(RCWInterfaceCacheSize, uint32, INTERFACE_ENTRY_CACHE_SIZE)
#endif // FEATURE_COMINTEROP
// It is important for the subdescriptor pointers to be the last pointers in the global structure.
diff --git a/src/coreclr/vm/loongarch64/asmhelpers.S b/src/coreclr/vm/loongarch64/asmhelpers.S
index 9f424c39dd30f5..bd6b7beec16007 100644
--- a/src/coreclr/vm/loongarch64/asmhelpers.S
+++ b/src/coreclr/vm/loongarch64/asmhelpers.S
@@ -637,10 +637,12 @@ NESTED_ENTRY OnHijackTripThread, _TEXT, NoHandler
// save any integral return value(s)
st.d $a0, $sp, 88
st.d $a1, $sp, 96
+ // save async continuation return value
+ st.d $a2, $sp, 104
// save any FP return value(s)
- fst.d $f0, $sp, 104
- fst.d $f1, $sp, 112
+ fst.d $f0, $sp, 112
+ fst.d $f1, $sp, 120
ori $a0, $sp, 0
bl C_FUNC(OnHijackWorker)
@@ -650,10 +652,12 @@ NESTED_ENTRY OnHijackTripThread, _TEXT, NoHandler
// restore any integral return value(s)
ld.d $a0, $sp, 88
ld.d $a1, $sp, 96
+ // restore async continuation return value
+ ld.d $a2, $sp, 104
// restore any FP return value(s)
- fld.d $f0, $sp, 104
- fld.d $f1, $sp, 112
+ fld.d $f0, $sp, 112
+ fld.d $f1, $sp, 120
EPILOG_RESTORE_REG_PAIR 23, 24, 16
EPILOG_RESTORE_REG_PAIR 25, 26, 32
diff --git a/src/coreclr/vm/loongarch64/cgencpu.h b/src/coreclr/vm/loongarch64/cgencpu.h
index eb12a56d8dfd19..039a85d1782c02 100644
--- a/src/coreclr/vm/loongarch64/cgencpu.h
+++ b/src/coreclr/vm/loongarch64/cgencpu.h
@@ -429,6 +429,11 @@ struct HijackArgs
size_t ReturnValue[2];
};
union
+ {
+ DWORD64 A2;
+ size_t AsyncRet;
+ };
+ union
{
struct {
DWORD64 F0;
diff --git a/src/coreclr/vm/loongarch64/stubs.cpp b/src/coreclr/vm/loongarch64/stubs.cpp
index b974c9511f3a63..8f69312046cb30 100644
--- a/src/coreclr/vm/loongarch64/stubs.cpp
+++ b/src/coreclr/vm/loongarch64/stubs.cpp
@@ -474,9 +474,11 @@ void HijackFrame::UpdateRegDisplay_Impl(const PREGDISPLAY pRD, bool updateFloats
pRD->pCurrentContext->A0 = m_Args->A0;
pRD->pCurrentContext->A1 = m_Args->A1;
+ pRD->pCurrentContext->A2 = m_Args->A2;
pRD->volatileCurrContextPointers.A0 = &m_Args->A0;
pRD->volatileCurrContextPointers.A1 = &m_Args->A1;
+ pRD->volatileCurrContextPointers.A2 = &m_Args->A2;
pRD->pCurrentContext->S0 = m_Args->S0;
pRD->pCurrentContext->S1 = m_Args->S1;
diff --git a/src/coreclr/vm/prestub.cpp b/src/coreclr/vm/prestub.cpp
index b46b1bfefc7b97..fafa016af69e3c 100644
--- a/src/coreclr/vm/prestub.cpp
+++ b/src/coreclr/vm/prestub.cpp
@@ -2405,6 +2405,11 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT, CallerGCMode callerGCMo
void* ilStubInterpData = PortableEntryPoint::GetInterpreterData(pCode);
_ASSERTE(ilStubInterpData != NULL);
SetInterpreterCode((InterpByteCodeStart*)ilStubInterpData);
+
+ // Use this method's own PortableEntryPoint rather than the stub's.
+            // This is required to maintain a 1:1 mapping between a MethodDesc and its entrypoint.
+ pCode = GetPortableEntryPoint();
+ PortableEntryPoint::SetInterpreterData(pCode, (PCODE)(TADDR)ilStubInterpData);
SetCodeEntryPoint(pCode);
#else // !FEATURE_PORTABLE_ENTRYPOINTS
if (!GetOrCreatePrecode()->SetTargetInterlocked(pStub->GetEntryPoint()))
diff --git a/src/coreclr/vm/runtimecallablewrapper.h b/src/coreclr/vm/runtimecallablewrapper.h
index 54ad924248ffe8..fee5c74177ee78 100644
--- a/src/coreclr/vm/runtimecallablewrapper.h
+++ b/src/coreclr/vm/runtimecallablewrapper.h
@@ -542,7 +542,6 @@ private :
// IUnkEntry needs to access m_UnkEntry field
friend IUnkEntry;
- // cdac_data needs access to m_UnkEntry
friend struct ::cdac_data;
private :
@@ -591,6 +590,7 @@ struct cdac_data
static constexpr size_t Flags = offsetof(RCW, m_Flags);
static constexpr size_t CtxCookie = offsetof(RCW, m_UnkEntry) + offsetof(IUnkEntry, m_pCtxCookie);
static constexpr size_t CtxEntry = offsetof(RCW, m_UnkEntry) + offsetof(IUnkEntry, m_pCtxEntry);
+ static constexpr size_t InterfaceEntries = offsetof(RCW, m_aInterfaceEntries);
};
inline RCW::CreationFlags operator|(RCW::CreationFlags lhs, RCW::CreationFlags rhs)
diff --git a/src/libraries/Common/src/SourceGenerators/DiagnosticInfo.cs b/src/libraries/Common/src/SourceGenerators/DiagnosticInfo.cs
deleted file mode 100644
index 74f44f99c62baa..00000000000000
--- a/src/libraries/Common/src/SourceGenerators/DiagnosticInfo.cs
+++ /dev/null
@@ -1,60 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System;
-using System.Linq;
-using System.Numerics.Hashing;
-using Microsoft.CodeAnalysis;
-
-namespace SourceGenerators;
-
-///
-/// Descriptor for diagnostic instances using structural equality comparison.
-/// Provides a work-around for https://github.com/dotnet/roslyn/issues/68291.
-///
-internal readonly struct DiagnosticInfo : IEquatable
-{
- public DiagnosticDescriptor Descriptor { get; private init; }
- public object?[] MessageArgs { get; private init; }
- public Location? Location { get; private init; }
-
- public static DiagnosticInfo Create(DiagnosticDescriptor descriptor, Location? location, object?[]? messageArgs)
- {
- Location? trimmedLocation = location is null ? null : GetTrimmedLocation(location);
-
- return new DiagnosticInfo
- {
- Descriptor = descriptor,
- Location = trimmedLocation,
- MessageArgs = messageArgs ?? Array.Empty
-
+
-
+
? Diagnostics { get; private set; }
+ public List? Diagnostics { get; private set; }
public SourceGenerationSpec? GetSourceGenerationSpec(ImmutableArray invocations, CancellationToken cancellationToken)
{
if (!_langVersionIsSupported)
{
- RecordDiagnostic(DiagnosticDescriptors.LanguageVersionNotSupported, trimmedLocation: Location.None);
+ RecordDiagnostic(DiagnosticDescriptors.LanguageVersionNotSupported, location: Location.None);
return null;
}
@@ -979,10 +979,10 @@ private void ReportContainingTypeDiagnosticIfRequired(TypeParseInfo typeParseInf
}
}
- private void RecordDiagnostic(DiagnosticDescriptor descriptor, Location trimmedLocation, params object?[]? messageArgs)
+ private void RecordDiagnostic(DiagnosticDescriptor descriptor, Location location, params object?[]? messageArgs)
{
- Diagnostics ??= new List();
- Diagnostics.Add(DiagnosticInfo.Create(descriptor, trimmedLocation, messageArgs));
+ Diagnostics ??= new List();
+ Diagnostics.Add(Diagnostic.Create(descriptor, location, messageArgs));
}
private void CheckIfToEmitParseEnumMethod()
diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/ConfigurationBindingGenerator.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/ConfigurationBindingGenerator.cs
index 4a3d5bbf7dea81..816bdff43f5c40 100644
--- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/ConfigurationBindingGenerator.cs
+++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/ConfigurationBindingGenerator.cs
@@ -6,10 +6,10 @@
using System.Diagnostics;
using System.Reflection;
using System.Threading;
+using System.Collections.Immutable;
using Microsoft.CodeAnalysis;
using Microsoft.CodeAnalysis.CSharp;
using Microsoft.CodeAnalysis.CSharp.Syntax;
-using SourceGenerators;
namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration
{
@@ -37,7 +37,7 @@ public void Initialize(IncrementalGeneratorInitializationContext context)
? new CompilationData((CSharpCompilation)compilation)
: null);
- IncrementalValueProvider<(SourceGenerationSpec?, ImmutableEquatableArray?)> genSpec = context.SyntaxProvider
+ IncrementalValueProvider<(SourceGenerationSpec?, ImmutableArray)> genSpec = context.SyntaxProvider
.CreateSyntaxProvider(
(node, _) => BinderInvocation.IsCandidateSyntaxNode(node),
BinderInvocation.Create)
@@ -48,14 +48,16 @@ public void Initialize(IncrementalGeneratorInitializationContext context)
{
if (tuple.Right is not CompilationData compilationData)
{
- return (null, null);
+ return (null, ImmutableArray.Empty);
}
try
{
Parser parser = new(compilationData);
SourceGenerationSpec? spec = parser.GetSourceGenerationSpec(tuple.Left, cancellationToken);
- ImmutableEquatableArray? diagnostics = parser.Diagnostics?.ToImmutableEquatableArray();
+ ImmutableArray diagnostics = parser.Diagnostics is { } diags
+ ? diags.ToImmutableArray()
+ : ImmutableArray.Empty;
return (spec, diagnostics);
}
catch (Exception ex)
@@ -65,7 +67,26 @@ public void Initialize(IncrementalGeneratorInitializationContext context)
})
.WithTrackingName(GenSpecTrackingName);
- context.RegisterSourceOutput(genSpec, ReportDiagnosticsAndEmitSource);
+ // Project the combined pipeline result to just the equatable model, discarding diagnostics.
+ // SourceGenerationSpec implements value equality, so Roslyn's Select operator will compare
+ // successive model snapshots and only propagate changes downstream when the model structurally
+ // differs. This ensures source generation is fully incremental: re-emitting code only when
+ // the binding spec actually changes, not on every keystroke or positional shift.
+ IncrementalValueProvider sourceGenerationSpec =
+ genSpec.Select(static (t, _) => t.Item1);
+
+ context.RegisterSourceOutput(sourceGenerationSpec, EmitSource);
+
+ // Project to just the diagnostics, discarding the model. ImmutableArray does not
+ // implement value equality, so Roslyn's incremental pipeline uses reference equality for these
+ // values — the callback fires on every compilation change. This is by design: diagnostic
+ // emission is cheap, and we need fresh SourceLocation instances that are pragma-suppressible
+ // (cf. https://github.com/dotnet/runtime/issues/92509).
+ // No source code is generated from this pipeline — it exists solely to report diagnostics.
+ IncrementalValueProvider> diagnostics =
+ genSpec.Select(static (t, _) => t.Item2);
+
+ context.RegisterSourceOutput(diagnostics, EmitDiagnostics);
if (!s_hasInitializedInterceptorVersion)
{
@@ -136,17 +157,17 @@ internal static int DetermineInterceptableVersion()
///
public Action? OnSourceEmitting { get; init; }
- private void ReportDiagnosticsAndEmitSource(SourceProductionContext sourceProductionContext, (SourceGenerationSpec? SourceGenerationSpec, ImmutableEquatableArray? Diagnostics) input)
+ private static void EmitDiagnostics(SourceProductionContext context, ImmutableArray diagnostics)
{
- if (input.Diagnostics is ImmutableEquatableArray diagnostics)
+ foreach (Diagnostic diagnostic in diagnostics)
{
- foreach (DiagnosticInfo diagnostic in diagnostics)
- {
- sourceProductionContext.ReportDiagnostic(diagnostic.CreateDiagnostic());
- }
+ context.ReportDiagnostic(diagnostic);
}
+ }
- if (input.SourceGenerationSpec is SourceGenerationSpec spec)
+ private void EmitSource(SourceProductionContext sourceProductionContext, SourceGenerationSpec? spec)
+ {
+ if (spec is not null)
{
OnSourceEmitting?.Invoke(spec);
Emitter emitter = new(spec);
diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Microsoft.Extensions.Configuration.Binder.SourceGeneration.csproj b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Microsoft.Extensions.Configuration.Binder.SourceGeneration.csproj
index bf12a1fc225b81..a0fac6dbbfb626 100644
--- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Microsoft.Extensions.Configuration.Binder.SourceGeneration.csproj
+++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Microsoft.Extensions.Configuration.Binder.SourceGeneration.csproj
@@ -30,7 +30,6 @@
-
diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.cs
index 7eee48fdeb28ca..e15f9625a971a1 100644
--- a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.cs
+++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.cs
@@ -12,6 +12,7 @@
using System.Threading.Tasks;
using Microsoft.CodeAnalysis;
using Microsoft.CodeAnalysis.CSharp;
+using Microsoft.CodeAnalysis.Diagnostics;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Options;
@@ -414,5 +415,129 @@ public class AnotherGraphWithUnsupportedMembers
Assert.True(result.Diagnostics.Any(diag => diag.Id == Diagnostics.TypeNotSupported.Id));
Assert.True(result.Diagnostics.Any(diag => diag.Id == Diagnostics.PropertyNotSupported.Id));
}
+
+ [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsNetCore))]
+ public async Task Diagnostic_HasPragmaSuppressibleLocation()
+ {
+ // SYSLIB1103: ValueTypesInvalidForBind (Warning, configurable).
+ string source = """
+ #pragma warning disable SYSLIB1103
+ using System;
+ using Microsoft.Extensions.Configuration;
+
+ public class Program
+ {
+ public static void Main()
+ {
+ ConfigurationBuilder configurationBuilder = new();
+ IConfigurationRoot config = configurationBuilder.Build();
+
+ int myInt = 1;
+ config.Bind(myInt);
+ }
+ }
+ """;
+
+ ConfigBindingGenRunResult result = await RunGeneratorAndUpdateCompilation(source);
+ var effective = CompilationWithAnalyzers.GetEffectiveDiagnostics(result.Diagnostics, result.OutputCompilation);
+ Diagnostic diagnostic = Assert.Single(effective, d => d.Id == "SYSLIB1103");
+ Assert.True(diagnostic.IsSuppressed);
+ }
+
+ [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsNetCore))]
+ public async Task Diagnostic_NoPragma_IsNotSuppressed()
+ {
+ string source = """
+ using System;
+ using Microsoft.Extensions.Configuration;
+
+ public class Program
+ {
+ public static void Main()
+ {
+ ConfigurationBuilder configurationBuilder = new();
+ IConfigurationRoot config = configurationBuilder.Build();
+
+ int myInt = 1;
+ config.Bind(myInt);
+ }
+ }
+ """;
+
+ ConfigBindingGenRunResult result = await RunGeneratorAndUpdateCompilation(source);
+ var effective = CompilationWithAnalyzers.GetEffectiveDiagnostics(result.Diagnostics, result.OutputCompilation);
+ Diagnostic diagnostic = Assert.Single(effective, d => d.Id == "SYSLIB1103");
+ Assert.False(diagnostic.IsSuppressed);
+ }
+
+ [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsNetCore))]
+ public async Task Diagnostic_MultipleDiagnostics_OnlySomeSuppressed()
+ {
+ string source = """
+ using System;
+ using System.Collections.Immutable;
+ using System.Text;
+ using System.Text.Json;
+ using Microsoft.Extensions.Configuration;
+
+ public class Program
+ {
+ public static void Main()
+ {
+ ConfigurationBuilder configurationBuilder = new();
+ IConfigurationRoot config = configurationBuilder.Build();
+
+ // SYSLIB1103 suppressed for this call only.
+ #pragma warning disable SYSLIB1103
+ int myInt = 1;
+ config.Bind(myInt);
+ #pragma warning restore SYSLIB1103
+
+ // SYSLIB1103 NOT suppressed for this call.
+ long myLong = 1;
+ config.Bind(myLong);
+ }
+ }
+ """;
+
+ ConfigBindingGenRunResult result = await RunGeneratorAndUpdateCompilation(source);
+ var effective = CompilationWithAnalyzers.GetEffectiveDiagnostics(result.Diagnostics, result.OutputCompilation)
+ .Where(d => d.Id == "SYSLIB1103")
+ .ToList();
+
+ Assert.Equal(2, effective.Count);
+ Assert.Single(effective, d => d.IsSuppressed);
+ Assert.Single(effective, d => !d.IsSuppressed);
+ }
+
+ [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsNetCore))]
+ public async Task Diagnostic_PragmaRestoreOutsideSpan_IsNotSuppressed()
+ {
+ string source = """
+ using System;
+ using Microsoft.Extensions.Configuration;
+
+ public class Program
+ {
+ public static void Main()
+ {
+ ConfigurationBuilder configurationBuilder = new();
+ IConfigurationRoot config = configurationBuilder.Build();
+
+ // Suppress and restore BEFORE the diagnostic site.
+ #pragma warning disable SYSLIB1103
+ #pragma warning restore SYSLIB1103
+
+ int myInt = 1;
+ config.Bind(myInt);
+ }
+ }
+ """;
+
+ ConfigBindingGenRunResult result = await RunGeneratorAndUpdateCompilation(source);
+ var effective = CompilationWithAnalyzers.GetEffectiveDiagnostics(result.Diagnostics, result.OutputCompilation);
+ Diagnostic diagnostic = Assert.Single(effective, d => d.Id == "SYSLIB1103");
+ Assert.False(diagnostic.IsSuppressed);
+ }
}
}
diff --git a/src/libraries/Microsoft.Extensions.Logging.Abstractions/gen/LoggerMessageGenerator.Parser.cs b/src/libraries/Microsoft.Extensions.Logging.Abstractions/gen/LoggerMessageGenerator.Parser.cs
index c69c2db2c07e85..b98abaf7d04c99 100644
--- a/src/libraries/Microsoft.Extensions.Logging.Abstractions/gen/LoggerMessageGenerator.Parser.cs
+++ b/src/libraries/Microsoft.Extensions.Logging.Abstractions/gen/LoggerMessageGenerator.Parser.cs
@@ -38,7 +38,7 @@ internal sealed class Parser
private readonly INamedTypeSymbol _stringSymbol;
private readonly Action? _reportDiagnostic;
- public List Diagnostics { get; } = new();
+ public List Diagnostics { get; } = new();
public Parser(
INamedTypeSymbol loggerMessageAttribute,
@@ -811,12 +811,14 @@ private static string GenerateClassName(TypeDeclarationSyntax typeDeclaration)
private void Diag(DiagnosticDescriptor desc, Location? location, params object?[]? messageArgs)
{
+ Diagnostic diagnostic = Diagnostic.Create(desc, location, messageArgs);
+
// Report immediately if callback is provided (preserves pragma suppression with original locations)
- _reportDiagnostic?.Invoke(Diagnostic.Create(desc, location, messageArgs));
+ _reportDiagnostic?.Invoke(diagnostic);
// Also collect for scenarios that need the diagnostics list; in Roslyn 4.0+ incremental generators,
- // this list is exposed via parser.Diagnostics (as ImmutableEquatableArray) and reported in Execute.
- Diagnostics.Add(DiagnosticInfo.Create(desc, location, messageArgs));
+ // this list is exposed via parser.Diagnostics and reported in the diagnostic pipeline.
+ Diagnostics.Add(diagnostic);
}
private static bool IsBaseOrIdentity(ITypeSymbol source, ITypeSymbol dest, Compilation compilation)
diff --git a/src/libraries/Microsoft.Extensions.Logging.Abstractions/gen/LoggerMessageGenerator.Roslyn4.0.cs b/src/libraries/Microsoft.Extensions.Logging.Abstractions/gen/LoggerMessageGenerator.Roslyn4.0.cs
index ef9fa6582b53b3..9a205f873a6d00 100644
--- a/src/libraries/Microsoft.Extensions.Logging.Abstractions/gen/LoggerMessageGenerator.Roslyn4.0.cs
+++ b/src/libraries/Microsoft.Extensions.Logging.Abstractions/gen/LoggerMessageGenerator.Roslyn4.0.cs
@@ -25,7 +25,7 @@ public static class StepNames
public void Initialize(IncrementalGeneratorInitializationContext context)
{
- IncrementalValuesProvider<(LoggerClassSpec? LoggerClassSpec, ImmutableEquatableArray Diagnostics, bool HasStringCreate)> loggerClasses = context.SyntaxProvider
+ IncrementalValuesProvider<(LoggerClassSpec? LoggerClassSpec, ImmutableArray Diagnostics, bool HasStringCreate)> loggerClasses = context.SyntaxProvider
.ForAttributeWithMetadataName(
#if !ROSLYN4_4_OR_GREATER
context,
@@ -66,7 +66,7 @@ public void Initialize(IncrementalGeneratorInitializationContext context)
if (exceptionSymbol == null)
{
- var diagnostics = new[] { DiagnosticInfo.Create(DiagnosticDescriptors.MissingRequiredType, null, new object?[] { "System.Exception" }) }.ToImmutableEquatableArray();
+ var diagnostics = ImmutableArray.Create(Diagnostic.Create(DiagnosticDescriptors.MissingRequiredType, null, new object?[] { "System.Exception" }));
return (null, diagnostics, false);
}
@@ -92,75 +92,110 @@ public void Initialize(IncrementalGeneratorInitializationContext context)
// Convert to immutable spec for incremental caching
LoggerClassSpec? loggerClassSpec = logClasses.Count > 0 ? logClasses[0].ToSpec() : null;
- return (loggerClassSpec, parser.Diagnostics.ToImmutableEquatableArray(), hasStringCreate);
+ return (loggerClassSpec, parser.Diagnostics.ToImmutableArray(), hasStringCreate);
})
#if ROSLYN4_4_OR_GREATER
.WithTrackingName(StepNames.LoggerMessageTransform)
#endif
;
- context.RegisterSourceOutput(loggerClasses.Collect(), static (spc, items) => Execute(items, spc));
+ // Single collect for all per-method results, then aggregate into an equatable source
+ // model (using ImmutableEquatableArray for deep value equality) plus flat diagnostics.
+ // Diagnostics are deduplicated here because each attributed method triggers parsing of
+ // the entire class, producing duplicate diagnostics.
+ IncrementalValueProvider<(ImmutableEquatableArray<(LoggerClassSpec LoggerClassSpec, bool HasStringCreate)> Specs, ImmutableArray Diagnostics)> collected =
+ loggerClasses.Collect().Select(static (items, _) =>
+ {
+ ImmutableArray<(LoggerClassSpec, bool)>.Builder? specs = null;
+ ImmutableArray.Builder? diagnostics = null;
+ HashSet<(string Id, TextSpan? Span, string? FilePath, string Message)>? seen = null;
+
+ foreach (var item in items)
+ {
+ if (item.LoggerClassSpec is not null)
+ {
+ (specs ??= ImmutableArray.CreateBuilder<(LoggerClassSpec, bool)>()).Add((item.LoggerClassSpec, item.HasStringCreate));
+ }
+ foreach (Diagnostic diagnostic in item.Diagnostics)
+ {
+ if ((seen ??= new()).Add((diagnostic.Id, diagnostic.Location?.SourceSpan, diagnostic.Location?.SourceTree?.FilePath, diagnostic.GetMessage())))
+ {
+ (diagnostics ??= ImmutableArray.CreateBuilder()).Add(diagnostic);
+ }
+ }
+ }
+
+ return (
+ specs?.ToImmutableEquatableArray() ?? ImmutableEquatableArray<(LoggerClassSpec, bool)>.Empty,
+ diagnostics?.ToImmutable() ?? ImmutableArray.Empty);
+ });
+
+ // Project to just the equatable source model, discarding diagnostics.
+ // ImmutableEquatableArray provides deep value equality, so Roslyn's Select operator
+ // compares successive model snapshots and only propagates changes downstream when the
+ // model structurally differs. This ensures source generation is fully incremental.
+ IncrementalValueProvider> sourceGenerationSpecs =
+ collected.Select(static (t, _) => t.Specs);
+
+ context.RegisterSourceOutput(sourceGenerationSpecs, static (spc, items) => EmitSource(items, spc));
+
+ // Project to just the diagnostics, discarding the model. ImmutableArray does not
+ // implement value equality, so Roslyn's incremental pipeline uses reference equality for these
+ // values — the callback fires on every compilation change. This is by design: diagnostic
+ // emission is cheap, and we need fresh SourceLocation instances that are pragma-suppressible
+ // (cf. https://github.com/dotnet/runtime/issues/92509).
+ IncrementalValueProvider> diagnosticResults =
+ collected.Select(static (t, _) => t.Diagnostics);
+
+ context.RegisterSourceOutput(diagnosticResults, EmitDiagnostics);
+ }
+
+ private static void EmitDiagnostics(SourceProductionContext context, ImmutableArray diagnostics)
+ {
+ foreach (Diagnostic diagnostic in diagnostics)
+ {
+ context.ReportDiagnostic(diagnostic);
+ }
}
- private static void Execute(ImmutableArray<(LoggerClassSpec? LoggerClassSpec, ImmutableEquatableArray Diagnostics, bool HasStringCreate)> items, SourceProductionContext context)
+ private static void EmitSource(ImmutableEquatableArray<(LoggerClassSpec LoggerClassSpec, bool HasStringCreate)> items, SourceProductionContext context)
{
- if (items.IsDefaultOrEmpty)
+ if (items.Count == 0)
{
return;
}
bool hasStringCreate = false;
- var allLogClasses = new Dictionary(); // Use dictionary to deduplicate by class key
- var reportedDiagnostics = new HashSet(); // Track reported diagnostics to avoid duplicates
+ var allLogClasses = new Dictionary(); // Deduplicate by class key
foreach (var item in items)
{
- // Report diagnostics (note: pragma suppression doesn't work with trimmed locations - known Roslyn limitation)
- // Use HashSet to deduplicate - each attributed method triggers parsing of entire class, producing duplicate diagnostics
- if (item.Diagnostics is not null)
+ hasStringCreate |= item.HasStringCreate;
+
+ // Build unique key including parent class chain to handle nested classes
+ string classKey = BuildClassKey(item.LoggerClassSpec);
+
+ // Each attributed method in a partial class file produces the same LoggerClassSpec with all methods in that file.
+ // However, different partial class files produce different LoggerClassSpecs with different methods. Merge them.
+ if (!allLogClasses.TryGetValue(classKey, out LoggerClass? existingClass))
{
- foreach (var diagnostic in item.Diagnostics)
- {
- if (reportedDiagnostics.Add(diagnostic))
- {
- context.ReportDiagnostic(diagnostic.CreateDiagnostic());
- }
- }
+ allLogClasses[classKey] = FromSpec(item.LoggerClassSpec);
}
-
- if (item.LoggerClassSpec != null)
+ else
{
- hasStringCreate |= item.HasStringCreate;
-
- // Build unique key including parent class chain to handle nested classes
- string classKey = BuildClassKey(item.LoggerClassSpec);
+ var newClass = FromSpec(item.LoggerClassSpec);
- // Each attributed method in a partial class file produces the same LoggerClassSpec with all methods in that file.
- // However, different partial class files (e.g., LevelTestExtensions.cs and LevelTestExtensions.WithDiagnostics.cs)
- // produce different LoggerClassSpecs with different methods. Merge them.
- if (!allLogClasses.TryGetValue(classKey, out LoggerClass? existingClass))
+ var existingMethodKeys = new HashSet<(string Name, int EventId)>();
+ foreach (var method in existingClass.Methods)
{
- allLogClasses[classKey] = FromSpec(item.LoggerClassSpec);
+ existingMethodKeys.Add((method.Name, method.EventId));
}
- else
- {
- // Merge methods from different partial class files
- var newClass = FromSpec(item.LoggerClassSpec);
-
- // Use HashSet for O(1) lookup to avoid O(N×M) complexity
- var existingMethodKeys = new HashSet<(string Name, int EventId)>();
- foreach (var method in existingClass.Methods)
- {
- existingMethodKeys.Add((method.Name, method.EventId));
- }
- foreach (var method in newClass.Methods)
+ foreach (var method in newClass.Methods)
+ {
+ if (existingMethodKeys.Add((method.Name, method.EventId)))
{
- // Only add methods that don't already exist (avoid duplicates from same file)
- if (existingMethodKeys.Add((method.Name, method.EventId)))
- {
- existingClass.Methods.Add(method);
- }
+ existingClass.Methods.Add(method);
}
}
}
diff --git a/src/libraries/Microsoft.Extensions.Logging.Abstractions/gen/Microsoft.Extensions.Logging.Generators.targets b/src/libraries/Microsoft.Extensions.Logging.Abstractions/gen/Microsoft.Extensions.Logging.Generators.targets
index f1a42f8831ecfa..096dc2f89a3709 100644
--- a/src/libraries/Microsoft.Extensions.Logging.Abstractions/gen/Microsoft.Extensions.Logging.Generators.targets
+++ b/src/libraries/Microsoft.Extensions.Logging.Abstractions/gen/Microsoft.Extensions.Logging.Generators.targets
@@ -25,7 +25,6 @@
-
diff --git a/src/libraries/Microsoft.Extensions.Logging.Abstractions/tests/Microsoft.Extensions.Logging.Generators.Tests/LoggerMessageGeneratorParserTests.cs b/src/libraries/Microsoft.Extensions.Logging.Abstractions/tests/Microsoft.Extensions.Logging.Generators.Tests/LoggerMessageGeneratorParserTests.cs
index a6cc6db209297d..a6bef486f6de2f 100644
--- a/src/libraries/Microsoft.Extensions.Logging.Abstractions/tests/Microsoft.Extensions.Logging.Generators.Tests/LoggerMessageGeneratorParserTests.cs
+++ b/src/libraries/Microsoft.Extensions.Logging.Abstractions/tests/Microsoft.Extensions.Logging.Generators.Tests/LoggerMessageGeneratorParserTests.cs
@@ -10,6 +10,7 @@
using System.Threading.Tasks;
using Microsoft.CodeAnalysis;
using Microsoft.CodeAnalysis.CSharp;
+using Microsoft.CodeAnalysis.Diagnostics;
using SourceGenerators.Tests;
using Xunit;
@@ -1426,5 +1427,35 @@ private static async Task> RunGenerator(
return d;
}
+
+ [Fact]
+ public async Task Diagnostic_HasPragmaSuppressibleLocation()
+ {
+ // SYSLIB1017: MissingLogLevel (Error, but not NotConfigurable).
+ string code = """
+ #pragma warning disable SYSLIB1017
+ using Microsoft.Extensions.Logging;
+
+ namespace Test
+ {
+ partial class C
+ {
+ [LoggerMessage(EventId = 0, Message = "M1")]
+ static partial void M1(ILogger logger);
+ }
+ }
+ """;
+
+ Assembly[] refs = new[] { typeof(ILogger).Assembly, typeof(LoggerMessageAttribute).Assembly };
+ using var workspace = RoslynTestUtils.CreateTestWorkspace();
+ Project proj = RoslynTestUtils.CreateTestProject(workspace, refs)
+ .WithDocuments(new[] { code });
+ Assert.True(proj.Solution.Workspace.TryApplyChanges(proj.Solution));
+ Compilation comp = (await proj.GetCompilationAsync().ConfigureAwait(false))!;
+ var (diags, _) = RoslynTestUtils.RunGenerator(comp, new LoggerMessageGenerator());
+ var effective = CompilationWithAnalyzers.GetEffectiveDiagnostics(diags, comp);
+ Diagnostic diagnostic = Assert.Single(effective, d => d.Id == "SYSLIB1017");
+ Assert.True(diagnostic.IsSuppressed);
+ }
}
}
diff --git a/src/libraries/System.Net.Ping/src/System/Net/NetworkInformation/Ping.Windows.cs b/src/libraries/System.Net.Ping/src/System/Net/NetworkInformation/Ping.Windows.cs
index 0b04636ac4f71c..d63c19b44d44d2 100644
--- a/src/libraries/System.Net.Ping/src/System/Net/NetworkInformation/Ping.Windows.cs
+++ b/src/libraries/System.Net.Ping/src/System/Net/NetworkInformation/Ping.Windows.cs
@@ -344,21 +344,21 @@ private static PingReply CreatePingReplyFromIcmpEchoReply(in Interop.IpHlpApi.IC
IPAddress address = new IPAddress(reply.address);
IPStatus ipStatus = GetStatusFromCode((int)reply.status);
- long rtt;
+ // The ICMP_ECHO_REPLY RoundTripTime field is always populated by the OS
+ // for any received reply, regardless of status (e.g. TTL expired, unreachable).
+ long rtt = reply.roundTripTime;
PingOptions? options;
byte[] buffer;
if (ipStatus == IPStatus.Success)
{
// Only copy the data if we succeed w/ the ping operation.
- rtt = reply.roundTripTime;
options = new PingOptions(reply.options.ttl, (reply.options.flags & DontFragmentFlag) > 0);
buffer = new byte[reply.dataSize];
Marshal.Copy(reply.data, buffer, 0, reply.dataSize);
}
else
{
- rtt = 0;
options = null;
buffer = Array.Empty();
}
@@ -371,19 +371,19 @@ private static PingReply CreatePingReplyFromIcmp6EchoReply(in Interop.IpHlpApi.I
IPAddress address = new IPAddress(reply.Address.Address, reply.Address.ScopeID);
IPStatus ipStatus = GetStatusFromCode((int)reply.Status);
- long rtt;
+ // The ICMPV6_ECHO_REPLY RoundTripTime field is always populated by the OS
+ // for any received reply, regardless of status (e.g. TTL expired, unreachable).
+ long rtt = reply.RoundTripTime;
byte[] buffer;
if (ipStatus == IPStatus.Success)
{
// Only copy the data if we succeed w/ the ping operation.
- rtt = reply.RoundTripTime;
buffer = new byte[sendSize];
Marshal.Copy(dataPtr + 36, buffer, 0, sendSize);
}
else
{
- rtt = 0;
buffer = Array.Empty();
}
diff --git a/src/libraries/System.Net.Ping/tests/FunctionalTests/PingTest.cs b/src/libraries/System.Net.Ping/tests/FunctionalTests/PingTest.cs
index 5265b001010ec9..900302a3fa9517 100644
--- a/src/libraries/System.Net.Ping/tests/FunctionalTests/PingTest.cs
+++ b/src/libraries/System.Net.Ping/tests/FunctionalTests/PingTest.cs
@@ -787,6 +787,62 @@ public async Task SendPingToExternalHostWithLowTtlTest()
Assert.NotEqual(IPAddress.Any, pingReply.Address);
}
+ [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsMultithreadingSupported))]
+ [OuterLoop] // Depends on external host
+ public async Task SendPingWithLowTtl_RoundtripTimeIsNonZero()
+ {
+ // Regression test: non-Success replies (e.g. TtlExpired) should preserve the
+ // round-trip time from the ICMP reply, not hardcode it to 0.
+ if (UsesPingUtility)
+ {
+ throw new SkipTestException("Test is only applicable to the IcmpSendEcho code path.");
+ }
+
+ string host = System.Net.Test.Common.Configuration.Ping.PingHost;
+ PingOptions options = new PingOptions();
+ byte[] payload = TestSettings.PayloadAsBytesShort;
+
+ using Ping ping = new Ping();
+
+ // Verify host is reachable first.
+ bool reachable = false;
+ for (int i = 0; i < s_pingcount; i++)
+ {
+ PingReply checkReply = await ping.SendPingAsync(host, TestSettings.PingTimeout, payload);
+ if (checkReply.Status == IPStatus.Success)
+ {
+ reachable = true;
+ break;
+ }
+ }
+ if (!reachable)
+ {
+ throw new SkipTestException($"Host {host} is not reachable. Skipping test.");
+ }
+
+ // RTT can legitimately be 0ms on very fast networks, so retry a few times
+ // and assert that at least one reply reports a non-zero RTT.
+ options.Ttl = 1;
+ bool gotNonZeroRtt = false;
+ for (int attempt = 0; attempt < 3; attempt++)
+ {
+ PingReply pingReply = await ping.SendPingAsync(host, TestSettings.PingTimeout, payload, options);
+
+ Assert.True(
+ pingReply.Status == IPStatus.TimeExceeded || pingReply.Status == IPStatus.TtlExpired,
+ $"pingReply.Status was {pingReply.Status} instead of TimeExceeded or TtlExpired");
+
+ if (pingReply.RoundtripTime > 0)
+ {
+ gotNonZeroRtt = true;
+ break;
+ }
+ }
+
+ Assert.True(gotNonZeroRtt,
+ "Expected at least one TtlExpired reply with non-zero RoundtripTime across 3 attempts");
+ }
+
private async Task Ping_TimedOut_Core(Func> sendPing)
{
Ping sender = new Ping();
diff --git a/src/libraries/System.Net.Security/src/System.Net.Security.csproj b/src/libraries/System.Net.Security/src/System.Net.Security.csproj
index fa9f8b24212436..61d4b05c9aae6b 100644
--- a/src/libraries/System.Net.Security/src/System.Net.Security.csproj
+++ b/src/libraries/System.Net.Security/src/System.Net.Security.csproj
@@ -23,8 +23,6 @@
ReferenceAssemblyExclusions.txt
-
-
diff --git a/src/libraries/System.Net.Security/src/System/Net/NegotiateAuthenticationPal.ManagedSpnego.cs b/src/libraries/System.Net.Security/src/System/Net/NegotiateAuthenticationPal.ManagedSpnego.cs
index 1ddb004769037f..ad860ec7595f5b 100644
--- a/src/libraries/System.Net.Security/src/System/Net/NegotiateAuthenticationPal.ManagedSpnego.cs
+++ b/src/libraries/System.Net.Security/src/System/Net/NegotiateAuthenticationPal.ManagedSpnego.cs
@@ -222,8 +222,8 @@ private IEnumerable> EnumerateMechanisms()
try
{
- AsnValueReader reader = new AsnValueReader(challenge, AsnEncodingRules.DER);
- AsnValueReader challengeReader = reader.ReadSequence(new Asn1Tag(TagClass.ContextSpecific, (int)NegotiationToken.NegTokenResp));
+ ValueAsnReader reader = new ValueAsnReader(challenge, AsnEncodingRules.DER);
+ ValueAsnReader challengeReader = reader.ReadSequence(new Asn1Tag(TagClass.ContextSpecific, (int)NegotiationToken.NegTokenResp));
reader.ThrowIfNotEmpty();
// NegTokenResp ::= SEQUENCE {
@@ -245,28 +245,28 @@ private IEnumerable> EnumerateMechanisms()
if (challengeReader.HasData && challengeReader.PeekTag().HasSameClassAndValue(new Asn1Tag(TagClass.ContextSpecific, (int)NegTokenResp.NegState)))
{
- AsnValueReader valueReader = challengeReader.ReadSequence(new Asn1Tag(TagClass.ContextSpecific, (int)NegTokenResp.NegState));
+ ValueAsnReader valueReader = challengeReader.ReadSequence(new Asn1Tag(TagClass.ContextSpecific, (int)NegTokenResp.NegState));
state = valueReader.ReadEnumeratedValue();
valueReader.ThrowIfNotEmpty();
}
if (challengeReader.HasData && challengeReader.PeekTag().HasSameClassAndValue(new Asn1Tag(TagClass.ContextSpecific, (int)NegTokenResp.SupportedMech)))
{
- AsnValueReader valueReader = challengeReader.ReadSequence(new Asn1Tag(TagClass.ContextSpecific, (int)NegTokenResp.SupportedMech));
+ ValueAsnReader valueReader = challengeReader.ReadSequence(new Asn1Tag(TagClass.ContextSpecific, (int)NegTokenResp.SupportedMech));
mech = valueReader.ReadObjectIdentifier();
valueReader.ThrowIfNotEmpty();
}
if (challengeReader.HasData && challengeReader.PeekTag().HasSameClassAndValue(new Asn1Tag(TagClass.ContextSpecific, (int)NegTokenResp.ResponseToken)))
{
- AsnValueReader valueReader = challengeReader.ReadSequence(new Asn1Tag(TagClass.ContextSpecific, (int)NegTokenResp.ResponseToken));
+ ValueAsnReader valueReader = challengeReader.ReadSequence(new Asn1Tag(TagClass.ContextSpecific, (int)NegTokenResp.ResponseToken));
blob = valueReader.ReadOctetString();
valueReader.ThrowIfNotEmpty();
}
if (challengeReader.HasData && challengeReader.PeekTag().HasSameClassAndValue(new Asn1Tag(TagClass.ContextSpecific, (int)NegTokenResp.MechListMIC)))
{
- AsnValueReader valueReader = challengeReader.ReadSequence(new Asn1Tag(TagClass.ContextSpecific, (int)NegTokenResp.MechListMIC));
+ ValueAsnReader valueReader = challengeReader.ReadSequence(new Asn1Tag(TagClass.ContextSpecific, (int)NegTokenResp.MechListMIC));
mechListMIC = valueReader.ReadOctetString();
valueReader.ThrowIfNotEmpty();
}
diff --git a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/Sve.PlatformNotSupported.cs b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/Sve.PlatformNotSupported.cs
index 819b72cec57008..6e013db76ae088 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/Sve.PlatformNotSupported.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/Sve.PlatformNotSupported.cs
@@ -4938,18 +4938,6 @@ internal Arm64() { }
// Load 32-bit data and zero-extend
- ///
- /// svint64_t svld1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets)
- /// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
- ///
- public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); }
-
- ///
- /// svint64_t svld1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets)
- /// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
- ///
- public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); }
-
///
/// svint64_t svld1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets)
/// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
@@ -4962,18 +4950,6 @@ internal Arm64() { }
///
public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); }
- ///
- /// svuint64_t svld1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets)
- /// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
- ///
- public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); }
-
- ///
- /// svuint64_t svld1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets)
- /// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
- ///
- public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); }
-
///
/// svuint64_t svld1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets)
/// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
@@ -4989,18 +4965,6 @@ internal Arm64() { }
// Load 32-bit data and zero-extend, first-faulting
- ///
- /// svint64_t svldff1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets)
- /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
- ///
- public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); }
-
- ///
- /// svint64_t svldff1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets)
- /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
- ///
- public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); }
-
///
/// svint64_t svldff1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets)
/// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
@@ -5013,18 +4977,6 @@ internal Arm64() { }
///
public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); }
- ///
- /// svuint64_t svldff1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets)
- /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
- ///
- public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); }
-
- ///
- /// svuint64_t svldff1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets)
- /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
- ///
- public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); }
-
///
/// svuint64_t svldff1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets)
/// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
@@ -5040,25 +4992,6 @@ internal Arm64() { }
// Load 32-bit data and zero-extend
- ///
- /// svint64_t svld1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices)
- /// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
- ///
- public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); }
-
- //
- // svint64_t svld1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
- // LD1W Zresult.D, Pg/Z, [Zbases.D, #0]
- //
- // Removed as per #103297
- // public static Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); }
-
- ///
- /// svint64_t svld1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices)
- /// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
- ///
- public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); }
-
///
/// svint64_t svld1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices)
/// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
@@ -5077,25 +5010,6 @@ internal Arm64() { }
///
public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); }
- ///
- /// svuint64_t svld1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices)
- /// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
- ///
- public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); }
-
- //
- // svuint64_t svld1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
- // LD1W Zresult.D, Pg/Z, [Zbases.D, #0]
- //
- // Removed as per #103297
- // public static Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); }
-
- ///
- /// svuint64_t svld1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices)
- /// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
- ///
- public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); }
-
///
/// svuint64_t svld1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices)
/// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
@@ -5117,25 +5031,6 @@ internal Arm64() { }
// Load 32-bit data and zero-extend, first-faulting
- ///
- /// svint64_t svldff1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices)
- /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
- ///
- public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); }
-
- //
- // svint64_t svldff1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
- // LDFF1W Zresult.D, Pg/Z, [Zbases.D, #0]
- //
- // Removed as per #103297
- // public static Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); }
-
- ///
- /// svint64_t svldff1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices)
- /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
- ///
- public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); }
-
///
/// svint64_t svldff1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices)
/// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
@@ -5154,25 +5049,6 @@ internal Arm64() { }
///
public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); }
- ///
- /// svuint64_t svldff1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices)
- /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
- ///
- public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); }
-
- //
- // svuint64_t svldff1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
- // LDFF1W Zresult.D, Pg/Z, [Zbases.D, #0]
- //
- // Removed as per #103297
- // public static Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); }
-
- ///
- /// svuint64_t svldff1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices)
- /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
- ///
- public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); }
-
///
/// svuint64_t svldff1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices)
/// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
diff --git a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/Sve.cs b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/Sve.cs
index 69d4375549fd7d..c6cf60093d09a4 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/Sve.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/Sve.cs
@@ -4935,18 +4935,6 @@ internal Arm64() { }
// Load 32-bit data and zero-extend
- ///
- /// svint64_t svld1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets)
- /// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
- ///
- public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets);
-
- ///
- /// svint64_t svld1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets)
- /// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
- ///
- public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets);
-
///
/// svint64_t svld1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets)
/// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
@@ -4959,18 +4947,6 @@ internal Arm64() { }
///
public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets);
- ///
- /// svuint64_t svld1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets)
- /// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
- ///
- public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets);
-
- ///
- /// svuint64_t svld1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets)
- /// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
- ///
- public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets);
-
///
/// svuint64_t svld1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets)
/// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
@@ -4986,18 +4962,6 @@ internal Arm64() { }
// Load 32-bit data and zero-extend, first-faulting
- ///
- /// svint64_t svldff1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets)
- /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
- ///
- public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets);
-
- ///
- /// svint64_t svldff1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets)
- /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
- ///
- public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets);
-
///
/// svint64_t svldff1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets)
/// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
@@ -5010,18 +4974,6 @@ internal Arm64() { }
///
public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets);
- ///
- /// svuint64_t svldff1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets)
- /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
- ///
- public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets);
-
- ///
- /// svuint64_t svldff1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets)
- /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
- ///
- public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets);
-
///
/// svuint64_t svldff1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets)
/// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
@@ -5037,25 +4989,6 @@ internal Arm64() { }
// Load 32-bit data and zero-extend
- ///
- /// svint64_t svld1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices)
- /// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
- ///
- public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtend(mask, address, indices);
-
- //
- // svint64_t svld1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
- // LD1W Zresult.D, Pg/Z, [Zbases.D, #0]
- //
- // Removed as per #103297
- // public static Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtend(mask, addresses);
-
- ///
- /// svint64_t svld1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices)
- /// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
- ///
- public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtend(mask, address, indices);
-
///
/// svint64_t svld1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices)
/// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
@@ -5074,25 +5007,6 @@ internal Arm64() { }
///
public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtend(mask, address, indices);
- ///
- /// svuint64_t svld1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices)
- /// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
- ///
- public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtend(mask, address, indices);
-
- //
- // svuint64_t svld1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
- // LD1W Zresult.D, Pg/Z, [Zbases.D, #0]
- //
- // Removed as per #103297
- // public static Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtend(mask, addresses);
-
- ///
- /// svuint64_t svld1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices)
- /// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
- ///
- public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtend(mask, address, indices);
-
///
/// svuint64_t svld1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices)
/// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
@@ -5114,25 +5028,6 @@ internal Arm64() { }
// Load 32-bit data and zero-extend, first-faulting
- ///
- /// svint64_t svldff1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices)
- /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
- ///
- public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices);
-
- //
- // svint64_t svldff1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
- // LDFF1W Zresult.D, Pg/Z, [Zbases.D, #0]
- //
- // Removed as per #103297
- // public static Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, addresses);
-
- ///
- /// svint64_t svldff1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices)
- /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
- ///
- public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices);
-
///
/// svint64_t svldff1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices)
/// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
@@ -5151,25 +5046,6 @@ internal Arm64() { }
///
public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices);
- ///
- /// svuint64_t svldff1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices)
- /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
- ///
- public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices);
-
- //
- // svuint64_t svldff1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
- // LDFF1W Zresult.D, Pg/Z, [Zbases.D, #0]
- //
- // Removed as per #103297
- // public static Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, addresses);
-
- ///
- /// svuint64_t svldff1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices)
- /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
- ///
- public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices);
-
///
/// svuint64_t svldff1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices)
/// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
diff --git a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/Sve2.PlatformNotSupported.cs b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/Sve2.PlatformNotSupported.cs
index add56b177a1390..1eb32eba2be1ca 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/Sve2.PlatformNotSupported.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/Sve2.PlatformNotSupported.cs
@@ -1479,6 +1479,584 @@ internal Arm64() { }
public static Vector FusedAddRoundedHalving(Vector left, Vector right) { throw new PlatformNotSupportedException(); }
+ // Load 8-bit data and zero-extend, non-temporal
+
+ //
+ // svint32_t svldnt1ub_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+ // LDNT1B Zresult.S, Pg/Z, [Zbases.S, XZR]
+ //
+ // Removed as per #103297
+ // public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint32_t svldnt1ub_gather_[u32]offset_s32(svbool_t pg, const uint8_t *base, svuint32_t offsets)
+ /// LDNT1B Zresult.S, Pg/Z, [Zoffsets.S, Xbase]
+ ///
+ public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svldnt1ub_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ /// LDNT1B Zresult.D, Pg/Z, [Zbases.D, XZR]
+ ///
+ public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svldnt1ub_gather_[s64]offset_s64(svbool_t pg, const uint8_t *base, svint64_t offsets)
+ /// LDNT1B Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+ ///
+ public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svldnt1ub_gather_[u64]offset_s64(svbool_t pg, const uint8_t *base, svuint64_t offsets)
+ /// LDNT1B Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+ ///
+ public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); }
+
+ //
+ // svuint32_t svldnt1ub_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+ // LDNT1B Zresult.S, Pg/Z, [Zbases.S, XZR]
+ //
+ // Removed as per #103297
+ // public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint32_t svldnt1ub_gather_[u32]offset_u32(svbool_t pg, const uint8_t *base, svuint32_t offsets)
+ /// LDNT1B Zresult.S, Pg/Z, [Zoffsets.S, Xbase]
+ ///
+ public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svldnt1ub_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ /// LDNT1B Zresult.D, Pg/Z, [Zbases.D, XZR]
+ ///
+ public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svldnt1ub_gather_[s64]offset_u64(svbool_t pg, const uint8_t *base, svint64_t offsets)
+ /// LDNT1B Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+ ///
+ public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svldnt1ub_gather_[u64]offset_u64(svbool_t pg, const uint8_t *base, svuint64_t offsets)
+ /// LDNT1B Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+ ///
+ public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); }
+
+
+ // Load 16-bit data and sign-extend, non-temporal
+
+ //
+ // svint32_t svldnt1sh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+ // LDNT1SH Zresult.S, Pg/Z, [Zbases.S, XZR]
+ //
+ // Removed as per #103297
+ // public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svldnt1sh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ /// LDNT1SH Zresult.D, Pg/Z, [Zbases.D, XZR]
+ ///
+ public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svldnt1sh_gather_[s64]index_s64(svbool_t pg, const int16_t *base, svint64_t indices)
+ /// LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+ ///
+ public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, short* address, Vector indices) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svldnt1sh_gather_[u64]index_s64(svbool_t pg, const int16_t *base, svuint64_t indices)
+ /// LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+ ///
+ public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, short* address, Vector indices) { throw new PlatformNotSupportedException(); }
+
+ //
+ // svuint32_t svldnt1sh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+ // LDNT1SH Zresult.S, Pg/Z, [Zbases.S, XZR]
+ //
+ // Removed as per #103297
+ // public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svldnt1sh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ /// LDNT1SH Zresult.D, Pg/Z, [Zbases.D, XZR]
+ ///
+ public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svldnt1sh_gather_[s64]index_u64(svbool_t pg, const int16_t *base, svint64_t indices)
+ /// LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+ ///
+ public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, short* address, Vector indices) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svldnt1sh_gather_[u64]index_u64(svbool_t pg, const int16_t *base, svuint64_t indices)
+ /// LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+ ///
+ public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, short* address, Vector indices) { throw new PlatformNotSupportedException(); }
+
+
+ // Load 16-bit data and sign-extend, non-temporal
+
+ ///
+ /// svint32_t svldnt1sh_gather_[u32]offset_s32(svbool_t pg, const int16_t *base, svuint32_t offsets)
+ /// LDNT1SH Zresult.S, Pg/Z, [Zoffsets.S, Xbase]
+ ///
+ public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector mask, short* address, Vector offsets) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svldnt1sh_gather_[s64]offset_s64(svbool_t pg, const int16_t *base, svint64_t offsets)
+ /// LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+ ///
+ public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector mask, short* address, Vector offsets) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svldnt1sh_gather_[u64]offset_s64(svbool_t pg, const int16_t *base, svuint64_t offsets)
+ /// LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+ ///
+ public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector mask, short* address, Vector offsets) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint32_t svldnt1sh_gather_[u32]offset_u32(svbool_t pg, const int16_t *base, svuint32_t offsets)
+ /// LDNT1SH Zresult.S, Pg/Z, [Zoffsets.S, Xbase]
+ ///
+ public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector mask, short* address, Vector offsets) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svldnt1sh_gather_[s64]offset_u64(svbool_t pg, const int16_t *base, svint64_t offsets)
+ /// LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+ ///
+ public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector mask, short* address, Vector offsets) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svldnt1sh_gather_[u64]offset_u64(svbool_t pg, const int16_t *base, svuint64_t offsets)
+ /// LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+ ///
+ public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector mask, short* address, Vector offsets) { throw new PlatformNotSupportedException(); }
+
+
+ // Load 32-bit data and sign-extend, non-temporal
+
+ ///
+ /// svint64_t svldnt1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ /// LDNT1SW Zresult.D, Pg/Z, [Zbases.D, XZR]
+ ///
+ public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svldnt1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices)
+ /// LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+ ///
+ public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svldnt1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices)
+ /// LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+ ///
+ public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svldnt1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ /// LDNT1SW Zresult.D, Pg/Z, [Zbases.D, XZR]
+ ///
+ public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svldnt1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices)
+ /// LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+ ///
+ public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svldnt1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices)
+ /// LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+ ///
+ public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector