From 522f5073870455504352bc78e4cb5e71bd115461 Mon Sep 17 00:00:00 2001
From: Adeel Mujahid <3840695+am11@users.noreply.github.com>
Date: Tue, 17 Mar 2026 14:11:32 +0000
Subject: [PATCH] Fix inner build of community archs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The full build (./build.sh) is working because it goes through MSBuild
(runtime.proj) which has explicit top-level defaults setting
`FEATURE_DYNAMIC_CODE_COMPILED=1` for all architectures. This enables
Virtual Stub Dispatch (VSD) + dynamic code generation, and
riscv64/loongarch64 both have complete VSD implementations in
`runtime/{arch}/StubDispatch.S`.
The inner build (src/coreclr/build-runtime.sh) was failing because it
invoked CMake directly without the top-level context. The CMake feature
logic in clrfeatures.cmake had no architecture-specific defaults, so it
defaulted to `FEATURE_DYNAMIC_CODE_COMPILED=0` for riscv64/loongarch64.
This triggered a configuration requiring both
`StubPrecodeDynamicHelpers.S` and `CachedInterfaceDispatchCoreCLR.S`
assembly stubs—neither of which was implemented for those
architectures—causing undefined symbol linker errors. The CMake fix now
forces `FEATURE_DYNAMIC_CODE_COMPILED=1` for both architectures in
`clrfeatures.cmake`, aligning the inner build with the top-level
defaults.
---
src/coreclr/clr.featuredefines.props | 2 +-
src/coreclr/clrfeatures.cmake | 28 +-
.../Runtime/unix/unixasmmacrosloongarch64.inc | 23 +-
.../pal/inc/unixasmmacrosloongarch64.inc | 8 +
src/coreclr/vm/callstubgenerator.cpp | 52 +-
src/coreclr/vm/callstubgenerator.h | 6 +-
src/coreclr/vm/loongarch64/asmconstants.h | 22 +
src/coreclr/vm/loongarch64/asmhelpers.S | 1770 +++++++++++++++++
src/coreclr/vm/loongarch64/stubs.cpp | 72 +-
src/coreclr/vm/prestub.cpp | 2 +-
src/coreclr/vm/riscv64/asmconstants.h | 2 +
src/coreclr/vm/riscv64/asmhelpers.S | 51 +
src/coreclr/vm/riscv64/stubs.cpp | 49 +-
src/coreclr/vm/riscv64/thunktemplates.S | 2 +
.../Directory.Build.props | 2 +-
.../JIT/interpreter/InterpreterTester.csproj | 2 +-
16 files changed, 2009 insertions(+), 84 deletions(-)
diff --git a/src/coreclr/clr.featuredefines.props b/src/coreclr/clr.featuredefines.props
index 76677d3c6148c2..1bdc341c1ccaa7 100644
--- a/src/coreclr/clr.featuredefines.props
+++ b/src/coreclr/clr.featuredefines.props
@@ -47,7 +47,7 @@
true
-
+
true
diff --git a/src/coreclr/clrfeatures.cmake b/src/coreclr/clrfeatures.cmake
index 828c9be93fdd51..b3fb6958244594 100644
--- a/src/coreclr/clrfeatures.cmake
+++ b/src/coreclr/clrfeatures.cmake
@@ -1,3 +1,12 @@
+# riscv64 and loongarch64 do not have a separate CID-only asm stub layer yet.
+# Forcing FEATURE_DYNAMIC_CODE_COMPILED on keeps the feature matrix consistent with
+# the top-level build and avoids unresolved CID/VSD stub symbols at link time.
+if (NOT DEFINED FEATURE_DYNAMIC_CODE_COMPILED)
+ if (CLR_CMAKE_TARGET_ARCH_RISCV64 OR CLR_CMAKE_TARGET_ARCH_LOONGARCH64)
+ set(FEATURE_DYNAMIC_CODE_COMPILED 1)
+ endif()
+endif()
+
if (FEATURE_DYNAMIC_CODE_COMPILED)
set(FEATURE_TIERED_COMPILATION 1)
set(FEATURE_REJIT 1)
@@ -48,16 +57,17 @@ if(CLR_CMAKE_TARGET_BROWSER)
endif()
if(NOT DEFINED FEATURE_INTERPRETER)
- if(CLR_CMAKE_TARGET_ANDROID)
- set(FEATURE_INTERPRETER 0)
- else()
- if(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_RISCV64)
- set(FEATURE_INTERPRETER $,1,0>)
- else(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_RISCV64)
- set(FEATURE_INTERPRETER 0)
- endif(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_RISCV64)
+ set(FEATURE_INTERPRETER 0)
+
+ if(NOT CLR_CMAKE_TARGET_ANDROID AND
+ (CLR_CMAKE_TARGET_ARCH_AMD64 OR
+ CLR_CMAKE_TARGET_ARCH_ARM64 OR
+ CLR_CMAKE_TARGET_ARCH_ARM OR
+ CLR_CMAKE_TARGET_ARCH_RISCV64 OR
+ CLR_CMAKE_TARGET_ARCH_LOONGARCH64))
+ set(FEATURE_INTERPRETER $,1,0>)
endif()
-endif(NOT DEFINED FEATURE_INTERPRETER)
+endif()
if(NOT DEFINED FEATURE_STANDALONE_GC)
set(FEATURE_STANDALONE_GC 1)
diff --git a/src/coreclr/nativeaot/Runtime/unix/unixasmmacrosloongarch64.inc b/src/coreclr/nativeaot/Runtime/unix/unixasmmacrosloongarch64.inc
index cf3583aae5ba87..1277824476cb8c 100644
--- a/src/coreclr/nativeaot/Runtime/unix/unixasmmacrosloongarch64.inc
+++ b/src/coreclr/nativeaot/Runtime/unix/unixasmmacrosloongarch64.inc
@@ -143,28 +143,7 @@ C_FUNC(\Name):
.endm
-// Loads the address of a thread-local variable into the target register,
-// which cannot be a0. Preserves all other registers.
-.macro INLINE_GET_TLS_VAR target, var
- .ifc \target, $a0
- .error "target cannot be a0"
- .endif
-
- addi.d $sp, $sp, -16
- st.d $a0, $sp, 0
- st.d $ra, $sp, 8
-
- // This instruction is recognized and potentially patched
- // by the linker (GD->IE/LE relaxation).
- la.tls.desc $a0, \var
-
- ori \target, $tp, 0
- add.d \target, \target, $a0
-
- ld.d $a0, $sp, 0
- ld.d $ra, $sp, 8
- addi.d $sp, $sp, 16
-.endm
+#include
// Inlined version of RhpGetThread. Target cannot be a0.
.macro INLINE_GETTHREAD target
diff --git a/src/coreclr/pal/inc/unixasmmacrosloongarch64.inc b/src/coreclr/pal/inc/unixasmmacrosloongarch64.inc
index 92d701598f933e..f042792f51832a 100644
--- a/src/coreclr/pal/inc/unixasmmacrosloongarch64.inc
+++ b/src/coreclr/pal/inc/unixasmmacrosloongarch64.inc
@@ -523,3 +523,11 @@ $__RedirectionStubEndFuncName
0:
#endif
.endm
+
+#include
+
+// Inlined version of RhpGetThread. Target cannot be a0.
+.macro INLINE_GETTHREAD target
+ INLINE_GET_TLS_VAR \target, C_FUNC(t_CurrentThreadInfo)
+ ld.d \target, \target, OFFSETOF__ThreadLocalInfo__m_pThread
+.endm
diff --git a/src/coreclr/vm/callstubgenerator.cpp b/src/coreclr/vm/callstubgenerator.cpp
index ce30ce8757cc8f..941ca75e0f0236 100644
--- a/src/coreclr/vm/callstubgenerator.cpp
+++ b/src/coreclr/vm/callstubgenerator.cpp
@@ -615,7 +615,7 @@ extern "C" void Store_Stack_4B();
#endif // TARGET_ARM
-#ifdef TARGET_RISCV64
+#if defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
extern "C" void Load_A0();
extern "C" void Load_A0_A1();
@@ -783,7 +783,7 @@ extern "C" void Store_FA6();
extern "C" void Store_FA6_FA7();
extern "C" void Store_FA7();
-#endif // TARGET_RISCV64
+#endif // TARGET_RISCV64 || TARGET_LOONGARCH64
PCODE CallStubGenerator::GetStackRoutine()
{
@@ -879,7 +879,7 @@ PCODE CallStubGenerator::GetGPRegRangeRoutine(int r1, int r2)
(PCODE)0, (PCODE)0, (PCODE)Store_R2, (PCODE)Store_R2_R3,
(PCODE)0, (PCODE)0, (PCODE)0, (PCODE)Store_R3
};
-#elif defined(TARGET_RISCV64)
+#elif defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
static const PCODE GPRegsLoadRoutines[] = {
(PCODE)Load_A0, (PCODE)Load_A0_A1, (PCODE)Load_A0_A1_A2, (PCODE)Load_A0_A1_A2_A3, (PCODE)Load_A0_A1_A2_A3_A4, (PCODE)Load_A0_A1_A2_A3_A4_A5, (PCODE)Load_A0_A1_A2_A3_A4_A5_A6, (PCODE)Load_A0_A1_A2_A3_A4_A5_A6_A7,
(PCODE)0, (PCODE)Load_A1, (PCODE)Load_A1_A2, (PCODE)Load_A1_A2_A3, (PCODE)Load_A1_A2_A3_A4, (PCODE)Load_A1_A2_A3_A4_A5, (PCODE)Load_A1_A2_A3_A4_A5_A6, (PCODE)Load_A1_A2_A3_A4_A5_A6_A7,
@@ -931,7 +931,7 @@ PCODE CallStubGenerator::GetGPRegRefRoutine(int r)
(PCODE)Store_Ref_X0, (PCODE)Store_Ref_X1, (PCODE)Store_Ref_X2, (PCODE)Store_Ref_X3,
(PCODE)Store_Ref_X4, (PCODE)Store_Ref_X5, (PCODE)Store_Ref_X6, (PCODE)Store_Ref_X7
};
-#elif defined(TARGET_RISCV64)
+#elif defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
static const PCODE GPRegsRefLoadRoutines[] = {
(PCODE)Load_Ref_A0, (PCODE)Load_Ref_A1, (PCODE)Load_Ref_A2, (PCODE)Load_Ref_A3,
(PCODE)Load_Ref_A4, (PCODE)Load_Ref_A5, (PCODE)Load_Ref_A6, (PCODE)Load_Ref_A7
@@ -1017,7 +1017,7 @@ PCODE CallStubGenerator::GetFPRegRangeRoutine(int x1, int x2)
(PCODE)0, (PCODE)0, (PCODE)0, (PCODE)0, (PCODE)0, (PCODE)0, (PCODE)Store_D6, (PCODE)Store_D6_D7,
(PCODE)0, (PCODE)0, (PCODE)0, (PCODE)0, (PCODE)0, (PCODE)0, (PCODE)0, (PCODE)Store_D7
};
-#elif defined(TARGET_RISCV64)
+#elif defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
static const PCODE FPRegsLoadRoutines[] = {
(PCODE)Load_FA0, (PCODE)Load_FA0_FA1, (PCODE)Load_FA0_FA1_FA2, (PCODE)Load_FA0_FA1_FA2_FA3, (PCODE)Load_FA0_FA1_FA2_FA3_FA4, (PCODE)Load_FA0_FA1_FA2_FA3_FA4_FA5, (PCODE)Load_FA0_FA1_FA2_FA3_FA4_FA5_FA6, (PCODE)Load_FA0_FA1_FA2_FA3_FA4_FA5_FA6_FA7,
(PCODE)0, (PCODE)Load_FA1, (PCODE)Load_FA1_FA2, (PCODE)Load_FA1_FA2_FA3, (PCODE)Load_FA1_FA2_FA3_FA4, (PCODE)Load_FA1_FA2_FA3_FA4_FA5, (PCODE)Load_FA1_FA2_FA3_FA4_FA5_FA6, (PCODE)Load_FA1_FA2_FA3_FA4_FA5_FA6_FA7,
@@ -1315,16 +1315,20 @@ extern "C" void InterpreterStubRet3Vector128();
extern "C" void InterpreterStubRet4Vector128();
#endif // TARGET_ARM64
-#if defined(TARGET_RISCV64)
+#if defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
+extern "C" void CallJittedMethodRetFloat(PCODE *routines, int8_t*pArgs, int8_t*pRet, int totalStackSize, PTR_PTR_Object pContinuation);
+extern "C" void CallJittedMethodRet2Float(PCODE *routines, int8_t*pArgs, int8_t*pRet, int totalStackSize, PTR_PTR_Object pContinuation);
extern "C" void CallJittedMethodRet2I8(PCODE *routines, int8_t*pArgs, int8_t*pRet, int totalStackSize, PTR_PTR_Object pContinuation);
extern "C" void CallJittedMethodRet2Double(PCODE *routines, int8_t*pArgs, int8_t*pRet, int totalStackSize, PTR_PTR_Object pContinuation);
extern "C" void CallJittedMethodRetFloatInt(PCODE *routines, int8_t*pArgs, int8_t*pRet, int totalStackSize, PTR_PTR_Object pContinuation);
extern "C" void CallJittedMethodRetIntFloat(PCODE *routines, int8_t*pArgs, int8_t*pRet, int totalStackSize, PTR_PTR_Object pContinuation);
+extern "C" void InterpreterStubRetFloat();
+extern "C" void InterpreterStubRet2Float();
extern "C" void InterpreterStubRet2I8();
extern "C" void InterpreterStubRet2Double();
extern "C" void InterpreterStubRetFloatInt();
extern "C" void InterpreterStubRetIntFloat();
-#endif // TARGET_RISCV64
+#endif // TARGET_RISCV64 || TARGET_LOONGARCH64
#define INVOKE_FUNCTION_PTR(functionPtrName) LOG2((LF2_INTERPRETER, LL_INFO10000, #functionPtrName "\n")); return functionPtrName
@@ -1432,7 +1436,11 @@ CallStubHeader::InvokeFunctionPtr CallStubGenerator::GetInvokeFunctionPtr(CallSt
INVOKE_FUNCTION_PTR(CallJittedMethodRetSwiftLowered);
#endif // TARGET_APPLE
#endif // TARGET_ARM64
-#if defined(TARGET_RISCV64)
+#if defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
+ case ReturnTypeFloat:
+ INVOKE_FUNCTION_PTR(CallJittedMethodRetFloat);
+ case ReturnType2Float:
+ INVOKE_FUNCTION_PTR(CallJittedMethodRet2Float);
case ReturnType2I8:
INVOKE_FUNCTION_PTR(CallJittedMethodRet2I8);
case ReturnType2Double:
@@ -1441,7 +1449,7 @@ CallStubHeader::InvokeFunctionPtr CallStubGenerator::GetInvokeFunctionPtr(CallSt
INVOKE_FUNCTION_PTR(CallJittedMethodRetFloatInt);
case ReturnTypeIntFloat:
INVOKE_FUNCTION_PTR(CallJittedMethodRetIntFloat);
-#endif // TARGET_RISCV64
+#endif // TARGET_RISCV64 || TARGET_LOONGARCH64
default:
_ASSERTE(!"Unexpected return type for interpreter stub");
return NULL; // This should never happen, but just in case.
@@ -1547,7 +1555,11 @@ PCODE CallStubGenerator::GetInterpreterReturnTypeHandler(CallStubGenerator::Retu
case ReturnType4Vector128:
RETURN_TYPE_HANDLER(InterpreterStubRet4Vector128);
#endif // TARGET_ARM64
-#if defined(TARGET_RISCV64)
+#if defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
+ case ReturnTypeFloat:
+ RETURN_TYPE_HANDLER(InterpreterStubRetFloat);
+ case ReturnType2Float:
+ RETURN_TYPE_HANDLER(InterpreterStubRet2Float);
case ReturnType2I8:
RETURN_TYPE_HANDLER(InterpreterStubRet2I8);
case ReturnType2Double:
@@ -1556,7 +1568,7 @@ PCODE CallStubGenerator::GetInterpreterReturnTypeHandler(CallStubGenerator::Retu
RETURN_TYPE_HANDLER(InterpreterStubRetFloatInt);
case ReturnTypeIntFloat:
RETURN_TYPE_HANDLER(InterpreterStubRetIntFloat);
-#endif // TARGET_RISCV64
+#endif // TARGET_RISCV64 || TARGET_LOONGARCH64
default:
_ASSERTE(!"Unexpected return type for interpreter stub");
return 0; // This should never happen, but just in case.
@@ -2665,9 +2677,9 @@ CallStubGenerator::ReturnType CallStubGenerator::GetReturnType(ArgIteratorType *
return ReturnTypeI8;
break;
case ELEMENT_TYPE_R4:
-#if defined(TARGET_ARM64) || defined(TARGET_32BIT)
+#if defined(TARGET_ARM64) || defined(TARGET_32BIT) || defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
return ReturnTypeFloat;
-#endif // TARGET_ARM64 || TARGET_32BIT
+#endif // TARGET_ARM64 || TARGET_32BIT || TARGET_RISCV64 || TARGET_LOONGARCH64
case ELEMENT_TYPE_R8:
return ReturnTypeDouble;
break;
@@ -2821,19 +2833,21 @@ CallStubGenerator::ReturnType CallStubGenerator::GetReturnType(ArgIteratorType *
_ASSERTE(!"The return types should be <= 8 bytes in size");
break;
}
-#elif defined(TARGET_RISCV64)
+#elif defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
{
FpStructInRegistersInfo info = pArgIt->GetReturnFpStructInRegistersInfo();
- // RISC-V pass floating-point struct fields in FA registers
+ // RISC-V and LoongArch64 pass floating-point struct fields in FA/F registers.
+ // Preserve 32-bit float width where applicable to avoid reading/writing
+ // packed 4-byte fields via 8-byte helpers.
if ((info.flags & FpStruct::OnlyOne) != 0)
{
- // Single field - could be float or int in single register
- return ReturnTypeDouble; // Use Double routine for both float and double (NaN-boxed)
+ return (info.Size1st() == sizeof(float)) ? ReturnTypeFloat : ReturnTypeDouble;
}
else if ((info.flags & FpStruct::BothFloat) != 0)
{
- // Two float/double fields
- return ReturnType2Double;
+ return (info.Size1st() == sizeof(float) && info.Size2nd() == sizeof(float))
+ ? ReturnType2Float
+ : ReturnType2Double;
}
else if ((info.flags & FpStruct::FloatInt) != 0)
{
diff --git a/src/coreclr/vm/callstubgenerator.h b/src/coreclr/vm/callstubgenerator.h
index c2996de338fba3..193849d18c3a0c 100644
--- a/src/coreclr/vm/callstubgenerator.h
+++ b/src/coreclr/vm/callstubgenerator.h
@@ -124,12 +124,14 @@ class CallStubGenerator
ReturnTypeSwiftLowered,
#endif // TARGET_APPLE
#endif // TARGET_ARM64
-#if defined(TARGET_RISCV64)
+#if defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
+ ReturnTypeFloat,
+ ReturnType2Float,
ReturnType2I8,
ReturnType2Double,
ReturnTypeFloatInt,
ReturnTypeIntFloat,
-#endif // TARGET_RISCV64
+#endif // TARGET_RISCV64 || TARGET_LOONGARCH64
};
enum class RoutineType
diff --git a/src/coreclr/vm/loongarch64/asmconstants.h b/src/coreclr/vm/loongarch64/asmconstants.h
index 77404df969160f..8a8050b8931a21 100644
--- a/src/coreclr/vm/loongarch64/asmconstants.h
+++ b/src/coreclr/vm/loongarch64/asmconstants.h
@@ -52,6 +52,9 @@ ASMCONSTANTS_C_ASSERT(Thread__m_pFrame == offsetof(Thread, m_pFrame));
#define OFFSETOF__RuntimeThreadLocals__ee_alloc_context 0
ASMCONSTANTS_C_ASSERT(OFFSETOF__RuntimeThreadLocals__ee_alloc_context == offsetof(RuntimeThreadLocals, alloc_context));
+#define OFFSETOF__ThreadLocalInfo__m_pThread 0
+ASMCONSTANTS_C_ASSERT(OFFSETOF__ThreadLocalInfo__m_pThread == offsetof(ThreadLocalInfo, m_pThread));
+
#define OFFSETOF__ee_alloc_context__alloc_ptr 0x8
ASMCONSTANTS_C_ASSERT(OFFSETOF__ee_alloc_context__alloc_ptr == offsetof(ee_alloc_context, m_GCAllocContext) +
offsetof(gc_alloc_context, alloc_ptr));
@@ -168,10 +171,12 @@ ASMCONSTANTS_C_ASSERT(SIZEOF__FixupPrecode == sizeof(FixupPrecode));
ASMCONSTANTS_C_ASSERT(MethodDesc_ALIGNMENT_SHIFT == MethodDesc::ALIGNMENT_SHIFT);
ASMCONSTANTS_C_ASSERT((1<ControlPC, pRD->SP));
}
+#ifdef FEATURE_INTERPRETER
+#ifndef DACCESS_COMPILE
+void InterpreterFrame::UpdateFloatingPointRegisters_Impl(const PREGDISPLAY pRD, TADDR)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // The interpreter frame saves the floating point callee-saved registers (f24-f31)
+ // before FloatArgumentRegisters and TransitionBlock:
+ // [f24-f31 (64 bytes)] [fa0-fa7 (64 bytes)] [TransitionBlock]
+ // So f24-f31 are located at TransitionBlock - 128.
+ TADDR pTransitionBlock = GetTransitionBlock();
+ UINT64 *pCalleeSavedFloats = (UINT64*)((BYTE*)pTransitionBlock - 128);
+
+ // LoongArch CONTEXT::F has 4 slots per register for LASX support.
+ // Each scalar double value is stored in the first slot.
+ for (int i = 0; i < 8; i++)
+ {
+ memcpy(&pRD->pCurrentContext->F[(24 + i) * 4], &pCalleeSavedFloats[i], sizeof(double));
+ }
+}
+#endif // DACCESS_COMPILE
+#endif // FEATURE_INTERPRETER
+
void FaultingExceptionFrame::UpdateRegDisplay_Impl(const PREGDISPLAY pRD, bool updateFloats)
{
LIMITED_METHOD_DAC_CONTRACT;
@@ -608,26 +631,44 @@ AdjustContextForVirtualStub(
PCODE f_IP = GetIP(pContext);
- StubCodeBlockKind sk = RangeSectionStubManager::GetStubKind(f_IP);
-
- if (sk == STUB_CODE_BLOCK_VSD_DISPATCH_STUB)
+ bool isVirtualStubNullCheck = false;
+#ifdef FEATURE_CACHED_INTERFACE_DISPATCH
+ if (VirtualCallStubManager::isCachedInterfaceDispatchStubAVLocation(f_IP))
{
- if (*PTR_DWORD(f_IP - 4) != DISPATCH_STUB_FIRST_DWORD)
- {
- _ASSERTE(!"AV in DispatchStub at unknown instruction");
- return FALSE;
- }
+ isVirtualStubNullCheck = true;
}
- else
- if (sk == STUB_CODE_BLOCK_VSD_RESOLVE_STUB)
+#endif // FEATURE_CACHED_INTERFACE_DISPATCH
+#ifdef FEATURE_VIRTUAL_STUB_DISPATCH
+ if (!isVirtualStubNullCheck)
{
- if (*PTR_DWORD(f_IP) != RESOLVE_STUB_FIRST_DWORD)
+ StubCodeBlockKind sk = RangeSectionStubManager::GetStubKind(f_IP);
+
+ if (sk == STUB_CODE_BLOCK_VSD_DISPATCH_STUB)
{
- _ASSERTE(!"AV in ResolveStub at unknown instruction");
- return FALSE;
+ if (*PTR_DWORD(f_IP - 4) != DISPATCH_STUB_FIRST_DWORD)
+ {
+ _ASSERTE(!"AV in DispatchStub at unknown instruction");
+ }
+ else
+ {
+ isVirtualStubNullCheck = true;
+ }
+ }
+ else if (sk == STUB_CODE_BLOCK_VSD_RESOLVE_STUB)
+ {
+ if (*PTR_DWORD(f_IP) != RESOLVE_STUB_FIRST_DWORD)
+ {
+ _ASSERTE(!"AV in ResolveStub at unknown instruction");
+ }
+ else
+ {
+ isVirtualStubNullCheck = true;
+ }
}
}
- else
+#endif // FEATURE_VIRTUAL_STUB_DISPATCH
+
+ if (!isVirtualStubNullCheck)
{
return FALSE;
}
@@ -949,6 +990,8 @@ void StubLinkerCPU::EmitCallManagedMethod(MethodDesc *pMD, BOOL fTailCall)
// Allocation of dynamic helpers
//
+#ifndef FEATURE_STUBPRECODE_DYNAMIC_HELPERS
+
#define DYNAMIC_HELPER_ALIGNMENT sizeof(TADDR)
#define BEGIN_DYNAMIC_HELPER_EMIT_WORKER(size) \
@@ -1433,6 +1476,7 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
END_DYNAMIC_HELPER_EMIT();
}
}
+#endif // FEATURE_STUBPRECODE_DYNAMIC_HELPERS
#endif // FEATURE_READYTORUN
#endif // #ifndef DACCESS_COMPILE
diff --git a/src/coreclr/vm/prestub.cpp b/src/coreclr/vm/prestub.cpp
index 3bc20bab6854d8..a9758414d1c684 100644
--- a/src/coreclr/vm/prestub.cpp
+++ b/src/coreclr/vm/prestub.cpp
@@ -2037,7 +2037,7 @@ extern "C" void* STDCALL ExecuteInterpretedMethod(TransitionBlock* pTransitionBl
pArgumentRegisters->x[2] = (INT64)*frames.interpreterFrame.GetContinuationPtr();
#elif defined(TARGET_ARM)
pArgumentRegisters->r[2] = (INT64)*frames.interpreterFrame.GetContinuationPtr();
- #elif defined(TARGET_RISCV64)
+ #elif defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
pArgumentRegisters->a[2] = (INT64)*frames.interpreterFrame.GetContinuationPtr();
#elif defined(TARGET_WASM)
// We do not yet have an ABI for WebAssembly native code to handle here.
diff --git a/src/coreclr/vm/riscv64/asmconstants.h b/src/coreclr/vm/riscv64/asmconstants.h
index 7a892c5a918e7a..3327a70d71985b 100644
--- a/src/coreclr/vm/riscv64/asmconstants.h
+++ b/src/coreclr/vm/riscv64/asmconstants.h
@@ -169,10 +169,12 @@ ASMCONSTANTS_C_ASSERT(FaultingExceptionFrame__m_fFilterExecuted == offsetof(Faul
ASMCONSTANTS_C_ASSERT(SIZEOF__FixupPrecode == sizeof(FixupPrecode));
ASMCONSTANTS_C_ASSERT(MethodDesc_ALIGNMENT_SHIFT == MethodDesc::ALIGNMENT_SHIFT);
+#ifdef FEATURE_VIRTUAL_STUB_DISPATCH
#define ResolveCacheElem__target 0x10
#define ResolveCacheElem__pNext 0x18
ASMCONSTANTS_C_ASSERT(ResolveCacheElem__target == offsetof(ResolveCacheElem, target));
ASMCONSTANTS_C_ASSERT(ResolveCacheElem__pNext == offsetof(ResolveCacheElem, pNext));
+#endif // FEATURE_VIRTUAL_STUB_DISPATCH
#define OFFSETOF__DynamicStaticsInfo__m_pMethodTable 0x10
ASMCONSTANTS_C_ASSERT(OFFSETOF__DynamicStaticsInfo__m_pMethodTable
diff --git a/src/coreclr/vm/riscv64/asmhelpers.S b/src/coreclr/vm/riscv64/asmhelpers.S
index 0d14aab155fc4f..a2c4496288fe57 100644
--- a/src/coreclr/vm/riscv64/asmhelpers.S
+++ b/src/coreclr/vm/riscv64/asmhelpers.S
@@ -409,6 +409,7 @@ NESTED_END TheUMEntryPrestub, _TEXT
GenerateRedirectedStubWithFrame RedirectForThreadAbort, ThrowControlForThread
+#ifdef FEATURE_VIRTUAL_STUB_DISPATCH
// ------------------------------------------------------------------
// ResolveWorkerChainLookupAsmStub
//
@@ -496,6 +497,7 @@ NESTED_ENTRY ResolveWorkerAsmStub, _TEXT, NoHandler
EPILOG_BRANCH_REG t4
NESTED_END ResolveWorkerAsmStub, _TEXT
+#endif // FEATURE_VIRTUAL_STUB_DISPATCH
#ifdef FEATURE_HIJACK
// ------------------------------------------------------------------
@@ -1301,6 +1303,30 @@ NESTED_ENTRY CallJittedMethodRetDouble, _TEXT, NoHandler
EPILOG_RETURN
NESTED_END CallJittedMethodRetDouble, _TEXT
+// a0 - routines array
+// a1 - interpreter stack args location
+// a2 - interpreter stack return value location
+// a3 - stack arguments size (properly aligned)
+// a4 - address of continuation return value
+NESTED_ENTRY CallJittedMethodRetFloat, _TEXT, NoHandler
+ PROLOG_SAVE_REG_PAIR_INDEXED fp, ra, -32
+ sd a2, 16(fp)
+ sd a4, 24(fp)
+ sub sp, sp, a3
+ mv t2, a0
+ mv t3, a1
+ ld t4, 0(t2)
+ addi t2, t2, 8
+ jalr t4
+ ld a4, 24(fp)
+ sd a2, 0(a4)
+ ld a2, 16(fp)
+ fsw fa0, 0(a2)
+ EPILOG_STACK_RESTORE
+ EPILOG_RESTORE_REG_PAIR_INDEXED fp, ra, 32
+ EPILOG_RETURN
+NESTED_END CallJittedMethodRetFloat, _TEXT
+
// a0 - routines array
// a1 - interpreter stack args location
// a2 - interpreter stack return value location
@@ -1351,6 +1377,31 @@ NESTED_ENTRY CallJittedMethodRet2Double, _TEXT, NoHandler
EPILOG_RETURN
NESTED_END CallJittedMethodRet2Double, _TEXT
+// a0 - routines array
+// a1 - interpreter stack args location
+// a2 - interpreter stack return value location
+// a3 - stack arguments size (properly aligned)
+// a4 - address of continuation return value
+NESTED_ENTRY CallJittedMethodRet2Float, _TEXT, NoHandler
+ PROLOG_SAVE_REG_PAIR_INDEXED fp, ra, -32
+ sd a2, 16(fp)
+ sd a4, 24(fp)
+ sub sp, sp, a3
+ mv t2, a0
+ mv t3, a1
+ ld t4, 0(t2)
+ addi t2, t2, 8
+ jalr t4
+ ld a4, 24(fp)
+ sd a2, 0(a4)
+ ld a2, 16(fp)
+ fsw fa0, 0(a2)
+ fsw fa1, 4(a2)
+ EPILOG_STACK_RESTORE
+ EPILOG_RESTORE_REG_PAIR_INDEXED fp, ra, 32
+ EPILOG_RETURN
+NESTED_END CallJittedMethodRet2Float, _TEXT
+
// a0 - routines array
// a1 - interpreter stack args location
// a2 - interpreter stack return value location
diff --git a/src/coreclr/vm/riscv64/stubs.cpp b/src/coreclr/vm/riscv64/stubs.cpp
index b05d86c0f15afe..7686a6523274cb 100644
--- a/src/coreclr/vm/riscv64/stubs.cpp
+++ b/src/coreclr/vm/riscv64/stubs.cpp
@@ -579,26 +579,44 @@ AdjustContextForVirtualStub(
PCODE f_IP = GetIP(pContext);
- StubCodeBlockKind sk = RangeSectionStubManager::GetStubKind(f_IP);
-
- if (sk == STUB_CODE_BLOCK_VSD_DISPATCH_STUB)
+ bool isVirtualStubNullCheck = false;
+#ifdef FEATURE_CACHED_INTERFACE_DISPATCH
+ if (VirtualCallStubManager::isCachedInterfaceDispatchStubAVLocation(f_IP))
{
- if (*PTR_DWORD(f_IP - 4) != DISPATCH_STUB_FIRST_DWORD)
- {
- _ASSERTE(!"AV in DispatchStub at unknown instruction");
- return FALSE;
- }
+ isVirtualStubNullCheck = true;
}
- else
- if (sk == STUB_CODE_BLOCK_VSD_RESOLVE_STUB)
+#endif // FEATURE_CACHED_INTERFACE_DISPATCH
+#ifdef FEATURE_VIRTUAL_STUB_DISPATCH
+ if (!isVirtualStubNullCheck)
{
- if (*PTR_DWORD(f_IP) != RESOLVE_STUB_FIRST_DWORD)
+ StubCodeBlockKind sk = RangeSectionStubManager::GetStubKind(f_IP);
+
+ if (sk == STUB_CODE_BLOCK_VSD_DISPATCH_STUB)
+ {
+ if (*PTR_DWORD(f_IP - 4) != DISPATCH_STUB_FIRST_DWORD)
+ {
+ _ASSERTE(!"AV in DispatchStub at unknown instruction");
+ }
+ else
+ {
+ isVirtualStubNullCheck = true;
+ }
+ }
+ else if (sk == STUB_CODE_BLOCK_VSD_RESOLVE_STUB)
{
- _ASSERTE(!"AV in ResolveStub at unknown instruction");
- return FALSE;
+ if (*PTR_DWORD(f_IP) != RESOLVE_STUB_FIRST_DWORD)
+ {
+ _ASSERTE(!"AV in ResolveStub at unknown instruction");
+ }
+ else
+ {
+ isVirtualStubNullCheck = true;
+ }
}
}
- else
+#endif // FEATURE_VIRTUAL_STUB_DISPATCH
+
+ if (!isVirtualStubNullCheck)
{
return FALSE;
}
@@ -1007,6 +1025,8 @@ void StubLinkerCPU::EmitCallManagedMethod(MethodDesc *pMD, BOOL fTailCall)
//
// Allocation of dynamic helpers
//
+#ifndef FEATURE_STUBPRECODE_DYNAMIC_HELPERS
+
#define DYNAMIC_HELPER_ALIGNMENT sizeof(TADDR)
#define BEGIN_DYNAMIC_HELPER_EMIT_WORKER(size) \
@@ -1494,6 +1514,7 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
END_DYNAMIC_HELPER_EMIT();
}
}
+#endif // FEATURE_STUBPRECODE_DYNAMIC_HELPERS
#endif // FEATURE_READYTORUN
diff --git a/src/coreclr/vm/riscv64/thunktemplates.S b/src/coreclr/vm/riscv64/thunktemplates.S
index e2990d44c57eee..ce4e1d4b912025 100644
--- a/src/coreclr/vm/riscv64/thunktemplates.S
+++ b/src/coreclr/vm/riscv64/thunktemplates.S
@@ -23,6 +23,7 @@ LEAF_ENTRY FixupPrecodeCode
jr t1
LEAF_END_MARKED FixupPrecodeCode
+#ifdef FEATURE_TIERED_COMPILATION
LEAF_ENTRY CallCountingStubCode
auipc t2, 0x4
ld t3, (CallCountingStubData__RemainingCallCountCell)(t2)
@@ -36,3 +37,4 @@ LOCAL_LABEL(CountReachedZero):
ld t1, (CallCountingStubData__TargetForThresholdReached)(t2)
jr t1
LEAF_END_MARKED CallCountingStubCode
+#endif // FEATURE_TIERED_COMPILATION
diff --git a/src/installer/pkg/sfx/Microsoft.NETCore.App/Directory.Build.props b/src/installer/pkg/sfx/Microsoft.NETCore.App/Directory.Build.props
index 7c752b511514ae..52fcfcf860bbf9 100644
--- a/src/installer/pkg/sfx/Microsoft.NETCore.App/Directory.Build.props
+++ b/src/installer/pkg/sfx/Microsoft.NETCore.App/Directory.Build.props
@@ -122,7 +122,7 @@
-
+
diff --git a/src/tests/JIT/interpreter/InterpreterTester.csproj b/src/tests/JIT/interpreter/InterpreterTester.csproj
index 45a180eef5fe85..5e0641be74dc2b 100644
--- a/src/tests/JIT/interpreter/InterpreterTester.csproj
+++ b/src/tests/JIT/interpreter/InterpreterTester.csproj
@@ -2,7 +2,7 @@
true
true
- true
+ true
true