From bd142fea6a86e2060f071dde21b125fe74273ecb Mon Sep 17 00:00:00 2001 From: Jan Vorlicek Date: Thu, 12 Feb 2026 02:35:20 +0100 Subject: [PATCH] Redo fix lookup for current Thread in async signal handler (#122513) This is a new version of the fix. The code in HijackCallback was using whether pThreadToHijack is NULL as an indicator of whether it should suspend inline or redirect the thread. So passing in pThreadToHijack without other changes caused it to never suspend inline on Unix, and it was causing hangs in System.Collections.Concurrent tests. The current CheckActivationSafePoint uses thread local storage to get the current Thread instance. But this function is called from an async signal handler (the activation signal handler), and it is not allowed to access TLS variables there because the access can allocate; if the interrupted code was running allocation code, it could crash. There was no problem with this since .NET 1.0, but a change in a recent glibc version has broken this. We've got reports of crashes in this code due to the reason mentioned above. This change introduces an async-safe mechanism for accessing the current Thread instance from async signal handlers. It uses a segmented array that can grow, but never shrink. Entries for threads are added when the runtime creates a thread / attaches to an external thread, and removed when the thread dies. The check for safety of the activation injection was further enhanced to make sure that the ScanReaderLock is not taken. In cases where it would need to be taken, we just reject the location. Since NativeAOT is subject to the same issue, the code to maintain the thread-id-to-thread-instance map is placed in the minipal and shared between coreclr and NativeAOT. 
Closes https://github.com/dotnet/runtime/issues/121581 --------- Co-authored-by: Jan Kotas Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/coreclr/nativeaot/Runtime/CMakeLists.txt | 6 + src/coreclr/nativeaot/Runtime/thread.cpp | 5 +- src/coreclr/nativeaot/Runtime/thread.h | 2 +- src/coreclr/nativeaot/Runtime/threadstore.cpp | 20 +++ src/coreclr/nativeaot/Runtime/threadstore.h | 3 + .../nativeaot/Runtime/unix/PalUnix.cpp | 30 ++--- .../nativeaot/Runtime/windows/PalMinWin.cpp | 6 +- src/coreclr/pal/src/exception/signal.cpp | 24 ++-- src/coreclr/runtime/asyncsafethreadmap.cpp | 126 ++++++++++++++++++ src/coreclr/runtime/asyncsafethreadmap.h | 27 ++++ src/coreclr/vm/CMakeLists.txt | 12 ++ src/coreclr/vm/codeman.cpp | 25 +++- src/coreclr/vm/codeman.h | 8 +- src/coreclr/vm/threads.cpp | 34 ++++- src/coreclr/vm/threads.h | 2 + src/coreclr/vm/threadsuspend.cpp | 7 +- src/native/minipal/thread.h | 66 ++++++--- 17 files changed, 340 insertions(+), 63 deletions(-) create mode 100644 src/coreclr/runtime/asyncsafethreadmap.cpp create mode 100644 src/coreclr/runtime/asyncsafethreadmap.h diff --git a/src/coreclr/nativeaot/Runtime/CMakeLists.txt b/src/coreclr/nativeaot/Runtime/CMakeLists.txt index 61ed486a7989c8..5abe50f9f02862 100644 --- a/src/coreclr/nativeaot/Runtime/CMakeLists.txt +++ b/src/coreclr/nativeaot/Runtime/CMakeLists.txt @@ -56,6 +56,12 @@ set(COMMON_RUNTIME_SOURCES ${CLR_SRC_NATIVE_DIR}/minipal/xoshiro128pp.c ) +if (CLR_CMAKE_TARGET_UNIX AND NOT CLR_CMAKE_TARGET_ARCH_WASM) + list(APPEND COMMON_RUNTIME_SOURCES + ${RUNTIME_DIR}/asyncsafethreadmap.cpp + ) +endif() + set(SERVER_GC_SOURCES ${GC_DIR}/gceesvr.cpp ${GC_DIR}/gcsvr.cpp diff --git a/src/coreclr/nativeaot/Runtime/thread.cpp b/src/coreclr/nativeaot/Runtime/thread.cpp index f0e369224a20ad..2c596855a21b5e 100644 --- a/src/coreclr/nativeaot/Runtime/thread.cpp +++ b/src/coreclr/nativeaot/Runtime/thread.cpp @@ -625,7 +625,7 @@ void Thread::Hijack() PalHijack(this); } -void 
Thread::HijackCallback(NATIVE_CONTEXT* pThreadContext, Thread* pThreadToHijack) +void Thread::HijackCallback(NATIVE_CONTEXT* pThreadContext, Thread* pThreadToHijack, bool doInlineSuspend) { // If we are no longer trying to suspend, no need to do anything. // This is just an optimization. It is ok to race with the setting the trap flag here. @@ -694,9 +694,8 @@ void Thread::HijackCallback(NATIVE_CONTEXT* pThreadContext, Thread* pThreadToHij ASSERT(codeManager->IsUnwindable(pvAddress) || runtime->IsConservativeStackReportingEnabled()); #endif - // if we are not given a thread to hijack // perform in-line wait on the current thread - if (pThreadToHijack == NULL) + if (doInlineSuspend) { ASSERT(pThread->m_interruptedContext == NULL); pThread->InlineSuspend(pThreadContext); diff --git a/src/coreclr/nativeaot/Runtime/thread.h b/src/coreclr/nativeaot/Runtime/thread.h index 83249cfc6bc77e..356c01eba8e125 100644 --- a/src/coreclr/nativeaot/Runtime/thread.h +++ b/src/coreclr/nativeaot/Runtime/thread.h @@ -280,7 +280,7 @@ class Thread : private RuntimeThreadLocals void* GetHijackedReturnAddress(); static bool IsHijackTarget(void * address); - static void HijackCallback(NATIVE_CONTEXT* pThreadContext, Thread* pThreadToHijack); + static void HijackCallback(NATIVE_CONTEXT* pThreadContext, Thread* pThreadToHijack, bool doInlineSuspend); #else // FEATURE_HIJACK void Unhijack() { } bool IsHijacked() { return false; } diff --git a/src/coreclr/nativeaot/Runtime/threadstore.cpp b/src/coreclr/nativeaot/Runtime/threadstore.cpp index daf745ba0b65ef..b7f764f4c2eaa4 100644 --- a/src/coreclr/nativeaot/Runtime/threadstore.cpp +++ b/src/coreclr/nativeaot/Runtime/threadstore.cpp @@ -22,6 +22,8 @@ #include "TargetPtrs.h" #include "yieldprocessornormalized.h" #include +#include +#include "asyncsafethreadmap.h" #include "slist.inl" @@ -143,6 +145,14 @@ void ThreadStore::AttachCurrentThread(bool fAcquireThreadStoreLock) pAttachingThread->m_ThreadStateFlags = Thread::TSF_Attached; 
pTS->m_ThreadList.PushHead(pAttachingThread); + +#if defined(TARGET_UNIX) && !defined(TARGET_WASM) + if (!InsertThreadIntoAsyncSafeMap(pAttachingThread->m_threadId, pAttachingThread)) + { + PalPrintFatalError("\nFailed to insert thread into async-safe map due to out of memory.\n"); + RhFailFast(); + } +#endif // TARGET_UNIX && !TARGET_WASM } // static @@ -188,6 +198,9 @@ void ThreadStore::DetachCurrentThread() pTS->m_ThreadList.RemoveFirst(pDetachingThread); // tidy up GC related stuff (release allocation context, etc..) pDetachingThread->Detach(); +#if defined(TARGET_UNIX) && !defined(TARGET_WASM) + RemoveThreadFromAsyncSafeMap(pDetachingThread->m_threadId, pDetachingThread); +#endif } // post-mortem clean up. @@ -426,6 +439,13 @@ EXTERN_C RuntimeThreadLocals* RhpGetThread() return &tls_CurrentThread; } +#if defined(TARGET_UNIX) && !defined(TARGET_WASM) +Thread * ThreadStore::GetCurrentThreadIfAvailableAsyncSafe() +{ + return (Thread*)FindThreadInAsyncSafeMap(minipal_get_current_thread_id_no_cache()); +} +#endif // TARGET_UNIX && !TARGET_WASM + #endif // !DACCESS_COMPILE #ifdef _WIN32 diff --git a/src/coreclr/nativeaot/Runtime/threadstore.h b/src/coreclr/nativeaot/Runtime/threadstore.h index c9208cecf3b8d7..4e991d3f061464 100644 --- a/src/coreclr/nativeaot/Runtime/threadstore.h +++ b/src/coreclr/nativeaot/Runtime/threadstore.h @@ -48,6 +48,9 @@ class ThreadStore static Thread * RawGetCurrentThread(); static Thread * GetCurrentThread(); static Thread * GetCurrentThreadIfAvailable(); +#if defined(TARGET_UNIX) && !defined(TARGET_WASM) + static Thread * GetCurrentThreadIfAvailableAsyncSafe(); +#endif static PTR_Thread GetSuspendingThread(); static void AttachCurrentThread(); static void AttachCurrentThread(bool fAcquireThreadStoreLock); diff --git a/src/coreclr/nativeaot/Runtime/unix/PalUnix.cpp b/src/coreclr/nativeaot/Runtime/unix/PalUnix.cpp index 76b18bb0ad8e2c..14efb5a984e641 100644 --- a/src/coreclr/nativeaot/Runtime/unix/PalUnix.cpp +++ 
b/src/coreclr/nativeaot/Runtime/unix/PalUnix.cpp @@ -1016,24 +1016,24 @@ static struct sigaction g_previousActivationHandler; static void ActivationHandler(int code, siginfo_t* siginfo, void* context) { - // Only accept activations from the current process - if (siginfo->si_pid == getpid() + Thread* pThread = ThreadStore::GetCurrentThreadIfAvailableAsyncSafe(); + if (pThread) + { + // Only accept activations from the current process + if (siginfo->si_pid == getpid() #ifdef HOST_APPLE - // On Apple platforms si_pid is sometimes 0. It was confirmed by Apple to be expected, as the si_pid is tracked at the process level. So when multiple - // signals are in flight in the same process at the same time, it may be overwritten / zeroed. - || siginfo->si_pid == 0 + // On Apple platforms si_pid is sometimes 0. It was confirmed by Apple to be expected, as the si_pid is tracked at the process level. So when multiple + // signals are in flight in the same process at the same time, it may be overwritten / zeroed. 
+ || siginfo->si_pid == 0 #endif - ) - { - // Make sure that errno is not modified - int savedErrNo = errno; - Thread::HijackCallback((NATIVE_CONTEXT*)context, NULL); - errno = savedErrNo; - } + ) + { + // Make sure that errno is not modified + int savedErrNo = errno; + Thread::HijackCallback((NATIVE_CONTEXT*)context, pThread, true /* doInlineSuspend */); + errno = savedErrNo; + } - Thread* pThread = ThreadStore::GetCurrentThreadIfAvailable(); - if (pThread) - { pThread->SetActivationPending(false); } diff --git a/src/coreclr/nativeaot/Runtime/windows/PalMinWin.cpp b/src/coreclr/nativeaot/Runtime/windows/PalMinWin.cpp index 8df2e75d1aaf81..7c4a9000913e56 100644 --- a/src/coreclr/nativeaot/Runtime/windows/PalMinWin.cpp +++ b/src/coreclr/nativeaot/Runtime/windows/PalMinWin.cpp @@ -673,9 +673,9 @@ static void* g_returnAddressHijackTarget = NULL; static void NTAPI ActivationHandler(ULONG_PTR parameter) { CLONE_APC_CALLBACK_DATA* data = (CLONE_APC_CALLBACK_DATA*)parameter; - Thread::HijackCallback((NATIVE_CONTEXT*)data->ContextRecord, NULL); - Thread* pThread = (Thread*)data->Parameter; + Thread::HijackCallback((NATIVE_CONTEXT*)data->ContextRecord, pThread, true /* doInlineSuspend */); + pThread->SetActivationPending(false); } @@ -833,7 +833,7 @@ void PalHijack(Thread* pThreadToHijack) if (isSafeToRedirect) { - Thread::HijackCallback((NATIVE_CONTEXT*)&win32ctx, pThreadToHijack); + Thread::HijackCallback((NATIVE_CONTEXT*)&win32ctx, pThreadToHijack, false /* doInlineSuspend */); } } diff --git a/src/coreclr/pal/src/exception/signal.cpp b/src/coreclr/pal/src/exception/signal.cpp index 732d1d5db05933..0dfd05d028a73b 100644 --- a/src/coreclr/pal/src/exception/signal.cpp +++ b/src/coreclr/pal/src/exception/signal.cpp @@ -936,22 +936,20 @@ static void inject_activation_handler(int code, siginfo_t *siginfo, void *contex CONTEXTToNativeContext(&winContext, ucontext); } } + + // Call the original handler when it is not ignored or default (terminate). 
+ if (g_previous_activation.sa_flags & SA_SIGINFO) + { + _ASSERTE(g_previous_activation.sa_sigaction != NULL); + g_previous_activation.sa_sigaction(code, siginfo, context); + } else { - // Call the original handler when it is not ignored or default (terminate). - if (g_previous_activation.sa_flags & SA_SIGINFO) - { - _ASSERTE(g_previous_activation.sa_sigaction != NULL); - g_previous_activation.sa_sigaction(code, siginfo, context); - } - else + if (g_previous_activation.sa_handler != SIG_IGN && + g_previous_activation.sa_handler != SIG_DFL) { - if (g_previous_activation.sa_handler != SIG_IGN && - g_previous_activation.sa_handler != SIG_DFL) - { - _ASSERTE(g_previous_activation.sa_handler != NULL); - g_previous_activation.sa_handler(code); - } + _ASSERTE(g_previous_activation.sa_handler != NULL); + g_previous_activation.sa_handler(code); } } } diff --git a/src/coreclr/runtime/asyncsafethreadmap.cpp b/src/coreclr/runtime/asyncsafethreadmap.cpp new file mode 100644 index 00000000000000..5c5882589a2613 --- /dev/null +++ b/src/coreclr/runtime/asyncsafethreadmap.cpp @@ -0,0 +1,126 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
+ +#include "common.h" + +#include "asyncsafethreadmap.h" + +// Async safe lock free thread map for use in signal handlers + +struct ThreadEntry +{ + size_t osThread; + void* pThread; +}; + +#define MAX_THREADS_IN_SEGMENT 256 + +struct ThreadSegment +{ + ThreadEntry entries[MAX_THREADS_IN_SEGMENT]; + ThreadSegment* pNext; +}; + +static ThreadSegment *s_pAsyncSafeThreadMapHead = NULL; + +bool InsertThreadIntoAsyncSafeMap(size_t osThread, void* pThread) +{ + size_t startIndex = osThread % MAX_THREADS_IN_SEGMENT; + + ThreadSegment* pSegment = s_pAsyncSafeThreadMapHead; + ThreadSegment** ppSegment = &s_pAsyncSafeThreadMapHead; + while (true) + { + if (pSegment == NULL) + { + // Need to add a new segment + ThreadSegment* pNewSegment = new (nothrow) ThreadSegment(); + if (pNewSegment == NULL) + { + // Memory allocation failed + return false; + } + + memset(pNewSegment, 0, sizeof(ThreadSegment)); + ThreadSegment* pExpected = NULL; + if (!__atomic_compare_exchange_n( + ppSegment, + &pExpected, + pNewSegment, + false /* weak */, + __ATOMIC_RELEASE /* success_memorder */, + __ATOMIC_RELAXED /* failure_memorder */)) + { + // Another thread added the segment first + delete pNewSegment; + pNewSegment = pExpected; + } + + pSegment = pNewSegment; + } + for (size_t i = 0; i < MAX_THREADS_IN_SEGMENT; i++) + { + size_t index = (startIndex + i) % MAX_THREADS_IN_SEGMENT; + + size_t expected = 0; + if (__atomic_compare_exchange_n( + &pSegment->entries[index].osThread, + &expected, + osThread, + false /* weak */, + __ATOMIC_RELEASE /* success_memorder */, + __ATOMIC_RELAXED /* failure_memorder */)) + { + // Successfully inserted + // Use atomic store with release to ensure proper ordering + __atomic_store_n(&pSegment->entries[index].pThread, pThread, __ATOMIC_RELEASE); + return true; + } + } + + ppSegment = &pSegment->pNext; + pSegment = __atomic_load_n(&pSegment->pNext, __ATOMIC_ACQUIRE); + } +} + +void RemoveThreadFromAsyncSafeMap(size_t osThread, void* pThread) +{ + size_t startIndex 
= osThread % MAX_THREADS_IN_SEGMENT; + + ThreadSegment* pSegment = s_pAsyncSafeThreadMapHead; + while (pSegment) + { + for (size_t i = 0; i < MAX_THREADS_IN_SEGMENT; i++) + { + size_t index = (startIndex + i) % MAX_THREADS_IN_SEGMENT; + if (pSegment->entries[index].pThread == pThread) + { + // Found the entry, remove it + pSegment->entries[index].pThread = NULL; + __atomic_exchange_n(&pSegment->entries[index].osThread, (size_t)0, __ATOMIC_RELEASE); + return; + } + } + pSegment = __atomic_load_n(&pSegment->pNext, __ATOMIC_ACQUIRE); + } +} + +void *FindThreadInAsyncSafeMap(size_t osThread) +{ + size_t startIndex = osThread % MAX_THREADS_IN_SEGMENT; + ThreadSegment* pSegment = s_pAsyncSafeThreadMapHead; + while (pSegment) + { + for (size_t i = 0; i < MAX_THREADS_IN_SEGMENT; i++) + { + size_t index = (startIndex + i) % MAX_THREADS_IN_SEGMENT; + // Use acquire to synchronize with release in InsertThreadIntoAsyncSafeMap + if (__atomic_load_n(&pSegment->entries[index].osThread, __ATOMIC_ACQUIRE) == osThread) + { + return pSegment->entries[index].pThread; + } + } + pSegment = __atomic_load_n(&pSegment->pNext, __ATOMIC_ACQUIRE); + } + return NULL; +} diff --git a/src/coreclr/runtime/asyncsafethreadmap.h b/src/coreclr/runtime/asyncsafethreadmap.h new file mode 100644 index 00000000000000..f9ffaf4bffbbac --- /dev/null +++ b/src/coreclr/runtime/asyncsafethreadmap.h @@ -0,0 +1,27 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +#ifndef __ASYNCSAFETHREADMAP_H__ +#define __ASYNCSAFETHREADMAP_H__ + +#if defined(TARGET_UNIX) && !defined(TARGET_WASM) + +// Insert a thread into the async-safe map. +// * osThread - The OS thread ID to insert. +// * pThread - A pointer to the thread object to associate with the OS thread ID. +// * return true if the insertion was successful, false otherwise (OOM). 
+bool InsertThreadIntoAsyncSafeMap(size_t osThread, void* pThread); + +// Remove a thread from the async-safe map. +// * osThread - The OS thread ID to remove. +// * pThread - A pointer to the thread object associated with the OS thread ID. +void RemoveThreadFromAsyncSafeMap(size_t osThread, void* pThread); + +// Find a thread in the async-safe map. +// * osThread - The OS thread ID to search for. +// * return - A pointer to the thread object associated with the OS thread ID, or NULL if not found. +void* FindThreadInAsyncSafeMap(size_t osThread); + +#endif // TARGET_UNIX && !TARGET_WASM + +#endif // __ASYNCSAFETHREADMAP_H__ diff --git a/src/coreclr/vm/CMakeLists.txt b/src/coreclr/vm/CMakeLists.txt index 37f1a76d4f73a3..60479796650884 100644 --- a/src/coreclr/vm/CMakeLists.txt +++ b/src/coreclr/vm/CMakeLists.txt @@ -384,6 +384,12 @@ set(VM_SOURCES_WKS ${VM_SOURCES_GDBJIT} ) +if (CLR_CMAKE_TARGET_UNIX AND NOT CLR_CMAKE_TARGET_ARCH_WASM) + list(APPEND VM_SOURCES_WKS + ${RUNTIME_DIR}/asyncsafethreadmap.cpp + ) +endif() + # coreclr needs to compile codeman.cpp differently depending on flavor (i.e. dll vs. static lib)) list(REMOVE_ITEM VM_SOURCES_WKS codeman.cpp) @@ -482,6 +488,12 @@ set(VM_HEADERS_WKS ${VM_HEADERS_GDBJIT} ) +if (CLR_CMAKE_TARGET_UNIX AND NOT CLR_CMAKE_TARGET_ARCH_WASM) + list(APPEND VM_HEADERS_WKS + ${RUNTIME_DIR}/asyncsafethreadmap.h + ) +endif() + set(GC_SOURCES_WKS ${GC_SOURCES_DAC_AND_WKS_COMMON} ../gc/gceventstatus.cpp diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index 03aaed2aa9515f..ef46fae5d3d182 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -858,7 +858,7 @@ IJitManager::IJitManager() // been stopped when we suspend the EE so they won't be touching an element that is about to be deleted. // However for pre-emptive mode threads, they could be stalled right on top of the element we want // to delete, so we need to apply the reader lock to them and wait for them to drain. 
-ExecutionManager::ScanFlag ExecutionManager::GetScanFlags() +ExecutionManager::ScanFlag ExecutionManager::GetScanFlags(Thread *pThread) { CONTRACTL { NOTHROW; @@ -869,7 +869,10 @@ ExecutionManager::ScanFlag ExecutionManager::GetScanFlags() #if !defined(DACCESS_COMPILE) - Thread *pThread = GetThreadNULLOk(); + if (!pThread) + { + pThread = GetThreadNULLOk(); + } if (!pThread) return ScanNoReaderLock; @@ -5034,6 +5037,24 @@ BOOL ExecutionManager::IsManagedCode(PCODE currentPC) return IsManagedCodeWorker(currentPC, &lockState); } +//************************************************************************** +BOOL ExecutionManager::IsManagedCodeNoLock(PCODE currentPC) +{ + CONTRACTL { + NOTHROW; + GC_NOTRIGGER; + } CONTRACTL_END; + + if (currentPC == (PCODE)NULL) + return FALSE; + + _ASSERTE(GetScanFlags() != ScanReaderLock); + + // Since ScanReaderLock is not set, then we must assume that the ReaderLock is effectively taken. + RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; + return IsManagedCodeWorker(currentPC, &lockState); +} + //************************************************************************** NOINLINE // Make sure that the slow path with lock won't affect the fast path BOOL ExecutionManager::IsManagedCodeWithLock(PCODE currentPC) diff --git a/src/coreclr/vm/codeman.h b/src/coreclr/vm/codeman.h index 1be9d9ce96d81b..096d706ee038cf 100644 --- a/src/coreclr/vm/codeman.h +++ b/src/coreclr/vm/codeman.h @@ -2268,11 +2268,15 @@ class ExecutionManager }; // Returns default scan flag for current thread - static ScanFlag GetScanFlags(); + static ScanFlag GetScanFlags(Thread *pThread = NULL); - // Returns whether currentPC is in managed code. Returns false for jump stubs on WIN64. + // Returns whether currentPC is in managed code. Returns false for jump stubs. static BOOL IsManagedCode(PCODE currentPC); + // Returns whether currentPC is in managed code. Returns false for jump stubs. + // Does not acquire the reader lock. 
Caller must ensure it is safe. + static BOOL IsManagedCodeNoLock(PCODE currentPC); + // Returns true if currentPC is ready to run codegen static BOOL IsReadyToRunCode(PCODE currentPC); diff --git a/src/coreclr/vm/threads.cpp b/src/coreclr/vm/threads.cpp index 61a12afdd24710..aa6f7c7536858c 100644 --- a/src/coreclr/vm/threads.cpp +++ b/src/coreclr/vm/threads.cpp @@ -33,6 +33,8 @@ #include "vmholder.h" #include "exceptmacros.h" #include "minipal/time.h" +#include "minipal/thread.h" +#include "asyncsafethreadmap.h" #ifdef FEATURE_COMINTEROP #include "runtimecallablewrapper.h" @@ -62,6 +64,17 @@ #include "interpexec.h" #endif // FEATURE_INTERPRETER +#ifndef DACCESS_COMPILE +Thread* GetThreadAsyncSafe() +{ +#if defined(TARGET_UNIX) && !defined(TARGET_WASM) + return (Thread*)FindThreadInAsyncSafeMap(minipal_get_current_thread_id_no_cache()); +#else + return GetThreadNULLOk(); +#endif +} +#endif // DACCESS_COMPILE + static const PortableTailCallFrame g_sentinelTailCallFrame = { NULL, NULL }; TailCallTls::TailCallTls() @@ -372,7 +385,26 @@ void SetThread(Thread* t) #endif // Clear or set the app domain to the one domain based on if the thread is being nulled out or set - t_CurrentThreadInfo.m_pAppDomain = t == NULL ? 
NULL : AppDomain::GetCurrentDomain(); + if (t != NULL) + { + t_CurrentThreadInfo.m_pAppDomain = AppDomain::GetCurrentDomain(); +#if defined(TARGET_UNIX) && !defined(TARGET_WASM) + if (!InsertThreadIntoAsyncSafeMap(t->GetOSThreadId64(), t)) + { + EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("Failed to insert thread into async-safe map due to out of memory.")); + } +#endif // TARGET_UNIX && !TARGET_WASM + } + else + { + t_CurrentThreadInfo.m_pAppDomain = NULL; +#if defined(TARGET_UNIX) && !defined(TARGET_WASM) + if (origThread != NULL) + { + RemoveThreadFromAsyncSafeMap(origThread->GetOSThreadId64(), origThread); + } +#endif // TARGET_UNIX && !TARGET_WASM + } } BOOL Thread::Alert () diff --git a/src/coreclr/vm/threads.h b/src/coreclr/vm/threads.h index 0c22e89efc873c..a7d6df5eee59d7 100644 --- a/src/coreclr/vm/threads.h +++ b/src/coreclr/vm/threads.h @@ -5620,6 +5620,8 @@ class StackWalkerWalkingThreadHolder Thread* m_PreviousValue; }; +EXTERN_C Thread* GetThreadAsyncSafe(); + #ifndef DACCESS_COMPILE #if defined(TARGET_WINDOWS) && defined(TARGET_AMD64) EXTERN_C void STDCALL ClrRestoreNonvolatileContextWorker(PCONTEXT ContextRecord, DWORD64 ssp); diff --git a/src/coreclr/vm/threadsuspend.cpp b/src/coreclr/vm/threadsuspend.cpp index b9b199f53ddd0d..5ea08c3362bbab 100644 --- a/src/coreclr/vm/threadsuspend.cpp +++ b/src/coreclr/vm/threadsuspend.cpp @@ -5732,16 +5732,17 @@ void ThreadSuspend::SuspendEE(SUSPEND_REASON reason) // It is unsafe to use blocking APIs or allocate in this method. BOOL CheckActivationSafePoint(SIZE_T ip) { - Thread *pThread = GetThreadNULLOk(); + Thread *pThread = GetThreadAsyncSafe(); // The criteria for safe activation is to be running managed code. 
// Also we are not interested in handling interruption if we are already in preemptive mode nor if we are single stepping BOOL isActivationSafePoint = pThread != NULL && (pThread->m_StateNC & Thread::TSNC_DebuggerIsStepping) == 0 && pThread->PreemptiveGCDisabled() && - ExecutionManager::IsManagedCode(ip); + (ExecutionManager::GetScanFlags(pThread) != ExecutionManager::ScanReaderLock) && + ExecutionManager::IsManagedCodeNoLock(ip); - if (!isActivationSafePoint) + if (!isActivationSafePoint && pThread != NULL) { pThread->m_hasPendingActivation = false; } diff --git a/src/native/minipal/thread.h b/src/native/minipal/thread.h index 48405dfb3195ae..b5532c315cbc1e 100644 --- a/src/native/minipal/thread.h +++ b/src/native/minipal/thread.h @@ -4,6 +4,8 @@ #ifndef HAVE_MINIPAL_THREAD_H #define HAVE_MINIPAL_THREAD_H +#ifndef HOST_WINDOWS + #include #include #include @@ -36,6 +38,42 @@ extern "C" { #endif +/** + * Get the current thread ID without caching in a TLS variable. + * + * @return The current thread ID as a size_t value. + */ +static inline size_t minipal_get_current_thread_id_no_cache(void) +{ + size_t tid; +#if defined(__wasm) && !defined(_REENTRANT) + tid = 1; // In non-reentrant WASM builds, we define a single thread with ID 1. +#else // !__wasm || _REENTRANT + +#if defined(__linux__) + tid = (size_t)syscall(SYS_gettid); +#elif defined(__APPLE__) + uint64_t thread_id; + pthread_threadid_np(pthread_self(), &thread_id); + tid = (size_t)thread_id; // Cast the uint64_t thread ID to size_t +#elif defined(__FreeBSD__) + tid = (size_t)pthread_getthreadid_np(); +#elif defined(__NetBSD__) + tid = (size_t)_lwp_self(); +#elif defined(__HAIKU__) + tid = (size_t)find_thread(NULL); +#elif defined(__sun) + tid = (size_t)pthread_self(); +#elif defined(__wasm) + tid = (size_t)(void*)pthread_self(); +#else +#error "Unsupported platform" +#endif + +#endif // __wasm && !_REENTRANT + return tid; +} + /** * Get the current thread ID. 
* @@ -43,9 +81,10 @@ extern "C" { */ static inline size_t minipal_get_current_thread_id(void) { -#if defined(__wasm) && defined(MONO_WASM_MT) - return 0; -#else +#if defined(__wasm) && !defined(_REENTRANT) + return minipal_get_current_thread_id_no_cache(); + +#else // !__wasm || _REENTRANT #if defined(__GNUC__) && !defined(__clang__) && defined(__cplusplus) // gcc doesn't like _Thread_local when __cplusplus is defined. // although thread_local is C2x, which other compilers don't allow with C11. @@ -55,25 +94,9 @@ static inline size_t minipal_get_current_thread_id(void) #endif if (!tid) -#if defined(__linux__) - tid = (size_t)syscall(SYS_gettid); -#elif defined(__APPLE__) { - uint64_t thread_id; - pthread_threadid_np(pthread_self(), &thread_id); - tid = (size_t)thread_id; // Cast the uint64_t thread ID to size_t + tid = minipal_get_current_thread_id_no_cache(); } -#elif defined(__FreeBSD__) - tid = (size_t)pthread_getthreadid_np(); -#elif defined(__NetBSD__) - tid = (size_t)_lwp_self(); -#elif defined(__HAIKU__) - tid = (size_t)find_thread(NULL); -#elif defined(__sun) - tid = (size_t)pthread_self(); -#else - tid = (size_t)(void*)pthread_self(); -#endif return tid; #endif @@ -115,8 +138,11 @@ static inline int minipal_set_thread_name(pthread_t thread, const char* name) #endif } + #ifdef __cplusplus } #endif // extern "C" +#endif // !HOST_WINDOWS + #endif // HAVE_MINIPAL_THREAD_H