From ceb9bcfd14e197bf5250abc77e482187b1154555 Mon Sep 17 00:00:00 2001 From: Stefanos Baziotis Date: Sat, 14 Sep 2019 18:08:33 +0300 Subject: [PATCH 01/13] Decouple Stack Context --- src/core/thread/context.d | 308 +++++++++++++++++++++++++ src/core/thread/fiber.d | 57 ++--- src/core/thread/osthread.d | 459 ++++++++----------------------------- 3 files changed, 411 insertions(+), 413 deletions(-) create mode 100644 src/core/thread/context.d diff --git a/src/core/thread/context.d b/src/core/thread/context.d new file mode 100644 index 0000000000..e5e3f0166f --- /dev/null +++ b/src/core/thread/context.d @@ -0,0 +1,308 @@ +/** + * thread.context provides stack context data structures needed by threads and fibers. + * + * Copyright: Copyright Sean Kelly 2005 - 2012. + * License: Distributed under the + * $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0). + * (See accompanying file LICENSE) + * Authors: Sean Kelly, Walter Bright, Alex Rønne Petersen, Martin Nowak + * Source: $(DRUNTIMESRC core/thread/osthread.d) + */ + +module core.thread.context; + +struct StackContext +{ + // bottom, top of stack + void* bstack, tstack; + + /// Slot for the EH implementation to keep some state for each stack + /// (will be necessary for exception chaining, etc.). Opaque as far as + /// we are concerned here. + void* ehContext; + + StackContext* within, next, prev; +} + +/** +A class that represents a thread of execution that manages a stack. +This serves primarily as a superclass for Thread and Fiber. +*/ +class StackContextExecutor +{ + // + // The type of routine passed on thread/fiber construction. + // + enum Call + { + NO, + FN, + DG + } + + // Common standard data for Thread / Fiber. + Call m_call = Call.NO; + union + { + void function() m_fn; + void delegate() m_dg; + } + + // Thread / Fiber entry point. Invokes the function or delegate passed on + // construction (if any). 
+ final void run() + { + switch ( m_call ) + { + case Call.FN: + m_fn(); + break; + case Call.DG: + m_dg(); + break; + default: + break; + } + } +} + +struct GlobalStackContext +{ + /////////////////////////////////////////////////////////////////////////// + // GC Scanning Support + /////////////////////////////////////////////////////////////////////////// + + import core.sync.mutex : Mutex; + + + // NOTE: The GC scanning process works like so: + // + // 1. Suspend all threads. + // 2. Scan the stacks of all suspended threads for roots. + // 3. Resume all threads. + // + // Step 1 and 3 require a list of all threads in the system, while + // step 2 requires a list of all thread stacks (each represented by + // a Context struct). Traditionally, there was one stack per thread + // and the Context structs were not necessary. However, Fibers have + // changed things so that each thread has its own 'main' stack plus + // an arbitrary number of nested stacks (normally referenced via + // m_curr). Also, there may be 'free-floating' stacks in the system, + // which are Fibers that are not currently executing on any specific + // thread but are still being processed and still contain valid + // roots. + // + // To support all of this, the StackContext struct has been created to + // represent a stack range, and a global list of StackContext structs has + // been added to enable scanning of these stack ranges. The lifetime + // (and presence in the StackContext list) of a thread's 'main' stack will + // be equivalent to the thread's lifetime. So the StackContext will be + // added to the list on thread entry, and removed from the list on + // thread exit (which is essentially the same as the presence of a + // Thread object in its own global list). The lifetime of a Fiber's + // context, however, will be tied to the lifetime of the Fiber object + // itself, and Fibers are expected to add/remove their Context struct + // on construction/deletion. 
+ + + // + // All use of the global thread lists/array should synchronize on this lock. + // + // Careful as the GC acquires this lock after the GC lock to suspend all + // threads any GC usage with slock held can result in a deadlock through + // lock order inversion. + @property static Mutex slock() nothrow @nogc + { + return cast(Mutex)_locks[0].ptr; + } + + @property static Mutex criticalRegionLock() nothrow @nogc + { + return cast(Mutex)_locks[1].ptr; + } + + __gshared align(Mutex.alignof) void[__traits(classInstanceSize, Mutex)][2] _locks; + + static void initLocks() @nogc + { + foreach (ref lock; _locks) + { + lock[] = typeid(Mutex).initializer[]; + (cast(Mutex)lock.ptr).__ctor(); + } + } + + static void termLocks() @nogc + { + foreach (ref lock; _locks) + (cast(Mutex)lock.ptr).__dtor(); + } + + + + __gshared StackContext* sm_cbeg; + + import core.thread : Thread; + + __gshared Thread sm_tbeg; + __gshared size_t sm_tlen; + + // can't use core.internal.util.array in public code + __gshared Thread* pAboutToStart; + __gshared size_t nAboutToStart; + + + __gshared uint suspendDepth = 0; + + /////////////////////////////////////////////////////////////////////////// + // Global Context List Operations + /////////////////////////////////////////////////////////////////////////// + + + // + // Add a context to the global context list. + // + static void add(StackContext* c) nothrow @nogc + in + { + assert( c ); + assert( !c.next && !c.prev ); + } + do + { + slock.lock_nothrow(); + scope(exit) slock.unlock_nothrow(); + assert(!suspendDepth); // must be 0 b/c it's only set with slock held + + if (sm_cbeg) + { + c.next = sm_cbeg; + sm_cbeg.prev = c; + } + sm_cbeg = c; + } + + // + // Remove a context from the global context list. + // + // This assumes slock being acquired. 
This isn't done here to + // avoid double locking when called from remove(Thread) + static void remove(StackContext* c) nothrow @nogc + in + { + assert( c ); + assert( c.next || c.prev ); + } + do + { + if ( c.prev ) + c.prev.next = c.next; + if ( c.next ) + c.next.prev = c.prev; + if ( sm_cbeg == c ) + sm_cbeg = c.next; + // NOTE: Don't null out c.next or c.prev because opApply currently + // follows c.next after removing a node. This could be easily + // addressed by simply returning the next node from this + // function, however, a context should never be re-added to the + // list anyway and having next and prev be non-null is a good way + // to ensure that. + } + + /////////////////////////////////////////////////////////////////////////// + // Global Thread List Operations + /////////////////////////////////////////////////////////////////////////// + + + // + // Add a thread to the global thread list. + // + static void add( Thread t, bool rmAboutToStart = true ) nothrow @nogc + in + { + assert( t ); + assert( !t.next && !t.prev ); + } + do + { + slock.lock_nothrow(); + scope(exit) slock.unlock_nothrow(); + assert(t.isRunning); // check this with slock to ensure pthread_create already returned + assert(!suspendDepth); // must be 0 b/c it's only set with slock held + + if (rmAboutToStart) + { + size_t idx = -1; + foreach (i, thr; pAboutToStart[0 .. nAboutToStart]) + { + if (thr is t) + { + idx = i; + break; + } + } + assert(idx != -1); + import core.stdc.string : memmove; + import core.stdc.stdlib : realloc; + memmove(pAboutToStart + idx, + pAboutToStart + idx + 1, Thread.sizeof * (nAboutToStart - idx - 1)); + + pAboutToStart = + cast(Thread*)realloc(pAboutToStart, Thread.sizeof * --nAboutToStart); + } + + if (sm_tbeg) + { + t.next = sm_tbeg; + sm_tbeg.prev = t; + } + sm_tbeg = t; + ++sm_tlen; + } + + + // + // Remove a thread from the global thread list. 
+ // + static void remove( Thread t ) nothrow @nogc + in + { + assert( t ); + } + do + { + // Thread was already removed earlier, might happen b/c of thread_detachInstance + if (!t.next && !t.prev && (sm_tbeg !is t)) + return; + + slock.lock_nothrow(); + { + // NOTE: When a thread is removed from the global thread list its + // main context is invalid and should be removed as well. + // It is possible that t.m_curr could reference more + // than just the main context if the thread exited abnormally + // (if it was terminated), but we must assume that the user + // retains a reference to them and that they may be re-used + // elsewhere. Therefore, it is the responsibility of any + // object that creates contexts to clean them up properly + // when it is done with them. + remove( &t.m_main ); + + if ( t.prev ) + t.prev.next = t.next; + if ( t.next ) + t.next.prev = t.prev; + if ( sm_tbeg is t ) + sm_tbeg = t.next; + t.prev = t.next = null; + --sm_tlen; + } + // NOTE: Don't null out t.next or t.prev because opApply currently + // follows t.next after removing a node. This could be easily + // addressed by simply returning the next node from this + // function, however, a thread should never be re-added to the + // list anyway and having next and prev be non-null is a good way + // to ensure that. + slock.unlock_nothrow(); + } +} diff --git a/src/core/thread/fiber.d b/src/core/thread/fiber.d index 5eb15e7555..d6a275ca6e 100644 --- a/src/core/thread/fiber.d +++ b/src/core/thread/fiber.d @@ -518,7 +518,10 @@ private * * Authors: Based on a design by Mikola Lysenko. 
*/ -class Fiber + +import core.thread.context : StackContext, StackContextExecutor; + +class Fiber : StackContextExecutor { /////////////////////////////////////////////////////////////////////////// // Initialization @@ -614,6 +617,8 @@ class Fiber // General Actions /////////////////////////////////////////////////////////////////////////// + /// Flag to control rethrow behavior of $(D $(LREF call)) + enum Rethrow : bool { no, yes } /** * Transfers execution to this fiber object. The calling context will be @@ -692,9 +697,6 @@ class Fiber } } - /// Flag to control rethrow behavior of $(D $(LREF call)) - enum Rethrow : bool { no, yes } - /** * Resets this fiber so that it may be re-used, optionally with a * new function/delegate. This routine should only be called for @@ -866,43 +868,10 @@ private: m_call = Call.NO; } - - // - // Fiber entry point. Invokes the function or delegate passed on - // construction (if any). - // - final void run() - { - switch ( m_call ) - { - case Call.FN: - m_fn(); - break; - case Call.DG: - m_dg(); - break; - default: - break; - } - } - - private: - // - // The type of routine passed on fiber construction. - // - enum Call - { - NO, - FN, - DG - } - - // // Standard fiber data // - Call m_call; union { void function() m_fn; @@ -917,7 +886,7 @@ private: /////////////////////////////////////////////////////////////////////////// // Stack Management /////////////////////////////////////////////////////////////////////////// - + import core.thread.context : GlobalStackContext; // // Allocate a new stack for this fiber. @@ -941,7 +910,7 @@ private: // room for this struct explicitly would be to mash it into the // base of the stack being allocated below. However, doing so // requires too much special logic to be worthwhile. 
- m_ctxt = new Thread.Context; + m_ctxt = new StackContext; static if ( __traits( compiles, VirtualAlloc ) ) { @@ -1060,7 +1029,7 @@ private: } } - Thread.add( m_ctxt ); + GlobalStackContext.add( m_ctxt ); } @@ -1076,9 +1045,9 @@ private: { // NOTE: m_ctxt is guaranteed to be alive because it is held in the // global context list. - Thread.slock.lock_nothrow(); - scope(exit) Thread.slock.unlock_nothrow(); - Thread.remove( m_ctxt ); + GlobalStackContext.slock.lock_nothrow(); + scope(exit) GlobalStackContext.slock.unlock_nothrow(); + GlobalStackContext.remove( m_ctxt ); static if ( __traits( compiles, VirtualAlloc ) ) { @@ -1445,7 +1414,7 @@ private: } - Thread.Context* m_ctxt; + StackContext* m_ctxt; size_t m_size; void* m_pmem; diff --git a/src/core/thread/osthread.d b/src/core/thread/osthread.d index b46a6655fe..8cd19cb5d9 100644 --- a/src/core/thread/osthread.d +++ b/src/core/thread/osthread.d @@ -363,6 +363,8 @@ else version (Posix) import core.sys.posix.signal; import core.sys.posix.time; + import core.thread.context : GlobalStackContext; + version (Darwin) { import core.sys.darwin.mach.thread_act; @@ -401,13 +403,13 @@ else version (Posix) atomicStore!(MemoryOrder.raw)(obj.m_isRunning, true); Thread.setThis(obj); // allocates lazy TLS (see Issue 11981) - Thread.add(obj); // can only receive signals from here on + GlobalStackContext.add(obj); // can only receive signals from here on scope (exit) { - Thread.remove(obj); + GlobalStackContext.remove(obj); atomicStore!(MemoryOrder.raw)(obj.m_isRunning, false); } - Thread.add(&obj.m_main); + GlobalStackContext.add(&obj.m_main); static extern (C) void thread_cleanupHandler( void* arg ) nothrow @nogc { @@ -600,11 +602,11 @@ else static assert( false, "Unknown threading implementation." 
); } - /////////////////////////////////////////////////////////////////////////////// // Thread /////////////////////////////////////////////////////////////////////////////// +import core.thread.context; /** * This class encapsulates all threading functionality for the D @@ -614,7 +616,7 @@ else * A new thread may be created using either derivation or composition, as * in the following example. */ -class Thread +class Thread : StackContextExecutor { /////////////////////////////////////////////////////////////////////////// // Initialization @@ -677,7 +679,7 @@ class Thread ~this() nothrow @nogc { bool no_context = m_addr == m_addr.init; - bool not_registered = !next && !prev && (sm_tbeg !is this); + bool not_registered = !next && !prev && (GlobalStackContext.sm_tbeg !is this); if (no_context || not_registered) { @@ -762,12 +764,14 @@ class Thread onThreadError( "Error creating thread" ); } - slock.lock_nothrow(); - scope(exit) slock.unlock_nothrow(); + GlobalStackContext.slock.lock_nothrow(); + scope(exit) GlobalStackContext.slock.unlock_nothrow(); { - ++nAboutToStart; - pAboutToStart = cast(Thread*)realloc(pAboutToStart, Thread.sizeof * nAboutToStart); - pAboutToStart[nAboutToStart - 1] = this; + ++(GlobalStackContext.nAboutToStart); + GlobalStackContext.pAboutToStart = cast(Thread*)realloc( + GlobalStackContext.pAboutToStart, + Thread.sizeof * GlobalStackContext.nAboutToStart); + GlobalStackContext.pAboutToStart[GlobalStackContext.nAboutToStart - 1] = this; version (Windows) { if ( ResumeThread( m_hndl ) == -1 ) @@ -1492,15 +1496,16 @@ class Thread Thread[] buf; while (true) { - immutable len = atomicLoad!(MemoryOrder.raw)(*cast(shared)&sm_tlen); + auto tlen_addr = &GlobalStackContext.sm_tlen; + immutable len = atomicLoad!(MemoryOrder.raw)(*cast(shared)tlen_addr); resize(buf, len); assert(buf.length == len); - synchronized (slock) + synchronized (GlobalStackContext.slock) { - if (len == sm_tlen) + if (len == GlobalStackContext.sm_tlen) { size_t pos; - for 
(Thread t = sm_tbeg; t; t = t.next) + for (Thread t = GlobalStackContext.sm_tbeg; t; t = t.next) buf[pos++] = t; return buf; } @@ -1537,39 +1542,7 @@ private: m_curr = &m_main; } - - // - // Thread entry point. Invokes the function or delegate passed on - // construction (if any). - // - final void run() - { - switch ( m_call ) - { - case Call.FN: - m_fn(); - break; - case Call.DG: - m_dg(); - break; - default: - break; - } - } - - private: - // - // The type of routine passed on thread construction. - // - enum Call - { - NO, - FN, - DG - } - - // // Standard types // @@ -1613,13 +1586,7 @@ private: mach_port_t m_tmach; } ThreadID m_addr; - Call m_call; string m_name; - union - { - void function() m_fn; - void delegate() m_dg; - } size_t m_sz; version (Posix) { @@ -1649,32 +1616,12 @@ private: } package(core.thread): - static struct Context - { - void* bstack, - tstack; - - /// Slot for the EH implementation to keep some state for each stack - /// (will be necessary for exception chaining, etc.). Opaque as far as - /// we are concerned here. 
- void* ehContext; + StackContext m_main; + StackContext* m_curr; + bool m_lock; + void* m_tlsgcdata; - Context* within; - Context* next, - prev; - } - - Context m_main; - Context* m_curr; - bool m_lock; - void* m_tlsgcdata; - - /////////////////////////////////////////////////////////////////////////// - // Thread Context and GC Scanning Support - /////////////////////////////////////////////////////////////////////////// - - - final void pushContext( Context* c ) nothrow @nogc + final void pushContext( StackContext* c ) nothrow @nogc in { assert( !c.within ); @@ -1686,7 +1633,6 @@ package(core.thread): m_curr = c; } - final void popContext() nothrow @nogc in { @@ -1694,15 +1640,13 @@ package(core.thread): } do { - Context* c = m_curr; + StackContext* c = m_curr; m_curr = c.within; c.ehContext = swapContext(m_curr.ehContext); c.within = null; } -private: - - final Context* topContext() nothrow @nogc + final StackContext* topContext() nothrow @nogc in { assert( m_curr ); @@ -1712,6 +1656,12 @@ private: return m_curr; } + /////////////////////////////////////////////////////////////////////////// + // Thread Context and GC Scanning Support + /////////////////////////////////////////////////////////////////////////// + + private: + version (Windows) { version (X86) @@ -1747,239 +1697,11 @@ private: package(core.thread): - /////////////////////////////////////////////////////////////////////////// - // GC Scanning Support - /////////////////////////////////////////////////////////////////////////// - - - // NOTE: The GC scanning process works like so: - // - // 1. Suspend all threads. - // 2. Scan the stacks of all suspended threads for roots. - // 3. Resume all threads. - // - // Step 1 and 3 require a list of all threads in the system, while - // step 2 requires a list of all thread stacks (each represented by - // a Context struct). Traditionally, there was one stack per thread - // and the Context structs were not necessary. 
However, Fibers have - // changed things so that each thread has its own 'main' stack plus - // an arbitrary number of nested stacks (normally referenced via - // m_curr). Also, there may be 'free-floating' stacks in the system, - // which are Fibers that are not currently executing on any specific - // thread but are still being processed and still contain valid - // roots. - // - // To support all of this, the Context struct has been created to - // represent a stack range, and a global list of Context structs has - // been added to enable scanning of these stack ranges. The lifetime - // (and presence in the Context list) of a thread's 'main' stack will - // be equivalent to the thread's lifetime. So the Ccontext will be - // added to the list on thread entry, and removed from the list on - // thread exit (which is essentially the same as the presence of a - // Thread object in its own global list). The lifetime of a Fiber's - // context, however, will be tied to the lifetime of the Fiber object - // itself, and Fibers are expected to add/remove their Context struct - // on construction/deletion. - - - // - // All use of the global thread lists/array should synchronize on this lock. - // - // Careful as the GC acquires this lock after the GC lock to suspend all - // threads any GC usage with slock held can result in a deadlock through - // lock order inversion. 
- @property static Mutex slock() nothrow @nogc - { - return cast(Mutex)_locks[0].ptr; - } - - @property static Mutex criticalRegionLock() nothrow @nogc - { - return cast(Mutex)_locks[1].ptr; - } - - __gshared align(Mutex.alignof) void[__traits(classInstanceSize, Mutex)][2] _locks; - - static void initLocks() @nogc - { - foreach (ref lock; _locks) - { - lock[] = typeid(Mutex).initializer[]; - (cast(Mutex)lock.ptr).__ctor(); - } - } - - static void termLocks() @nogc - { - foreach (ref lock; _locks) - (cast(Mutex)lock.ptr).__dtor(); - } - - __gshared Context* sm_cbeg; - - __gshared Thread sm_tbeg; - __gshared size_t sm_tlen; - - // can't use core.internal.util.array in public code - __gshared Thread* pAboutToStart; - __gshared size_t nAboutToStart; - // // Used for ordering threads in the global thread list. // Thread prev; Thread next; - - - /////////////////////////////////////////////////////////////////////////// - // Global Context List Operations - /////////////////////////////////////////////////////////////////////////// - - - // - // Add a context to the global context list. - // - static void add( Context* c ) nothrow @nogc - in - { - assert( c ); - assert( !c.next && !c.prev ); - } - do - { - slock.lock_nothrow(); - scope(exit) slock.unlock_nothrow(); - assert(!suspendDepth); // must be 0 b/c it's only set with slock held - - if (sm_cbeg) - { - c.next = sm_cbeg; - sm_cbeg.prev = c; - } - sm_cbeg = c; - } - - // - // Remove a context from the global context list. - // - // This assumes slock being acquired. This isn't done here to - // avoid double locking when called from remove(Thread) - static void remove( Context* c ) nothrow @nogc - in - { - assert( c ); - assert( c.next || c.prev ); - } - do - { - if ( c.prev ) - c.prev.next = c.next; - if ( c.next ) - c.next.prev = c.prev; - if ( sm_cbeg == c ) - sm_cbeg = c.next; - // NOTE: Don't null out c.next or c.prev because opApply currently - // follows c.next after removing a node. 
This could be easily - // addressed by simply returning the next node from this - // function, however, a context should never be re-added to the - // list anyway and having next and prev be non-null is a good way - // to ensure that. - } - - - /////////////////////////////////////////////////////////////////////////// - // Global Thread List Operations - /////////////////////////////////////////////////////////////////////////// - - - // - // Add a thread to the global thread list. - // - static void add( Thread t, bool rmAboutToStart = true ) nothrow @nogc - in - { - assert( t ); - assert( !t.next && !t.prev ); - } - do - { - slock.lock_nothrow(); - scope(exit) slock.unlock_nothrow(); - assert(t.isRunning); // check this with slock to ensure pthread_create already returned - assert(!suspendDepth); // must be 0 b/c it's only set with slock held - - if (rmAboutToStart) - { - size_t idx = -1; - foreach (i, thr; pAboutToStart[0 .. nAboutToStart]) - { - if (thr is t) - { - idx = i; - break; - } - } - assert(idx != -1); - import core.stdc.string : memmove; - memmove(pAboutToStart + idx, pAboutToStart + idx + 1, Thread.sizeof * (nAboutToStart - idx - 1)); - pAboutToStart = - cast(Thread*)realloc(pAboutToStart, Thread.sizeof * --nAboutToStart); - } - - if (sm_tbeg) - { - t.next = sm_tbeg; - sm_tbeg.prev = t; - } - sm_tbeg = t; - ++sm_tlen; - } - - - // - // Remove a thread from the global thread list. - // - static void remove( Thread t ) nothrow @nogc - in - { - assert( t ); - } - do - { - // Thread was already removed earlier, might happen b/c of thread_detachInstance - if (!t.next && !t.prev && (sm_tbeg !is t)) - return; - - slock.lock_nothrow(); - { - // NOTE: When a thread is removed from the global thread list its - // main context is invalid and should be removed as well. 
- // It is possible that t.m_curr could reference more - // than just the main context if the thread exited abnormally - // (if it was terminated), but we must assume that the user - // retains a reference to them and that they may be re-used - // elsewhere. Therefore, it is the responsibility of any - // object that creates contexts to clean them up properly - // when it is done with them. - remove( &t.m_main ); - - if ( t.prev ) - t.prev.next = t.next; - if ( t.next ) - t.next.prev = t.prev; - if ( sm_tbeg is t ) - sm_tbeg = t.next; - t.prev = t.next = null; - --sm_tlen; - } - // NOTE: Don't null out t.next or t.prev because opApply currently - // follows t.next after removing a node. This could be easily - // addressed by simply returning the next node from this - // function, however, a thread should never be re-added to the - // list anyway and having next and prev be non-null is a good way - // to ensure that. - slock.unlock_nothrow(); - } } /// @@ -2102,7 +1824,7 @@ extern (C) void thread_init() @nogc // functions to detect the condition and return immediately. 
initLowlevelThreads(); - Thread.initLocks(); + GlobalStackContext.initLocks(); // The Android VM runtime intercepts SIGUSR1 and apparently doesn't allow // its signal handler to run, so swap the two signals on Android, since @@ -2191,14 +1913,14 @@ extern (C) void thread_term() @nogc (cast(ubyte[])_mainThreadStore)[] = 0; Thread.sm_main = null; - assert(Thread.sm_tbeg && Thread.sm_tlen == 1); - assert(!Thread.nAboutToStart); - if (Thread.pAboutToStart) // in case realloc(p, 0) doesn't return null + assert(GlobalStackContext.sm_tbeg && GlobalStackContext.sm_tlen == 1); + assert(!GlobalStackContext.nAboutToStart); + if (GlobalStackContext.pAboutToStart) // in case realloc(p, 0) doesn't return null { - free(Thread.pAboutToStart); - Thread.pAboutToStart = null; + free(GlobalStackContext.pAboutToStart); + GlobalStackContext.pAboutToStart = null; } - Thread.termLocks(); + GlobalStackContext.termLocks(); termLowlevelThreads(); } @@ -2232,7 +1954,7 @@ extern (C) Thread thread_attachThis() private Thread attachThread(Thread thisThread) @nogc { - Thread.Context* thisContext = &thisThread.m_main; + StackContext* thisContext = &thisThread.m_main; assert( thisContext == thisThread.m_curr ); version (Windows) @@ -2260,8 +1982,8 @@ private Thread attachThread(Thread thisThread) @nogc assert( thisThread.m_tmach != thisThread.m_tmach.init ); } - Thread.add( thisThread, false ); - Thread.add( thisContext ); + GlobalStackContext.add( thisThread, false ); + GlobalStackContext.add( thisContext ); if ( Thread.sm_main !is null ) multiThreadedFlag = true; return thisThread; @@ -2344,7 +2066,7 @@ version (Windows) extern (C) void thread_detachThis() nothrow @nogc { if (auto t = Thread.getThis()) - Thread.remove(t); + GlobalStackContext.remove(t); } @@ -2362,14 +2084,14 @@ extern (C) void thread_detachThis() nothrow @nogc extern (C) void thread_detachByAddr( ThreadID addr ) { if ( auto t = thread_findByAddr( addr ) ) - Thread.remove( t ); + GlobalStackContext.remove( t ); } /// ditto extern 
(C) void thread_detachInstance( Thread t ) nothrow @nogc { - Thread.remove( t ); + GlobalStackContext.remove( t ); } @@ -2402,12 +2124,12 @@ unittest */ static Thread thread_findByAddr( ThreadID addr ) { - Thread.slock.lock_nothrow(); - scope(exit) Thread.slock.unlock_nothrow(); + GlobalStackContext.slock.lock_nothrow(); + scope(exit) GlobalStackContext.slock.unlock_nothrow(); // also return just spawned thread so that // DLL_THREAD_ATTACH knows it's a D thread - foreach (t; Thread.pAboutToStart[0 .. Thread.nAboutToStart]) + foreach (t; GlobalStackContext.pAboutToStart[0 .. GlobalStackContext.nAboutToStart]) if (t.m_addr == addr) return t; @@ -2443,23 +2165,23 @@ extern (C) void thread_setThis(Thread t) nothrow @nogc extern (C) void thread_joinAll() { Lagain: - Thread.slock.lock_nothrow(); + GlobalStackContext.slock.lock_nothrow(); // wait for just spawned threads - if (Thread.nAboutToStart) + if (GlobalStackContext.nAboutToStart) { - Thread.slock.unlock_nothrow(); + GlobalStackContext.slock.unlock_nothrow(); Thread.yield(); goto Lagain; } // join all non-daemon threads, the main thread is also a daemon - auto t = Thread.sm_tbeg; + auto t = GlobalStackContext.sm_tbeg; while (t) { if (!t.isRunning) { auto tn = t.next; - Thread.remove(t); + GlobalStackContext.remove(t); t = tn; } else if (t.isDaemon) @@ -2468,12 +2190,12 @@ extern (C) void thread_joinAll() } else { - Thread.slock.unlock_nothrow(); + GlobalStackContext.slock.unlock_nothrow(); t.join(); // might rethrow goto Lagain; // must restart iteration b/c of unlock } } - Thread.slock.unlock_nothrow(); + GlobalStackContext.slock.unlock_nothrow(); } @@ -2485,12 +2207,12 @@ shared static ~this() // NOTE: The functionality related to garbage collection must be minimally // operable after this dtor completes. Therefore, only minimal // cleanup may occur. 
- auto t = Thread.sm_tbeg; + auto t = GlobalStackContext.sm_tbeg; while (t) { auto tn = t.next; if (!t.isRunning) - Thread.remove(t); + GlobalStackContext.remove(t); t = tn; } } @@ -2587,9 +2309,6 @@ else } } -// Used for suspendAll/resumeAll below. -private __gshared uint suspendDepth = 0; - /** * Suspend the specified thread and load stack and register information for * use by thread_scanAll. If the supplied thread is the calling thread, @@ -2612,15 +2331,15 @@ private bool suspend( Thread t ) nothrow Lagain: if (!t.isRunning) { - Thread.remove(t); + GlobalStackContext.remove(t); return false; } else if (t.m_isInCriticalRegion) { - Thread.criticalRegionLock.unlock_nothrow(); + GlobalStackContext.criticalRegionLock.unlock_nothrow(); Thread.sleep(waittime); if (waittime < dur!"msecs"(10)) waittime *= 2; - Thread.criticalRegionLock.lock_nothrow(); + GlobalStackContext.criticalRegionLock.lock_nothrow(); goto Lagain; } @@ -2630,7 +2349,7 @@ private bool suspend( Thread t ) nothrow { if ( !t.isRunning ) { - Thread.remove( t ); + GlobalStackContext.remove( t ); return false; } onThreadError( "Unable to suspend thread" ); @@ -2755,7 +2474,7 @@ private bool suspend( Thread t ) nothrow { if ( !t.isRunning ) { - Thread.remove( t ); + GlobalStackContext.remove( t ); return false; } onThreadError( "Unable to suspend thread" ); @@ -2793,23 +2512,23 @@ extern (C) void thread_suspendAll() nothrow // error. For the short time when Thread.sm_tbeg is null, there is // no reason not to simply call the multithreaded code below, with // the expectation that the foreach loop will never be entered. 
- if ( !multiThreadedFlag && Thread.sm_tbeg ) + if ( !multiThreadedFlag && GlobalStackContext.sm_tbeg ) { - if ( ++suspendDepth == 1 ) + if ( ++GlobalStackContext.suspendDepth == 1 ) suspend( Thread.getThis() ); return; } - Thread.slock.lock_nothrow(); + GlobalStackContext.slock.lock_nothrow(); { - if ( ++suspendDepth > 1 ) + if ( ++GlobalStackContext.suspendDepth > 1 ) return; - Thread.criticalRegionLock.lock_nothrow(); - scope (exit) Thread.criticalRegionLock.unlock_nothrow(); + GlobalStackContext.criticalRegionLock.lock_nothrow(); + scope (exit) GlobalStackContext.criticalRegionLock.unlock_nothrow(); size_t cnt; - auto t = Thread.sm_tbeg; + auto t = GlobalStackContext.sm_tbeg; while (t) { auto tn = t.next; @@ -2910,7 +2629,7 @@ private void resume( Thread t ) nothrow { if ( !t.isRunning ) { - Thread.remove( t ); + GlobalStackContext.remove( t ); return; } onThreadError( "Unable to resume thread" ); @@ -2937,24 +2656,24 @@ private void resume( Thread t ) nothrow extern (C) void thread_resumeAll() nothrow in { - assert( suspendDepth > 0 ); + assert( GlobalStackContext.suspendDepth > 0 ); } do { // NOTE: See thread_suspendAll for the logic behind this. - if ( !multiThreadedFlag && Thread.sm_tbeg ) + if ( !multiThreadedFlag && GlobalStackContext.sm_tbeg ) { - if ( --suspendDepth == 0 ) + if ( --GlobalStackContext.suspendDepth == 0 ) resume( Thread.getThis() ); return; } - scope(exit) Thread.slock.unlock_nothrow(); + scope(exit) GlobalStackContext.slock.unlock_nothrow(); { - if ( --suspendDepth > 0 ) + if ( --GlobalStackContext.suspendDepth > 0 ) return; - for ( Thread t = Thread.sm_tbeg; t; t = t.next ) + for ( Thread t = GlobalStackContext.sm_tbeg; t; t = t.next ) { // NOTE: We do not need to care about critical regions at all // here. thread_suspendAll takes care of everything. 
@@ -2988,7 +2707,7 @@ alias ScanAllThreadsTypeFn = void delegate(ScanType, void*, void*) nothrow; /// extern (C) void thread_scanAllType( scope ScanAllThreadsTypeFn scan ) nothrow in { - assert( suspendDepth > 0 ); + assert( GlobalStackContext.suspendDepth > 0 ); } do { @@ -3001,7 +2720,7 @@ private void scanAllTypeImpl( scope ScanAllThreadsTypeFn scan, void* curStackTop Thread thisThread = null; void* oldStackTop = null; - if ( Thread.sm_tbeg ) + if ( GlobalStackContext.sm_tbeg ) { thisThread = Thread.getThis(); if ( !thisThread.m_lock ) @@ -3013,7 +2732,7 @@ private void scanAllTypeImpl( scope ScanAllThreadsTypeFn scan, void* curStackTop scope( exit ) { - if ( Thread.sm_tbeg ) + if ( GlobalStackContext.sm_tbeg ) { if ( !thisThread.m_lock ) { @@ -3025,10 +2744,12 @@ private void scanAllTypeImpl( scope ScanAllThreadsTypeFn scan, void* curStackTop // NOTE: Synchronizing on Thread.slock is not needed because this // function may only be called after all other threads have // been suspended from within the same lock. 
- if (Thread.nAboutToStart) - scan(ScanType.stack, Thread.pAboutToStart, Thread.pAboutToStart + Thread.nAboutToStart); + if (GlobalStackContext.nAboutToStart) + scan(ScanType.stack, + GlobalStackContext.pAboutToStart, + GlobalStackContext.pAboutToStart + GlobalStackContext.nAboutToStart); - for ( Thread.Context* c = Thread.sm_cbeg; c; c = c.next ) + for ( StackContext* c = GlobalStackContext.sm_cbeg; c; c = c.next ) { version (StackGrowsDown) { @@ -3044,7 +2765,7 @@ private void scanAllTypeImpl( scope ScanAllThreadsTypeFn scan, void* curStackTop } } - for ( Thread t = Thread.sm_tbeg; t; t = t.next ) + for ( Thread t = GlobalStackContext.sm_tbeg; t; t = t.next ) { version (Windows) { @@ -3102,7 +2823,7 @@ in } do { - synchronized (Thread.criticalRegionLock) + synchronized (GlobalStackContext.criticalRegionLock) Thread.getThis().m_isInCriticalRegion = true; } @@ -3121,7 +2842,7 @@ in } do { - synchronized (Thread.criticalRegionLock) + synchronized (GlobalStackContext.criticalRegionLock) Thread.getThis().m_isInCriticalRegion = false; } @@ -3139,7 +2860,7 @@ in } do { - synchronized (Thread.criticalRegionLock) + synchronized (GlobalStackContext.criticalRegionLock) return Thread.getThis().m_isInCriticalRegion; } @@ -3282,7 +3003,7 @@ alias IsMarkedDg = int delegate( void* addr ) nothrow; /// The isMarked callback */ extern(C) void thread_processGCMarks( scope IsMarkedDg isMarked ) nothrow { - for ( Thread t = Thread.sm_tbeg; t; t = t.next ) + for ( Thread t = GlobalStackContext.sm_tbeg; t; t = t.next ) { /* Can be null if collection was triggered between adding a * thread and calling rt_tlsgc_init. 
From 06e160d802be8b96a857f17a92e42a853dbf805d Mon Sep 17 00:00:00 2001 From: Stefanos Baziotis Date: Sat, 14 Sep 2019 18:16:53 +0300 Subject: [PATCH 02/13] Update context.d doc --- src/core/thread/context.d | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/core/thread/context.d b/src/core/thread/context.d index e5e3f0166f..9ecad69e4f 100644 --- a/src/core/thread/context.d +++ b/src/core/thread/context.d @@ -1,5 +1,9 @@ /** - * thread.context provides stack context data structures needed by threads and fibers. + * Every thread / fiber needs stack context data structure. + * thread.context decouples this info into the StackContext struct. + * Also, Thread / Fiber have some common functionality which is provided + * as a super class for both in StackContextExecutor. + * Finally, GlobalStackContext contains commonly used global data. * * Copyright: Copyright Sean Kelly 2005 - 2012. * License: Distributed under the From 47e671b8967d862407ee75ce85a98528ad1438d9 Mon Sep 17 00:00:00 2001 From: Stefanos Baziotis Date: Sat, 14 Sep 2019 18:19:05 +0300 Subject: [PATCH 03/13] Update /mak files --- mak/COPY | 1 + mak/DOCS | 1 + mak/SRCS | 1 + mak/WINDOWS | 3 +++ 4 files changed, 6 insertions(+) diff --git a/mak/COPY b/mak/COPY index 47f6ec3f2f..45e55e63fd 100644 --- a/mak/COPY +++ b/mak/COPY @@ -435,6 +435,7 @@ COPY=\ \ $(IMPDIR)\core\thread\fiber.d \ $(IMPDIR)\core\thread\osthread.d \ + $(IMPDIR)\core\thread\context.d \ $(IMPDIR)\core\thread\package.d \ \ $(IMPDIR)\etc\linux\memoryerror.d diff --git a/mak/DOCS b/mak/DOCS index 7892992f3e..fb09b703da 100644 --- a/mak/DOCS +++ b/mak/DOCS @@ -79,6 +79,7 @@ DOCS=\ \ $(DOCDIR)\core_thread_fiber.html \ $(DOCDIR)\core_thread_osthread.html \ + $(DOCDIR)\core_thread_context.html \ \ $(DOCDIR)\core_internal_array_capacity.html \ \ diff --git a/mak/SRCS b/mak/SRCS index 6237ad872a..67c6e915b5 100644 --- a/mak/SRCS +++ b/mak/SRCS @@ -433,6 +433,7 @@ SRCS=\ \ src\core\thread\fiber.d \ src\core\thread\osthread.d \ + 
src\core\thread\context.d \ src\core\thread\package.d \ \ src\gc\bits.d \ diff --git a/mak/WINDOWS b/mak/WINDOWS index 0ada094036..c8aa1575e4 100644 --- a/mak/WINDOWS +++ b/mak/WINDOWS @@ -1276,6 +1276,9 @@ $(IMPDIR)\core\thread\fiber.d : src\core\thread\fiber.d $(IMPDIR)\core\thread\osthread.d : src\core\thread\osthread.d copy $** $@ +$(IMPDIR)\core\thread\context.d : src\core\thread\context.d + copy $** $@ + $(IMPDIR)\core\thread\package.d : src\core\thread\package.d copy $** $@ From 757bc24523c49d1a692fa64c2fdd5ffbee964f25 Mon Sep 17 00:00:00 2001 From: Stefanos Baziotis Date: Sun, 15 Sep 2019 02:25:18 +0300 Subject: [PATCH 04/13] Small fixes --- src/core/thread/osthread.d | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/core/thread/osthread.d b/src/core/thread/osthread.d index 8cd19cb5d9..46c4cb81de 100644 --- a/src/core/thread/osthread.d +++ b/src/core/thread/osthread.d @@ -291,12 +291,12 @@ version (Windows) obj.m_tlsgcdata = rt_tlsgc_init(); Thread.setThis(obj); - Thread.add(obj); + GlobalStackContext.add(obj); scope (exit) { - Thread.remove(obj); + GlobalStackContext.remove(obj); } - Thread.add(&obj.m_main); + GlobalStackContext.add(&obj.m_main); // NOTE: No GC allocations may occur until the stack pointers have // been set and Thread.getThis returns a valid reference to @@ -2018,7 +2018,7 @@ version (Windows) return t; Thread thisThread = new Thread(); - Thread.Context* thisContext = &thisThread.m_main; + StackContext* thisContext = &thisThread.m_main; assert( thisContext == thisThread.m_curr ); thisThread.m_addr = addr; @@ -2043,8 +2043,8 @@ version (Windows) }); } - Thread.add( thisThread, false ); - Thread.add( thisContext ); + GlobalStackContext.add( thisThread, false ); + GlobalStackContext.add( thisContext ); if ( Thread.sm_main !is null ) multiThreadedFlag = true; return thisThread; @@ -2408,7 +2408,7 @@ private bool suspend( Thread t ) nothrow { if ( !t.isRunning ) { - Thread.remove( t ); + 
GlobalStackContext.remove( t ); return false; } onThreadError( "Unable to suspend thread" ); @@ -2507,9 +2507,9 @@ extern (C) void thread_suspendAll() nothrow // completes, with the assumption that no other GC memory has yet // been allocated by the system, and thus there is no risk of losing // data if the global thread list is empty. The check of - // Thread.sm_tbeg below is done to ensure thread_init has completed, + // GlobalStackContext.sm_tbeg below is done to ensure thread_init has completed, // and therefore that calling Thread.getThis will not result in an - // error. For the short time when Thread.sm_tbeg is null, there is + // error. For the short time when GlobalStackContext.sm_tbeg is null, there is // no reason not to simply call the multithreaded code below, with // the expectation that the foreach loop will never be entered. if ( !multiThreadedFlag && GlobalStackContext.sm_tbeg ) @@ -2558,7 +2558,7 @@ extern (C) void thread_suspendAll() nothrow version (FreeBSD) { // avoid deadlocks, see Issue 13416 - t = Thread.sm_tbeg; + t = GlobalStackContext.sm_tbeg; while (t) { auto tn = t.next; @@ -2595,7 +2595,7 @@ private void resume( Thread t ) nothrow { if ( !t.isRunning ) { - Thread.remove( t ); + GlobalStackContext.remove( t ); return; } onThreadError( "Unable to resume thread" ); @@ -2611,7 +2611,7 @@ private void resume( Thread t ) nothrow { if ( !t.isRunning ) { - Thread.remove( t ); + GlobalStackContext.remove( t ); return; } onThreadError( "Unable to resume thread" ); @@ -2741,7 +2741,7 @@ private void scanAllTypeImpl( scope ScanAllThreadsTypeFn scan, void* curStackTop } } - // NOTE: Synchronizing on Thread.slock is not needed because this + // NOTE: Synchronizing on GlobalStackContext.slock is not needed because this // function may only be called after all other threads have // been suspended from within the same lock. 
if (GlobalStackContext.nAboutToStart) @@ -2935,12 +2935,12 @@ unittest thr.start(); sema.wait(); - synchronized (Thread.criticalRegionLock) + synchronized (GlobalStackContext.criticalRegionLock) assert(thr.m_isInCriticalRegion); semb.notify(); sema.wait(); - synchronized (Thread.criticalRegionLock) + synchronized (GlobalStackContext.criticalRegionLock) assert(!thr.m_isInCriticalRegion); semb.notify(); From 3bed5d94c830d2fe6b86f8f0eac85b1abef4c062 Mon Sep 17 00:00:00 2001 From: Stefanos Baziotis Date: Sun, 15 Sep 2019 04:48:28 +0300 Subject: [PATCH 05/13] Fixed duplicate standard fiber data --- src/core/thread/fiber.d | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/core/thread/fiber.d b/src/core/thread/fiber.d index d6a275ca6e..ca070b92a8 100644 --- a/src/core/thread/fiber.d +++ b/src/core/thread/fiber.d @@ -872,11 +872,6 @@ private: // // Standard fiber data // - union - { - void function() m_fn; - void delegate() m_dg; - } bool m_isRunning; Throwable m_unhandled; State m_state; @@ -1578,6 +1573,7 @@ version (unittest) super(&run); } + private: void run() { foreach (i; 0 .. 1000) From 8fbcf3184b3fcbd3a5e4f9cc968883b0ee88d4b8 Mon Sep 17 00:00:00 2001 From: Stefanos Baziotis Date: Sun, 15 Sep 2019 05:02:06 +0300 Subject: [PATCH 06/13] Fixed fiber_guard_page.d Due to changing the number of fields in Fiber (and moving them in a base class), the indexing on .tupleof had to change. 
---
 src/core/thread/fiber.d            |  2 +-
 test/thread/src/fiber_guard_page.d | 16 ++++++++--------
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/src/core/thread/fiber.d b/src/core/thread/fiber.d
index ca070b92a8..9a4c51702b 100644
--- a/src/core/thread/fiber.d
+++ b/src/core/thread/fiber.d
@@ -1409,7 +1409,7 @@ private:
     }


-    StackContext*       m_ctxt;
+    StackContext* m_ctxt;
     size_t m_size;
     void* m_pmem;

diff --git a/test/thread/src/fiber_guard_page.d b/test/thread/src/fiber_guard_page.d
index 212f4c857e..472e9108d2 100644
--- a/test/thread/src/fiber_guard_page.d
+++ b/test/thread/src/fiber_guard_page.d
@@ -21,21 +21,21 @@ void main()
     // allocate a page below (above) the fiber's stack to make stack overflows possible (w/o segfaulting)
     version (StackGrowsDown)
     {
-        static assert(__traits(identifier, test_fiber.tupleof[8]) == "m_pmem");
-        auto stackBottom = test_fiber.tupleof[8];
-        auto p = mmap(stackBottom - 8 * stackSize, 8 * stackSize,
+        static assert(__traits(identifier, test_fiber.tupleof[5]) == "m_pmem");
+        auto stackBottom = test_fiber.tupleof[5];
+        auto p = mmap(stackBottom - 8 * stackSize, 8 * stackSize,
                       PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
         assert(p !is null, "failed to allocate page");
     }
     else
     {
-        auto m_sz = test_fiber.tupleof[7];
-        auto m_pmem = test_fiber.tupleof[8];
-        static assert(__traits(identifier, test_fiber.tupleof[7]) == "m_size");
-        static assert(__traits(identifier, test_fiber.tupleof[8]) == "m_pmem");
+        auto m_sz = test_fiber.tupleof[4];
+        auto m_pmem = test_fiber.tupleof[5];
+        static assert(__traits(identifier, test_fiber.tupleof[4]) == "m_size");
+        static assert(__traits(identifier, test_fiber.tupleof[5]) == "m_pmem");

         auto stackTop = m_pmem + m_sz;
-        auto p = mmap(stackTop, 8 * stackSize,
+        auto p = mmap(stackTop, 8 * stackSize,
                       PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
         assert(p !is null, "failed to allocate page");
     }
From 9b921ae0ed91e6c91a4fa08c4c5b3a075b57c731 Mon Sep 17 00:00:00 2001
From: Stefanos
Baziotis Date: Sun, 15 Sep 2019 14:54:18 +0300 Subject: [PATCH 07/13] Decoupling 3 member variables --- src/core/thread/context.d | 4 +++ src/core/thread/fiber.d | 14 +++----- src/core/thread/osthread.d | 69 ++++++++++++++++++-------------------- 3 files changed, 42 insertions(+), 45 deletions(-) diff --git a/src/core/thread/context.d b/src/core/thread/context.d index 9ecad69e4f..f023707d79 100644 --- a/src/core/thread/context.d +++ b/src/core/thread/context.d @@ -52,6 +52,10 @@ class StackContextExecutor void delegate() m_dg; } + StackContext* m_ctxt; + size_t m_size; + Throwable m_unhandled; + // Thread / Fiber entry point. Invokes the function or delegate passed on // construction (if any). final void run() diff --git a/src/core/thread/fiber.d b/src/core/thread/fiber.d index 9a4c51702b..2aaf213f8c 100644 --- a/src/core/thread/fiber.d +++ b/src/core/thread/fiber.d @@ -144,7 +144,7 @@ private Fiber obj = Fiber.getThis(); assert( obj ); - assert( Thread.getThis().m_curr is obj.m_ctxt ); + assert( Thread.getThis().m_ctxt is obj.m_ctxt ); atomicStore!(MemoryOrder.raw)(*cast(shared)&Thread.getThis().m_lock, false); obj.m_ctxt.tstack = obj.m_ctxt.bstack; obj.m_state = Fiber.State.EXEC; @@ -873,7 +873,6 @@ private: // Standard fiber data // bool m_isRunning; - Throwable m_unhandled; State m_state; @@ -1408,9 +1407,6 @@ private: static assert(0, "Not implemented"); } - - StackContext* m_ctxt; - size_t m_size; void* m_pmem; static if ( __traits( compiles, ucontext_t ) ) @@ -1452,7 +1448,7 @@ private: final void switchIn() nothrow @nogc { Thread tobj = Thread.getThis(); - void** oldp = &tobj.m_curr.tstack; + void** oldp = &tobj.m_ctxt.tstack; void* newp = m_ctxt.tstack; // NOTE: The order of operations here is very important. The current @@ -1476,7 +1472,7 @@ private: // to prevent Bad Things from happening. 
tobj.popContext(); atomicStore!(MemoryOrder.raw)(*cast(shared)&tobj.m_lock, false); - tobj.m_curr.tstack = tobj.m_curr.bstack; + tobj.m_ctxt.tstack = tobj.m_ctxt.bstack; } @@ -1487,7 +1483,7 @@ private: { Thread tobj = Thread.getThis(); void** oldp = &m_ctxt.tstack; - void* newp = tobj.m_curr.within.tstack; + void* newp = tobj.m_ctxt.within.tstack; // NOTE: The order of operations here is very important. The current // stack top must be stored before m_lock is set, and pushContext @@ -1512,7 +1508,7 @@ private: // current thread handle before unlocking, etc. tobj = Thread.getThis(); atomicStore!(MemoryOrder.raw)(*cast(shared)&tobj.m_lock, false); - tobj.m_curr.tstack = tobj.m_curr.bstack; + tobj.m_ctxt.tstack = tobj.m_ctxt.bstack; } } diff --git a/src/core/thread/osthread.d b/src/core/thread/osthread.d index 46c4cb81de..1bf0db1b90 100644 --- a/src/core/thread/osthread.d +++ b/src/core/thread/osthread.d @@ -285,7 +285,7 @@ version (Windows) Thread obj = cast(Thread) arg; assert( obj ); - assert( obj.m_curr is &obj.m_main ); + assert( obj.m_ctxt is &obj.m_main ); obj.m_main.bstack = getStackBottom(); obj.m_main.tstack = obj.m_main.bstack; obj.m_tlsgcdata = rt_tlsgc_init(); @@ -396,7 +396,7 @@ else version (Posix) void function(void*) @nogc nothrow)(loadedLibraries); } - assert( obj.m_curr is &obj.m_main ); + assert( obj.m_ctxt is &obj.m_main ); obj.m_main.bstack = getStackBottom(); obj.m_main.tstack = obj.m_main.bstack; obj.m_tlsgcdata = rt_tlsgc_init(); @@ -519,7 +519,7 @@ else version (Posix) if ( !obj.m_lock ) { - obj.m_curr.tstack = getStackTop(); + obj.m_ctxt.tstack = getStackTop(); } sigset_t sigres = void; @@ -539,7 +539,7 @@ else version (Posix) if ( !obj.m_lock ) { - obj.m_curr.tstack = obj.m_curr.bstack; + obj.m_ctxt.tstack = obj.m_ctxt.bstack; } } @@ -644,7 +644,7 @@ class Thread : StackContextExecutor this(sz); () @trusted { m_fn = fn; }(); m_call = Call.FN; - m_curr = &m_main; + m_ctxt = &m_main; } @@ -669,7 +669,7 @@ class Thread : StackContextExecutor 
this(sz); () @trusted { m_dg = dg; }(); m_call = Call.DG; - m_curr = &m_main; + m_ctxt = &m_main; } @@ -743,7 +743,7 @@ class Thread : StackContextExecutor if ( pthread_attr_init( &attr ) ) onThreadError( "Error initializing thread attributes" ); - if ( m_sz && pthread_attr_setstacksize( &attr, m_sz ) ) + if ( m_size && pthread_attr_setstacksize( &attr, m_size ) ) onThreadError( "Error initializing thread stack size" ); } @@ -758,8 +758,8 @@ class Thread : StackContextExecutor // // Solution: Create the thread in suspended state and then // add and resume it with slock acquired - assert(m_sz <= uint.max, "m_sz must be less than or equal to uint.max"); - m_hndl = cast(HANDLE) _beginthreadex( null, cast(uint) m_sz, &thread_entryPoint, cast(void*) this, CREATE_SUSPENDED, &m_addr ); + assert(m_size <= uint.max, "m_size must be less than or equal to uint.max"); + m_hndl = cast(HANDLE) _beginthreadex( null, cast(uint) m_size, &thread_entryPoint, cast(void*) this, CREATE_SUSPENDED, &m_addr ); if ( cast(size_t) m_hndl == 0 ) onThreadError( "Error creating thread" ); } @@ -1536,10 +1536,10 @@ private: if (PTHREAD_STACK_MIN > sz) sz = PTHREAD_STACK_MIN; } - m_sz = sz; + m_size = sz; } m_call = Call.NO; - m_curr = &m_main; + m_ctxt = &m_main; } private: @@ -1587,14 +1587,12 @@ private: } ThreadID m_addr; string m_name; - size_t m_sz; version (Posix) { shared bool m_isRunning; } bool m_isDaemon; bool m_isInCriticalRegion; - Throwable m_unhandled; version (Solaris) { @@ -1617,7 +1615,6 @@ private: package(core.thread): StackContext m_main; - StackContext* m_curr; bool m_lock; void* m_tlsgcdata; @@ -1628,32 +1625,32 @@ package(core.thread): } do { - m_curr.ehContext = swapContext(c.ehContext); - c.within = m_curr; - m_curr = c; + m_ctxt.ehContext = swapContext(c.ehContext); + c.within = m_ctxt; + m_ctxt = c; } final void popContext() nothrow @nogc in { - assert( m_curr && m_curr.within ); + assert( m_ctxt && m_ctxt.within ); } do { - StackContext* c = m_curr; - m_curr = 
c.within; - c.ehContext = swapContext(m_curr.ehContext); + StackContext* c = m_ctxt; + m_ctxt = c.within; + c.ehContext = swapContext(m_ctxt.ehContext); c.within = null; } final StackContext* topContext() nothrow @nogc in { - assert( m_curr ); + assert( m_ctxt ); } do { - return m_curr; + return m_ctxt; } /////////////////////////////////////////////////////////////////////////// @@ -1955,7 +1952,7 @@ extern (C) Thread thread_attachThis() private Thread attachThread(Thread thisThread) @nogc { StackContext* thisContext = &thisThread.m_main; - assert( thisContext == thisThread.m_curr ); + assert( thisContext == thisThread.m_ctxt ); version (Windows) { @@ -2019,7 +2016,7 @@ version (Windows) Thread thisThread = new Thread(); StackContext* thisContext = &thisThread.m_main; - assert( thisContext == thisThread.m_curr ); + assert( thisContext == thisThread.m_ctxt ); thisThread.m_addr = addr; thisContext.bstack = bstack; @@ -2363,7 +2360,7 @@ private bool suspend( Thread t ) nothrow version (X86) { if ( !t.m_lock ) - t.m_curr.tstack = cast(void*) context.Esp; + t.m_ctxt.tstack = cast(void*) context.Esp; // eax,ebx,ecx,edx,edi,esi,ebp,esp t.m_reg[0] = context.Eax; t.m_reg[1] = context.Ebx; @@ -2377,7 +2374,7 @@ private bool suspend( Thread t ) nothrow else version (X86_64) { if ( !t.m_lock ) - t.m_curr.tstack = cast(void*) context.Rsp; + t.m_ctxt.tstack = cast(void*) context.Rsp; // rax,rbx,rcx,rdx,rdi,rsi,rbp,rsp t.m_reg[0] = context.Rax; t.m_reg[1] = context.Rbx; @@ -2422,7 +2419,7 @@ private bool suspend( Thread t ) nothrow if ( thread_get_state( t.m_tmach, x86_THREAD_STATE32, &state, &count ) != KERN_SUCCESS ) onThreadError( "Unable to load thread state" ); if ( !t.m_lock ) - t.m_curr.tstack = cast(void*) state.esp; + t.m_ctxt.tstack = cast(void*) state.esp; // eax,ebx,ecx,edx,edi,esi,ebp,esp t.m_reg[0] = state.eax; t.m_reg[1] = state.ebx; @@ -2441,7 +2438,7 @@ private bool suspend( Thread t ) nothrow if ( thread_get_state( t.m_tmach, x86_THREAD_STATE64, &state, &count 
) != KERN_SUCCESS ) onThreadError( "Unable to load thread state" ); if ( !t.m_lock ) - t.m_curr.tstack = cast(void*) state.rsp; + t.m_ctxt.tstack = cast(void*) state.rsp; // rax,rbx,rcx,rdx,rdi,rsi,rbp,rsp t.m_reg[0] = state.rax; t.m_reg[1] = state.rbx; @@ -2482,7 +2479,7 @@ private bool suspend( Thread t ) nothrow } else if ( !t.m_lock ) { - t.m_curr.tstack = getStackTop(); + t.m_ctxt.tstack = getStackTop(); } } return true; @@ -2602,7 +2599,7 @@ private void resume( Thread t ) nothrow } if ( !t.m_lock ) - t.m_curr.tstack = t.m_curr.bstack; + t.m_ctxt.tstack = t.m_ctxt.bstack; t.m_reg[0 .. $] = 0; } else version (Darwin) @@ -2618,7 +2615,7 @@ private void resume( Thread t ) nothrow } if ( !t.m_lock ) - t.m_curr.tstack = t.m_curr.bstack; + t.m_ctxt.tstack = t.m_ctxt.bstack; t.m_reg[0 .. $] = 0; } else version (Posix) @@ -2637,7 +2634,7 @@ private void resume( Thread t ) nothrow } else if ( !t.m_lock ) { - t.m_curr.tstack = t.m_curr.bstack; + t.m_ctxt.tstack = t.m_ctxt.bstack; } } } @@ -2725,8 +2722,8 @@ private void scanAllTypeImpl( scope ScanAllThreadsTypeFn scan, void* curStackTop thisThread = Thread.getThis(); if ( !thisThread.m_lock ) { - oldStackTop = thisThread.m_curr.tstack; - thisThread.m_curr.tstack = curStackTop; + oldStackTop = thisThread.m_ctxt.tstack; + thisThread.m_ctxt.tstack = curStackTop; } } @@ -2736,7 +2733,7 @@ private void scanAllTypeImpl( scope ScanAllThreadsTypeFn scan, void* curStackTop { if ( !thisThread.m_lock ) { - thisThread.m_curr.tstack = oldStackTop; + thisThread.m_ctxt.tstack = oldStackTop; } } } From 93dde4806c1d5325127b39fe263a7eeea9dae9d5 Mon Sep 17 00:00:00 2001 From: Stefanos Baziotis Date: Sun, 15 Sep 2019 15:11:17 +0300 Subject: [PATCH 08/13] Decouple *Context functions --- src/core/thread/context.d | 101 ++++++++++++++++++++++++++++++++++++ src/core/thread/osthread.d | 102 ------------------------------------- 2 files changed, 101 insertions(+), 102 deletions(-) diff --git a/src/core/thread/context.d 
b/src/core/thread/context.d index f023707d79..9610ebc134 100644 --- a/src/core/thread/context.d +++ b/src/core/thread/context.d @@ -28,6 +28,72 @@ struct StackContext StackContext* within, next, prev; } +package(core.thread) +{ + import core.atomic, core.memory, core.sync.mutex; + + // + // exposed by compiler runtime + // + extern (C) void rt_moduleTlsCtor(); + extern (C) void rt_moduleTlsDtor(); + + /** + * Hook for whatever EH implementation is used to save/restore some data + * per stack. + * + * Params: + * newContext = The return value of the prior call to this function + * where the stack was last swapped out, or null when a fiber stack + * is switched in for the first time. + */ + extern(C) void* _d_eh_swapContext(void* newContext) nothrow @nogc; + + version (DigitalMars) + { + version (Windows) + alias swapContext = _d_eh_swapContext; + else + { + extern(C) void* _d_eh_swapContextDwarf(void* newContext) nothrow @nogc; + + void* swapContext(void* newContext) nothrow @nogc + { + /* Detect at runtime which scheme is being used. + * Eventually, determine it statically. + */ + static int which = 0; + final switch (which) + { + case 0: + { + assert(newContext == null); + auto p = _d_eh_swapContext(newContext); + auto pdwarf = _d_eh_swapContextDwarf(newContext); + if (p) + { + which = 1; + return p; + } + else if (pdwarf) + { + which = 2; + return pdwarf; + } + return null; + } + case 1: + return _d_eh_swapContext(newContext); + case 2: + return _d_eh_swapContextDwarf(newContext); + } + } + } + } + else + alias swapContext = _d_eh_swapContext; +} + /** A class that represents a thread of execution that manages a stack. This serves primarily as a superclass for Thread and Fiber. 
@@ -72,6 +138,41 @@ class StackContextExecutor break; } } + + final void pushContext( StackContext* c ) nothrow @nogc + in + { + assert( !c.within ); + } + do + { + m_ctxt.ehContext = swapContext(c.ehContext); + c.within = m_ctxt; + m_ctxt = c; + } + + final void popContext() nothrow @nogc + in + { + assert( m_ctxt && m_ctxt.within ); + } + do + { + StackContext* c = m_ctxt; + m_ctxt = c.within; + c.ehContext = swapContext(m_ctxt.ehContext); + c.within = null; + } + + final StackContext* topContext() nothrow @nogc + in + { + assert( m_ctxt ); + } + do + { + return m_ctxt; + } } struct GlobalStackContext diff --git a/src/core/thread/osthread.d b/src/core/thread/osthread.d index 1bf0db1b90..40def79f6d 100644 --- a/src/core/thread/osthread.d +++ b/src/core/thread/osthread.d @@ -186,73 +186,6 @@ class ThreadError : Error } } -private -{ - import core.atomic, core.memory, core.sync.mutex; - - // - // exposed by compiler runtime - // - extern (C) void rt_moduleTlsCtor(); - extern (C) void rt_moduleTlsDtor(); - - /** - * Hook for whatever EH implementation is used to save/restore some data - * per stack. - * - * Params: - * newContext = The return value of the prior call to this function - * where the stack was last swapped out, or null when a fiber stack - * is switched in for the first time. - */ - extern(C) void* _d_eh_swapContext(void* newContext) nothrow @nogc; - - version (DigitalMars) - { - version (Windows) - alias swapContext = _d_eh_swapContext; - else - { - extern(C) void* _d_eh_swapContextDwarf(void* newContext) nothrow @nogc; - - void* swapContext(void* newContext) nothrow @nogc - { - /* Detect at runtime which scheme is being used. - * Eventually, determine it statically. 
- */ - static int which = 0; - final switch (which) - { - case 0: - { - assert(newContext == null); - auto p = _d_eh_swapContext(newContext); - auto pdwarf = _d_eh_swapContextDwarf(newContext); - if (p) - { - which = 1; - return p; - } - else if (pdwarf) - { - which = 2; - return pdwarf; - } - return null; - } - case 1: - return _d_eh_swapContext(newContext); - case 2: - return _d_eh_swapContextDwarf(newContext); - } - } - } - } - else - alias swapContext = _d_eh_swapContext; -} - - /////////////////////////////////////////////////////////////////////////////// // Thread Entry Point and Signal Handlers /////////////////////////////////////////////////////////////////////////////// @@ -1618,41 +1551,6 @@ package(core.thread): bool m_lock; void* m_tlsgcdata; - final void pushContext( StackContext* c ) nothrow @nogc - in - { - assert( !c.within ); - } - do - { - m_ctxt.ehContext = swapContext(c.ehContext); - c.within = m_ctxt; - m_ctxt = c; - } - - final void popContext() nothrow @nogc - in - { - assert( m_ctxt && m_ctxt.within ); - } - do - { - StackContext* c = m_ctxt; - m_ctxt = c.within; - c.ehContext = swapContext(m_ctxt.ehContext); - c.within = null; - } - - final StackContext* topContext() nothrow @nogc - in - { - assert( m_ctxt ); - } - do - { - return m_ctxt; - } - /////////////////////////////////////////////////////////////////////////// // Thread Context and GC Scanning Support /////////////////////////////////////////////////////////////////////////// From de730690f0807af36801f3ab92a0a2617a216da6 Mon Sep 17 00:00:00 2001 From: Stefanos Baziotis Date: Sun, 15 Sep 2019 16:05:00 +0300 Subject: [PATCH 09/13] Other simple fixes --- src/core/thread/context.d | 136 +++++++++++++++++-------------------- src/core/thread/osthread.d | 6 ++ 2 files changed, 70 insertions(+), 72 deletions(-) diff --git a/src/core/thread/context.d b/src/core/thread/context.d index 9610ebc134..afd1c64120 100644 --- a/src/core/thread/context.d +++ b/src/core/thread/context.d @@ -28,72 
+28,6 @@ struct StackContext StackContext* within, next, prev; } -package(core.thread) -{ - import core.atomic, core.memory, core.sync.mutex; - - // - // exposed by compiler runtime - // - extern (C) void rt_moduleTlsCtor(); - extern (C) void rt_moduleTlsDtor(); - - /** - * Hook for whatever EH implementation is used to save/restore some data - * per stack. - * - * Params: - * newContext = The return value of the prior call to this function - * where the stack was last swapped out, or null when a fiber stack - * is switched in for the first time. - */ - extern(C) void* _d_eh_swapContext(void* newContext) nothrow @nogc; - - version (DigitalMars) - { - version (Windows) - alias swapContext = _d_eh_swapContext; - else - { - extern(C) void* _d_eh_swapContextDwarf(void* newContext) nothrow @nogc; - - void* swapContext(void* newContext) nothrow @nogc - { - /* Detect at runtime which scheme is being used. - * Eventually, determine it statically. - */ - static int which = 0; - final switch (which) - { - case 0: - { - assert(newContext == null); - auto p = _d_eh_swapContext(newContext); - auto pdwarf = _d_eh_swapContextDwarf(newContext); - if (p) - { - which = 1; - return p; - } - else if (pdwarf) - { - which = 2; - return pdwarf; - } - return null; - } - case 1: - return _d_eh_swapContext(newContext); - case 2: - return _d_eh_swapContextDwarf(newContext); - } - } - } - } - else - alias swapContext = _d_eh_swapContext; -} - /** A class that represents a thread of execution that manages a stack. This serves primarily as a superclass for Thread and Fiber. 
@@ -138,7 +72,7 @@ class StackContextExecutor break; } } - + final void pushContext( StackContext* c ) nothrow @nogc in { @@ -253,12 +187,12 @@ struct GlobalStackContext import core.thread : Thread; - __gshared Thread sm_tbeg; - __gshared size_t sm_tlen; + __gshared Thread sm_tbeg; + __gshared size_t sm_tlen; // can't use core.internal.util.array in public code __gshared Thread* pAboutToStart; - __gshared size_t nAboutToStart; + __gshared size_t nAboutToStart; __gshared uint suspendDepth = 0; @@ -326,7 +260,7 @@ struct GlobalStackContext // // Add a thread to the global thread list. // - static void add( Thread t, bool rmAboutToStart = true ) nothrow @nogc + static void add(Thread t, bool rmAboutToStart = true) nothrow @nogc in { assert( t ); @@ -373,7 +307,7 @@ struct GlobalStackContext // // Remove a thread from the global thread list. // - static void remove( Thread t ) nothrow @nogc + static void remove(Thread t) nothrow @nogc in { assert( t ); @@ -415,3 +349,61 @@ struct GlobalStackContext slock.unlock_nothrow(); } } + +private +{ + /** + * Hook for whatever EH implementation is used to save/restore some data + * per stack. + * + * Params: + * newContext = The return value of the prior call to this function + * where the stack was last swapped out, or null when a fiber stack + * is switched in for the first time. + */ + extern(C) void* _d_eh_swapContext(void* newContext) nothrow @nogc; + + version (DigitalMars) + { + version (Windows) + alias swapContext = _d_eh_swapContext; + else + { + extern(C) void* _d_eh_swapContextDwarf(void* newContext) nothrow @nogc; + + void* swapContext(void* newContext) nothrow @nogc + { + /* Detect at runtime which scheme is being used. + * Eventually, determine it statically. 
+ */ + static int which = 0; + final switch (which) + { + case 0: + { + assert(newContext == null); + auto p = _d_eh_swapContext(newContext); + auto pdwarf = _d_eh_swapContextDwarf(newContext); + if (p) + { + which = 1; + return p; + } + else if (pdwarf) + { + which = 2; + return pdwarf; + } + return null; + } + case 1: + return _d_eh_swapContext(newContext); + case 2: + return _d_eh_swapContextDwarf(newContext); + } + } + } + } + else + alias swapContext = _d_eh_swapContext; +} diff --git a/src/core/thread/osthread.d b/src/core/thread/osthread.d index 40def79f6d..8ef881bfce 100644 --- a/src/core/thread/osthread.d +++ b/src/core/thread/osthread.d @@ -393,6 +393,10 @@ else version (Posix) } try { + // exposed by compiler runtime + extern (C) void rt_moduleTlsCtor(); + extern (C) void rt_moduleTlsDtor(); + rt_moduleTlsCtor(); try { @@ -3322,6 +3326,8 @@ version (Posix) // lowlovel threading support private { + import core.atomic, core.memory, core.sync.mutex; + struct ll_ThreadData { ThreadID tid; From dc8b6a3700b0d97c583118764b04877230ab359a Mon Sep 17 00:00:00 2001 From: Stefanos Baziotis Date: Sun, 15 Sep 2019 16:16:32 +0300 Subject: [PATCH 10/13] Decouple getStackTop/Bottom --- src/core/thread/context.d | 162 +++++++++++++++++++++++++++++++++++++ src/core/thread/fiber.d | 1 + src/core/thread/osthread.d | 155 ----------------------------------- 3 files changed, 163 insertions(+), 155 deletions(-) diff --git a/src/core/thread/context.d b/src/core/thread/context.d index afd1c64120..dd8ad44dd0 100644 --- a/src/core/thread/context.d +++ b/src/core/thread/context.d @@ -407,3 +407,165 @@ private else alias swapContext = _d_eh_swapContext; } + +private +{ + import core.sys.posix.pthread : pthread_t; + extern (C) @nogc nothrow + { + version (CRuntime_Glibc) int pthread_getattr_np(pthread_t thread, pthread_attr_t* attr); + version (FreeBSD) int pthread_attr_get_np(pthread_t thread, pthread_attr_t* attr); + version (NetBSD) int pthread_attr_get_np(pthread_t thread, 
pthread_attr_t* attr); + version (OpenBSD) int pthread_stackseg_np(pthread_t thread, stack_t* sinfo); + version (DragonFlyBSD) int pthread_attr_get_np(pthread_t thread, pthread_attr_t* attr); + version (Solaris) int thr_stksegment(stack_t* stk); + version (CRuntime_Bionic) int pthread_getattr_np(pthread_t thid, pthread_attr_t* attr); + version (CRuntime_Musl) int pthread_getattr_np(pthread_t, pthread_attr_t*); + version (CRuntime_UClibc) int pthread_getattr_np(pthread_t thread, pthread_attr_t* attr); + } +} + +package(core.thread) +{ + + import core.sys.posix.pthread : pthread_attr_t, pthread_attr_getstack, + pthread_attr_destroy, pthread_self; + + void* getStackBottom() nothrow @nogc + { + version (Windows) + { + version (D_InlineAsm_X86) + asm pure nothrow @nogc { naked; mov EAX, FS:4; ret; } + else version (D_InlineAsm_X86_64) + asm pure nothrow @nogc + { naked; + mov RAX, 8; + mov RAX, GS:[RAX]; + ret; + } + else + static assert(false, "Architecture not supported."); + } + else version (Darwin) + { + import core.sys.darwin.pthread; + return pthread_get_stackaddr_np(pthread_self()); + } + else version (CRuntime_Glibc) + { + pthread_attr_t attr; + void* addr; size_t size; + + pthread_getattr_np(pthread_self(), &attr); + pthread_attr_getstack(&attr, &addr, &size); + pthread_attr_destroy(&attr); + version (StackGrowsDown) + addr += size; + return addr; + } + else version (FreeBSD) + { + pthread_attr_t attr; + void* addr; size_t size; + + pthread_attr_init(&attr); + pthread_attr_get_np(pthread_self(), &attr); + pthread_attr_getstack(&attr, &addr, &size); + pthread_attr_destroy(&attr); + version (StackGrowsDown) + addr += size; + return addr; + } + else version (NetBSD) + { + pthread_attr_t attr; + void* addr; size_t size; + + pthread_attr_init(&attr); + pthread_attr_get_np(pthread_self(), &attr); + pthread_attr_getstack(&attr, &addr, &size); + pthread_attr_destroy(&attr); + version (StackGrowsDown) + addr += size; + return addr; + } + else version (OpenBSD) + { + 
stack_t stk; + + pthread_stackseg_np(pthread_self(), &stk); + return stk.ss_sp; + } + else version (DragonFlyBSD) + { + pthread_attr_t attr; + void* addr; size_t size; + + pthread_attr_init(&attr); + pthread_attr_get_np(pthread_self(), &attr); + pthread_attr_getstack(&attr, &addr, &size); + pthread_attr_destroy(&attr); + version (StackGrowsDown) + addr += size; + return addr; + } + else version (Solaris) + { + stack_t stk; + + thr_stksegment(&stk); + return stk.ss_sp; + } + else version (CRuntime_Bionic) + { + pthread_attr_t attr; + void* addr; size_t size; + + pthread_getattr_np(pthread_self(), &attr); + pthread_attr_getstack(&attr, &addr, &size); + pthread_attr_destroy(&attr); + version (StackGrowsDown) + addr += size; + return addr; + } + else version (CRuntime_Musl) + { + pthread_attr_t attr; + void* addr; size_t size; + + pthread_getattr_np(pthread_self(), &attr); + pthread_attr_getstack(&attr, &addr, &size); + pthread_attr_destroy(&attr); + version (StackGrowsDown) + addr += size; + return addr; + } + else version (CRuntime_UClibc) + { + pthread_attr_t attr; + void* addr; size_t size; + + pthread_getattr_np(pthread_self(), &attr); + pthread_attr_getstack(&attr, &addr, &size); + pthread_attr_destroy(&attr); + version (StackGrowsDown) + addr += size; + return addr; + } + else + static assert(false, "Platform not supported."); + } + + void* getStackTop() nothrow @nogc + { + version (D_InlineAsm_X86) + asm pure nothrow @nogc { naked; mov EAX, ESP; ret; } + else version (D_InlineAsm_X86_64) + asm pure nothrow @nogc { naked; mov RAX, RSP; ret; } + else version (GNU) + return __builtin_frame_address(0); + else + static assert(false, "Architecture not supported."); + } +} diff --git a/src/core/thread/fiber.d b/src/core/thread/fiber.d index 2aaf213f8c..1b7657407f 100644 --- a/src/core/thread/fiber.d +++ b/src/core/thread/fiber.d @@ -1441,6 +1441,7 @@ private: // Context Switching /////////////////////////////////////////////////////////////////////////// + import 
core.thread.context : getStackTop; // // Switches into the stack held by this fiber. diff --git a/src/core/thread/osthread.d b/src/core/thread/osthread.d index 8ef881bfce..7df9b94fe1 100644 --- a/src/core/thread/osthread.d +++ b/src/core/thread/osthread.d @@ -2912,161 +2912,6 @@ extern(C) void thread_processGCMarks( scope IsMarkedDg isMarked ) nothrow } } - -extern (C) @nogc nothrow -{ - version (CRuntime_Glibc) int pthread_getattr_np(pthread_t thread, pthread_attr_t* attr); - version (FreeBSD) int pthread_attr_get_np(pthread_t thread, pthread_attr_t* attr); - version (NetBSD) int pthread_attr_get_np(pthread_t thread, pthread_attr_t* attr); - version (OpenBSD) int pthread_stackseg_np(pthread_t thread, stack_t* sinfo); - version (DragonFlyBSD) int pthread_attr_get_np(pthread_t thread, pthread_attr_t* attr); - version (Solaris) int thr_stksegment(stack_t* stk); - version (CRuntime_Bionic) int pthread_getattr_np(pthread_t thid, pthread_attr_t* attr); - version (CRuntime_Musl) int pthread_getattr_np(pthread_t, pthread_attr_t*); - version (CRuntime_UClibc) int pthread_getattr_np(pthread_t thread, pthread_attr_t* attr); -} - - -package(core.thread) void* getStackTop() nothrow @nogc -{ - version (D_InlineAsm_X86) - asm pure nothrow @nogc { naked; mov EAX, ESP; ret; } - else version (D_InlineAsm_X86_64) - asm pure nothrow @nogc { naked; mov RAX, RSP; ret; } - else version (GNU) - return __builtin_frame_address(0); - else - static assert(false, "Architecture not supported."); -} - - -package(core.thread) void* getStackBottom() nothrow @nogc -{ - version (Windows) - { - version (D_InlineAsm_X86) - asm pure nothrow @nogc { naked; mov EAX, FS:4; ret; } - else version (D_InlineAsm_X86_64) - asm pure nothrow @nogc - { naked; - mov RAX, 8; - mov RAX, GS:[RAX]; - ret; - } - else - static assert(false, "Architecture not supported."); - } - else version (Darwin) - { - import core.sys.darwin.pthread; - return pthread_get_stackaddr_np(pthread_self()); - } - else version 
(CRuntime_Glibc) - { - pthread_attr_t attr; - void* addr; size_t size; - - pthread_getattr_np(pthread_self(), &attr); - pthread_attr_getstack(&attr, &addr, &size); - pthread_attr_destroy(&attr); - version (StackGrowsDown) - addr += size; - return addr; - } - else version (FreeBSD) - { - pthread_attr_t attr; - void* addr; size_t size; - - pthread_attr_init(&attr); - pthread_attr_get_np(pthread_self(), &attr); - pthread_attr_getstack(&attr, &addr, &size); - pthread_attr_destroy(&attr); - version (StackGrowsDown) - addr += size; - return addr; - } - else version (NetBSD) - { - pthread_attr_t attr; - void* addr; size_t size; - - pthread_attr_init(&attr); - pthread_attr_get_np(pthread_self(), &attr); - pthread_attr_getstack(&attr, &addr, &size); - pthread_attr_destroy(&attr); - version (StackGrowsDown) - addr += size; - return addr; - } - else version (OpenBSD) - { - stack_t stk; - - pthread_stackseg_np(pthread_self(), &stk); - return stk.ss_sp; - } - else version (DragonFlyBSD) - { - pthread_attr_t attr; - void* addr; size_t size; - - pthread_attr_init(&attr); - pthread_attr_get_np(pthread_self(), &attr); - pthread_attr_getstack(&attr, &addr, &size); - pthread_attr_destroy(&attr); - version (StackGrowsDown) - addr += size; - return addr; - } - else version (Solaris) - { - stack_t stk; - - thr_stksegment(&stk); - return stk.ss_sp; - } - else version (CRuntime_Bionic) - { - pthread_attr_t attr; - void* addr; size_t size; - - pthread_getattr_np(pthread_self(), &attr); - pthread_attr_getstack(&attr, &addr, &size); - pthread_attr_destroy(&attr); - version (StackGrowsDown) - addr += size; - return addr; - } - else version (CRuntime_Musl) - { - pthread_attr_t attr; - void* addr; size_t size; - - pthread_getattr_np(pthread_self(), &attr); - pthread_attr_getstack(&attr, &addr, &size); - pthread_attr_destroy(&attr); - version (StackGrowsDown) - addr += size; - return addr; - } - else version (CRuntime_UClibc) - { - pthread_attr_t attr; - void* addr; size_t size; - - 
pthread_getattr_np(pthread_self(), &attr); - pthread_attr_getstack(&attr, &addr, &size); - pthread_attr_destroy(&attr); - version (StackGrowsDown) - addr += size; - return addr; - } - else - static assert(false, "Platform not supported."); -} - - /** * Returns the stack top of the currently active stack within the calling * thread. From 0b69e9807335ccc3f0e1a02de46cd9d2a29d655a Mon Sep 17 00:00:00 2001 From: Stefanos Baziotis Date: Sun, 15 Sep 2019 16:27:21 +0300 Subject: [PATCH 11/13] Decouple Rethrow and small refactorings --- src/core/thread/context.d | 42 ++++++++++++-------------- src/core/thread/fiber.d | 21 +++++++------ src/core/thread/osthread.d | 60 ++++++++++++++++++++------------------ 3 files changed, 60 insertions(+), 63 deletions(-) diff --git a/src/core/thread/context.d b/src/core/thread/context.d index dd8ad44dd0..28b8d9e38e 100644 --- a/src/core/thread/context.d +++ b/src/core/thread/context.d @@ -28,6 +28,10 @@ struct StackContext StackContext* within, next, prev; } + +/// Flag to control rethrow behavior of call, join and friends +enum Rethrow : bool { no, yes } + /** A class that represents a thread of execution that manages a stack. This serves primarily as a superclass for Thread and Fiber. @@ -45,7 +49,7 @@ class StackContextExecutor } // Common standard data for Thread / Fiber. - Call m_call = Call.NO; + Call m_call = Call.NO; union { void function() m_fn; @@ -56,6 +60,7 @@ class StackContextExecutor size_t m_size; Throwable m_unhandled; + // Thread / Fiber entry point. Invokes the function or delegate passed on // construction (if any). 
final void run() @@ -233,16 +238,16 @@ struct GlobalStackContext static void remove(StackContext* c) nothrow @nogc in { - assert( c ); - assert( c.next || c.prev ); + assert(c); + assert(c.next || c.prev); } do { - if ( c.prev ) + if (c.prev) c.prev.next = c.next; - if ( c.next ) + if (c.next) c.next.prev = c.prev; - if ( sm_cbeg == c ) + if (sm_cbeg == c) sm_cbeg = c.next; // NOTE: Don't null out c.next or c.prev because opApply currently // follows c.next after removing a node. This could be easily @@ -263,8 +268,8 @@ struct GlobalStackContext static void add(Thread t, bool rmAboutToStart = true) nothrow @nogc in { - assert( t ); - assert( !t.next && !t.prev ); + assert(t); + assert(!t.next && !t.prev); } do { @@ -310,7 +315,7 @@ struct GlobalStackContext static void remove(Thread t) nothrow @nogc in { - assert( t ); + assert(t); } do { @@ -329,13 +334,13 @@ struct GlobalStackContext // elsewhere. Therefore, it is the responsibility of any // object that creates contexts to clean them up properly // when it is done with them. 
- remove( &t.m_main ); + remove(&t.m_main); - if ( t.prev ) + if (t.prev) t.prev.next = t.next; - if ( t.next ) + if (t.next) t.next.prev = t.prev; - if ( sm_tbeg is t ) + if (sm_tbeg is t) sm_tbeg = t.next; t.prev = t.next = null; --sm_tlen; @@ -456,7 +461,7 @@ package(core.thread) { pthread_attr_t attr; void* addr; size_t size; - + pthread_getattr_np(pthread_self(), &attr); pthread_attr_getstack(&attr, &addr, &size); pthread_attr_destroy(&attr); @@ -468,7 +473,6 @@ package(core.thread) { pthread_attr_t attr; void* addr; size_t size; - pthread_attr_init(&attr); pthread_attr_get_np(pthread_self(), &attr); pthread_attr_getstack(&attr, &addr, &size); @@ -481,7 +485,6 @@ package(core.thread) { pthread_attr_t attr; void* addr; size_t size; - pthread_attr_init(&attr); pthread_attr_get_np(pthread_self(), &attr); pthread_attr_getstack(&attr, &addr, &size); @@ -493,7 +496,6 @@ package(core.thread) else version (OpenBSD) { stack_t stk; - pthread_stackseg_np(pthread_self(), &stk); return stk.ss_sp; } @@ -501,7 +503,6 @@ package(core.thread) { pthread_attr_t attr; void* addr; size_t size; - pthread_attr_init(&attr); pthread_attr_get_np(pthread_self(), &attr); pthread_attr_getstack(&attr, &addr, &size); @@ -513,7 +514,6 @@ package(core.thread) else version (Solaris) { stack_t stk; - thr_stksegment(&stk); return stk.ss_sp; } @@ -521,7 +521,6 @@ package(core.thread) { pthread_attr_t attr; void* addr; size_t size; - pthread_getattr_np(pthread_self(), &attr); pthread_attr_getstack(&attr, &addr, &size); pthread_attr_destroy(&attr); @@ -533,7 +532,6 @@ package(core.thread) { pthread_attr_t attr; void* addr; size_t size; - pthread_getattr_np(pthread_self(), &attr); pthread_attr_getstack(&attr, &addr, &size); pthread_attr_destroy(&attr); @@ -545,7 +543,6 @@ package(core.thread) { pthread_attr_t attr; void* addr; size_t size; - pthread_getattr_np(pthread_self(), &attr); pthread_attr_getstack(&attr, &addr, &size); pthread_attr_destroy(&attr); @@ -556,7 +553,6 @@ package(core.thread) 
else static assert(false, "Platform not supported."); } - void* getStackTop() nothrow @nogc { version (D_InlineAsm_X86) diff --git a/src/core/thread/fiber.d b/src/core/thread/fiber.d index 1b7657407f..e400cb83d5 100644 --- a/src/core/thread/fiber.d +++ b/src/core/thread/fiber.d @@ -617,8 +617,7 @@ class Fiber : StackContextExecutor // General Actions /////////////////////////////////////////////////////////////////////////// - /// Flag to control rethrow behavior of $(D $(LREF call)) - enum Rethrow : bool { no, yes } + public import core.thread.context : Rethrow; /** * Transfers execution to this fiber object. The calling context will be @@ -644,7 +643,7 @@ class Fiber : StackContextExecutor // is already `@nogc nothrow`, but in order for `Fiber.call` to // propagate the attributes of the user's function, the Fiber // class needs to be templated. - final Throwable call( Rethrow rethrow = Rethrow.yes ) + final Throwable call(Rethrow rethrow = Rethrow.yes) { return rethrow ? call!(Rethrow.yes)() : call!(Rethrow.no); } @@ -668,20 +667,20 @@ class Fiber : StackContextExecutor private void callImpl() nothrow @nogc in { - assert( m_state == State.HOLD ); + assert(m_state == State.HOLD); } do { - Fiber cur = getThis(); + Fiber cur = getThis(); - static if ( __traits( compiles, ucontext_t ) ) + static if (__traits(compiles, ucontext_t)) m_ucur = cur ? &cur.m_utxt : &Fiber.sm_utxt; - setThis( this ); + setThis(this); this.switchIn(); - setThis( cur ); + setThis(cur); - static if ( __traits( compiles, ucontext_t ) ) + static if (__traits(compiles, ucontext_t)) m_ucur = null; // NOTE: If the fiber has terminated then the stack pointers must be @@ -691,7 +690,7 @@ class Fiber : StackContextExecutor // the collection of otherwise dead objects. The most notable // being the current object, which is referenced at the top of // fiber_entryPoint. 
- if ( m_state == State.TERM ) + if (m_state == State.TERM) { m_ctxt.tstack = m_ctxt.bstack; } @@ -711,7 +710,7 @@ class Fiber : StackContextExecutor final void reset() nothrow @nogc in { - assert( m_state == State.TERM || m_state == State.HOLD ); + assert(m_state == State.TERM || m_state == State.HOLD); } do { diff --git a/src/core/thread/osthread.d b/src/core/thread/osthread.d index 7df9b94fe1..2c77a911cb 100644 --- a/src/core/thread/osthread.d +++ b/src/core/thread/osthread.d @@ -19,17 +19,19 @@ import core.exception : onOutOfMemoryError; // Platform Detection and Memory Allocation /////////////////////////////////////////////////////////////////////////////// -version (OSX) - version = Darwin; -else version (iOS) - version = Darwin; -else version (TVOS) - version = Darwin; -else version (WatchOS) - version = Darwin; + package(core.thread) { + version (OSX) + version = Darwin; + else version (iOS) + version = Darwin; + else version (TVOS) + version = Darwin; + else version (WatchOS) + version = Darwin; + version (D_InlineAsm_X86) { version (Windows) @@ -304,6 +306,10 @@ else version (Posix) import core.sys.darwin.pthread : pthread_mach_thread_np; } + // exposed by compiler runtime + extern (C) void rt_moduleTlsCtor(); + extern (C) void rt_moduleTlsDtor(); + // // Entry point for POSIX threads // @@ -393,10 +399,6 @@ else version (Posix) } try { - // exposed by compiler runtime - extern (C) void rt_moduleTlsCtor(); - extern (C) void rt_moduleTlsDtor(); - rt_moduleTlsCtor(); try { @@ -772,12 +774,12 @@ class Thread : StackContextExecutor * Any exception not handled by this thread if rethrow = false, null * otherwise. 
*/ - final Throwable join( bool rethrow = true ) + final Throwable join(Rethrow rethrow = Rethrow.yes) { version (Windows) { - if ( WaitForSingleObject( m_hndl, INFINITE ) != WAIT_OBJECT_0 ) - throw new ThreadException( "Unable to join thread" ); + if (WaitForSingleObject( m_hndl, INFINITE ) != WAIT_OBJECT_0) + throw new ThreadException("Unable to join thread"); // NOTE: m_addr must be cleared before m_hndl is closed to avoid // a race condition with isRunning. The operation is done // with atomicStore to prevent compiler reordering. @@ -2997,11 +2999,11 @@ class ThreadGroup * Returns: * A reference to the newly created thread. */ - final Thread create( void delegate() dg ) + final Thread create(void delegate() dg) { - Thread t = new Thread( dg ).start(); + Thread t = new Thread(dg).start(); - synchronized( this ) + synchronized(this) { m_all[t] = t; } @@ -3018,14 +3020,14 @@ class ThreadGroup * In: * t must not be null. */ - final void add( Thread t ) + final void add(Thread t) in { - assert( t ); + assert(t); } do { - synchronized( this ) + synchronized(this) { m_all[t] = t; } @@ -3042,16 +3044,16 @@ class ThreadGroup * In: * t must not be null. */ - final void remove( Thread t ) + final void remove(Thread t) in { - assert( t ); + assert(t); } do { - synchronized( this ) + synchronized(this) { - m_all.remove( t ); + m_all.remove(t); } } @@ -3089,15 +3091,15 @@ class ThreadGroup * Throws: * Any exception not handled by the joined threads. */ - final void joinAll( bool rethrow = true ) + final void joinAll(Rethrow rethrow = Rethrow.yes) { - synchronized( this ) + synchronized(this) { // NOTE: This loop relies on the knowledge that m_all uses the // Thread object for both the key and the mapped value. 
- foreach ( Thread t; m_all.keys ) + foreach (Thread t; m_all.keys) { - t.join( rethrow ); + t.join(rethrow); } } } From 24d9540aebaddcaf89c315280ca04af6b1d40868 Mon Sep 17 00:00:00 2001 From: Stefanos Baziotis Date: Sun, 15 Sep 2019 17:42:09 +0300 Subject: [PATCH 12/13] Temporarily duplicating version stuff --- src/core/thread/context.d | 57 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/src/core/thread/context.d b/src/core/thread/context.d index 28b8d9e38e..d9e4100df9 100644 --- a/src/core/thread/context.d +++ b/src/core/thread/context.d @@ -355,6 +355,62 @@ struct GlobalStackContext } } +version (OSX) + version = Darwin; +else version (iOS) + version = Darwin; +else version (TVOS) + version = Darwin; +else version (WatchOS) + version = Darwin; + +version (D_InlineAsm_X86) +{ + version (Windows) + version = AsmX86_Windows; + else version (Posix) + version = AsmX86_Posix; +} +else version (D_InlineAsm_X86_64) +{ + version (Windows) + { + version = AsmX86_64_Windows; + } + else version (Posix) + { + version = AsmX86_64_Posix; + } +} + +version (Posix) +{ + import core.sys.posix.unistd; + + version (AsmX86_Windows) {} else + version (AsmX86_Posix) {} else + version (AsmX86_64_Windows) {} else + version (AsmX86_64_Posix) {} else + version (AsmExternal) {} else + { + // NOTE: The ucontext implementation requires architecture specific + // data definitions to operate so testing for it must be done + // by checking for the existence of ucontext_t rather than by + // a version identifier. Please note that this is considered + // an obsolescent feature according to the POSIX spec, so a + // custom solution is still preferred. 
+ import core.sys.posix.ucontext; + } +} + +version (Solaris) +{ + import core.sys.solaris.sys.priocntl; + import core.sys.solaris.sys.types; + import core.sys.posix.sys.wait : idtype_t; +} + + private { /** @@ -415,7 +471,6 @@ private private { - import core.sys.posix.pthread : pthread_t; extern (C) @nogc nothrow { version (CRuntime_Glibc) int pthread_getattr_np(pthread_t thread, pthread_attr_t* attr); From 1b9a53277452b06ee2e05b86bd9c19fc0d017ecc Mon Sep 17 00:00:00 2001 From: Stefanos Baziotis Date: Sun, 15 Sep 2019 17:51:18 +0300 Subject: [PATCH 13/13] Generic posix import removed --- src/core/thread/context.d | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/core/thread/context.d b/src/core/thread/context.d index d9e4100df9..2bda556007 100644 --- a/src/core/thread/context.d +++ b/src/core/thread/context.d @@ -385,6 +385,7 @@ else version (D_InlineAsm_X86_64) version (Posix) { + import core.sys.posix.pthread; import core.sys.posix.unistd; version (AsmX86_Windows) {} else @@ -487,10 +488,6 @@ private package(core.thread) { - - import core.sys.posix.pthread : pthread_attr_t, pthread_attr_getstack, - pthread_attr_destroy, pthread_self; - void* getStackBottom() nothrow @nogc { version (Windows)