// Reconstructed from mangled patch "[GDC] Support GDC in core.atomic"
// (Eugen Wissner <belka@caraus.de>, Sun 5 May 2019, commit 60766d46f427,
// 328 insertions into src/core/atomic.d). The diff framing was destroyed
// by newline loss; below is the added D code, reformatted and documented.
//
// Addition 1: extends the capability-detection version chain near the top
// of core.atomic.d (after the D_InlineAsm_X86_64 branch, before the
// fallback `else { enum has64BitCAS = false; ... }`).
else version (GNU)
{
    import gcc.config;
    // 64-bit CAS is available whenever the target provides native 64-bit
    // atomics; 128-bit CAS only via libatomic.
    enum has64BitCAS = GNU_Have_64Bit_Atomics;
    enum has128BitCAS = GNU_Have_LibAtomic;
}

// Addition 2: extends the implementation version chain (after the
// `else version (AsmX86_64)` block near line 1410 of core.atomic.d).
// Implements the core.atomic API on top of the GCC `__atomic_*` builtins,
// with a global-mutex fallback when the target has no atomics at all.
else version (GNU)
{
    import gcc.builtins;

    /// Performs `val op mod` (or `val op= mod`) as an atomic read-modify-write.
    /// Binary operators act on a raw atomic load of `val`; assignment
    /// operators retry in a CAS loop until the update lands.
    /// Returns the result of the operation (for `op=`, the stored value).
    TailShared!T atomicOp(string op, T, V1)( ref shared T val, V1 mod ) pure nothrow @nogc @trusted
        if ( __traits( compiles, mixin( "*cast(T*)&val" ~ op ~ "mod" ) ) )
    {
        // binary operators
        //
        // +    -   *   /   %   ^^  &
        // |    ^   <<  >>  >>> ~   in
        // ==   !=  <   <=  >   >=
        static if ( op == "+" || op == "-" || op == "*" || op == "/" ||
                    op == "%" || op == "^^" || op == "&" || op == "|" ||
                    op == "^" || op == "<<" || op == ">>" || op == ">>>" ||
                    op == "~" || // skip "in"
                    op == "==" || op == "!=" || op == "<" || op == "<=" ||
                    op == ">" || op == ">=" )
        {
            TailShared!T get = atomicLoad!(MemoryOrder.raw)( val );
            mixin( "return get " ~ op ~ " mod;" );
        }
        else
        // assignment operators
        //
        // +=   -=  *=  /=  %=  ^^= &=
        // |=   ^=  <<= >>= >>>=    ~=
        static if ( op == "+=" || op == "-=" || op == "*=" || op == "/=" ||
                    op == "%=" || op == "^^=" || op == "&=" || op == "|=" ||
                    op == "^=" || op == "<<=" || op == ">>=" || op == ">>>=" ) // skip "~="
        {
            TailShared!T get, set;

            do
            {
                // Optimistic read; cas() below detects concurrent writers.
                get = set = atomicLoad!(MemoryOrder.raw)( val );
                mixin( "set " ~ op ~ " mod;" );
            } while ( !cas( &val, get, set ) );
            return set;
        }
        else
        {
            static assert( false, "Operation not supported." );
        }
    }

    /// Compare-and-swap for value types: stores `writeThis` into `*here`
    /// iff `*here == ifThis`. Returns true when the swap happened.
    bool cas(T,V1,V2)( shared(T)* here, const V1 ifThis, V2 writeThis ) pure nothrow @nogc @safe
        if ( !is(T == class) && !is(T U : U*) && __traits( compiles, { *here = writeThis; } ) )
    {
        return casImpl(here, ifThis, writeThis);
    }

    /// Compare-and-swap overload for class references.
    bool cas(T,V1,V2)( shared(T)* here, const shared(V1) ifThis, shared(V2) writeThis ) pure nothrow @nogc @safe
        if ( is(T == class) && __traits( compiles, { *here = writeThis; } ) )
    {
        return casImpl(here, ifThis, writeThis);
    }

    /// Compare-and-swap overload for pointers.
    bool cas(T,V1,V2)( shared(T)* here, const shared(V1)* ifThis, shared(V2)* writeThis ) pure nothrow @nogc @safe
        if ( is(T U : U*) && __traits( compiles, { *here = writeThis; } ) )
    {
        return casImpl(here, ifThis, writeThis);
    }

    /// Dispatches a seq-cst strong CAS to the width-matched GCC builtin,
    /// to libatomic for odd sizes, or to the global-mutex fallback when
    /// the target has no atomic support at all.
    private bool casImpl(T,V1,V2)( shared(T)* here, V1 ifThis, V2 writeThis ) pure nothrow @nogc @trusted
    {
        bool res = void;

        static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
        {
            static if (T.sizeof == byte.sizeof)
            {
                res = __atomic_compare_exchange_1(here, cast(void*) &ifThis, *cast(ubyte*) &writeThis,
                                                  false, MemoryOrder.seq, MemoryOrder.seq);
            }
            else static if (T.sizeof == short.sizeof)
            {
                res = __atomic_compare_exchange_2(here, cast(void*) &ifThis, *cast(ushort*) &writeThis,
                                                  false, MemoryOrder.seq, MemoryOrder.seq);
            }
            else static if (T.sizeof == int.sizeof)
            {
                res = __atomic_compare_exchange_4(here, cast(void*) &ifThis, *cast(uint*) &writeThis,
                                                  false, MemoryOrder.seq, MemoryOrder.seq);
            }
            else static if (T.sizeof == long.sizeof && GNU_Have_64Bit_Atomics)
            {
                res = __atomic_compare_exchange_8(here, cast(void*) &ifThis, *cast(ulong*) &writeThis,
                                                  false, MemoryOrder.seq, MemoryOrder.seq);
            }
            else static if (GNU_Have_LibAtomic)
            {
                res = __atomic_compare_exchange(T.sizeof, here, cast(void*) &ifThis, cast(void*) &writeThis,
                                                MemoryOrder.seq, MemoryOrder.seq);
            }
            else
                static assert(0, "Invalid template type specified.");
        }
        else
        {
            // No atomics: compare bit patterns under the global mutex.
            static if (T.sizeof == byte.sizeof)
                alias U = byte;
            else static if (T.sizeof == short.sizeof)
                alias U = short;
            else static if (T.sizeof == int.sizeof)
                alias U = int;
            else static if (T.sizeof == long.sizeof)
                alias U = long;
            else
                static assert(0, "Invalid template type specified.");

            getAtomicMutex.lock();
            scope(exit) getAtomicMutex.unlock();

            if (*cast(U*)here == *cast(U*)&ifThis)
            {
                *here = writeThis;
                res = true;
            }
            else
                res = false;
        }

        return res;
    }

    // Memory model types for the __atomic* builtins. Values are the GCC
    // memmodel constants: __ATOMIC_RELAXED (0), __ATOMIC_ACQUIRE (2),
    // __ATOMIC_RELEASE (3), __ATOMIC_SEQ_CST (5).
    enum MemoryOrder
    {
        raw = 0,
        acq = 2,
        rel = 3,
        seq = 5,
    }

    deprecated("Please use MemoryOrder instead.")
    alias MemoryOrder msync;

    /// Atomically loads `val` with ordering `ms` (release is rejected at
    /// compile time). Non-floating POD only; width-dispatched to the
    /// __atomic_load_N builtins, libatomic, or the mutex fallback.
    TailShared!T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)( ref const shared T val ) pure nothrow @nogc @trusted
        if (!__traits(isFloating, T))
    {
        static assert(ms != MemoryOrder.rel, "Invalid MemoryOrder for atomicLoad");
        static assert(__traits(isPOD, T), "argument to atomicLoad() must be POD");

        static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
        {
            static if (T.sizeof == ubyte.sizeof)
            {
                ubyte value = __atomic_load_1(&val, ms);
                return *cast(TailShared!T*) &value;
            }
            else static if (T.sizeof == ushort.sizeof)
            {
                ushort value = __atomic_load_2(&val, ms);
                return *cast(TailShared!T*) &value;
            }
            else static if (T.sizeof == uint.sizeof)
            {
                uint value = __atomic_load_4(&val, ms);
                return *cast(TailShared!T*) &value;
            }
            else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
            {
                ulong value = __atomic_load_8(&val, ms);
                return *cast(TailShared!T*) &value;
            }
            else static if (GNU_Have_LibAtomic)
            {
                T value;
                __atomic_load(T.sizeof, &val, cast(void*)&value, ms);
                return *cast(TailShared!T*) &value;
            }
            else
                static assert(0, "Invalid template type specified.");
        }
        else
        {
            getAtomicMutex.lock();
            scope(exit) getAtomicMutex.unlock();
            return *cast(TailShared!T*)&val;
        }
    }

    /// Atomically stores `newval` into `val` with ordering `ms` (acquire is
    /// rejected at compile time). POD only.
    void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V1)( ref shared T val, V1 newval ) pure nothrow @nogc @trusted
        if ( __traits( compiles, { val = newval; } ) )
    {
        static assert(ms != MemoryOrder.acq, "Invalid MemoryOrder for atomicStore");
        // FIX: message previously said "atomicLoad()" — copy-paste error.
        static assert(__traits(isPOD, T), "argument to atomicStore() must be POD");

        static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
        {
            static if (T.sizeof == ubyte.sizeof)
            {
                __atomic_store_1(&val, *cast(ubyte*) &newval, ms);
            }
            else static if (T.sizeof == ushort.sizeof)
            {
                __atomic_store_2(&val, *cast(ushort*) &newval, ms);
            }
            else static if (T.sizeof == uint.sizeof)
            {
                __atomic_store_4(&val, *cast(uint*) &newval, ms);
            }
            else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
            {
                __atomic_store_8(&val, *cast(ulong*) &newval, ms);
            }
            else static if (GNU_Have_LibAtomic)
            {
                __atomic_store(T.sizeof, &val, cast(void*)&newval, ms);
            }
            else
                static assert(0, "Invalid template type specified.");
        }
        else
        {
            getAtomicMutex.lock();
            val = newval;
            getAtomicMutex.unlock();
        }
    }

    /// Full sequentially-consistent thread fence; in the fallback case a
    /// lock/unlock pair of the global mutex provides the ordering.
    void atomicFence() nothrow @nogc
    {
        static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
            __atomic_thread_fence(MemoryOrder.seq);
        else
        {
            getAtomicMutex.lock();
            getAtomicMutex.unlock();
        }
    }

    static if (!GNU_Have_Atomics && !GNU_Have_LibAtomic)
    {
        // Use system mutex for atomics, faking the purity of the functions so
        // that they can be used in pure/nothrow/@safe code.
        extern (C) private pure @trusted @nogc nothrow
        {
            static if (GNU_Thread_Model == ThreadModel.Posix)
            {
                import core.sys.posix.pthread;
                alias atomicMutexHandle = pthread_mutex_t;

                pragma(mangle, "pthread_mutex_init") int fakePureMutexInit(pthread_mutex_t*, pthread_mutexattr_t*);
                pragma(mangle, "pthread_mutex_lock") int fakePureMutexLock(pthread_mutex_t*);
                pragma(mangle, "pthread_mutex_unlock") int fakePureMutexUnlock(pthread_mutex_t*);
            }
            else static if (GNU_Thread_Model == ThreadModel.Win32)
            {
                import core.sys.windows.winbase;
                alias atomicMutexHandle = CRITICAL_SECTION;

                // FIX: InitializeCriticalSection and LeaveCriticalSection
                // return VOID in the Windows API; they were declared `int`.
                // Both call sites discard the result, so `void` is safe.
                // NOTE(review): these APIs are stdcall on 32-bit x86, while
                // this block is extern(C) — confirm against the Win32 target.
                pragma(mangle, "InitializeCriticalSection") void fakePureMutexInit(CRITICAL_SECTION*);
                pragma(mangle, "EnterCriticalSection") void fakePureMutexLock(CRITICAL_SECTION*);
                pragma(mangle, "LeaveCriticalSection") void fakePureMutexUnlock(CRITICAL_SECTION*);
            }
            else
            {
                // Single-threaded model: no real mutex needed.
                alias atomicMutexHandle = int;
            }
        }

        // Implements lock/unlock operations.
        private struct AtomicMutex
        {
            // NOTE(review): the `_inited` lazy init is itself unsynchronized;
            // first concurrent use could race — confirm this is acceptable on
            // the (atomics-free) targets this fallback serves.
            int lock() pure @trusted @nogc nothrow
            {
                static if (GNU_Thread_Model == ThreadModel.Posix)
                {
                    if (!_inited)
                    {
                        fakePureMutexInit(&_handle, null);
                        _inited = true;
                    }
                    return fakePureMutexLock(&_handle);
                }
                else
                {
                    static if (GNU_Thread_Model == ThreadModel.Win32)
                    {
                        if (!_inited)
                        {
                            fakePureMutexInit(&_handle);
                            _inited = true;
                        }
                        fakePureMutexLock(&_handle);
                    }
                    return 0;
                }
            }

            int unlock() pure @trusted @nogc nothrow
            {
                static if (GNU_Thread_Model == ThreadModel.Posix)
                    return fakePureMutexUnlock(&_handle);
                else
                {
                    static if (GNU_Thread_Model == ThreadModel.Win32)
                        fakePureMutexUnlock(&_handle);
                    return 0;
                }
            }

        private:
            atomicMutexHandle _handle;
            bool _inited;
        }

        // Internal static mutex reference.
        private AtomicMutex* _getAtomicMutex() @trusted @nogc nothrow
        {
            __gshared static AtomicMutex mutex;
            return &mutex;
        }

        // Pure alias for _getAtomicMutex: same mangled symbol, declared pure
        // so the lock can be taken from pure callers above.
        pragma(mangle, _getAtomicMutex.mangleof)
        private AtomicMutex* getAtomicMutex() pure @trusted @nogc nothrow @property;
    }
}