Skip to content
This repository was archived by the owner on Oct 12, 2022. It is now read-only.
/ druntime Public archive
Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
328 changes: 328 additions & 0 deletions src/core/atomic.d
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,12 @@ else version (D_InlineAsm_X86_64)
enum has64BitCAS = true;
enum has128BitCAS = true;
}
else version (GNU)
{
import gcc.config;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Does GDC treat the modules gcc.config and gcc.builtins specially?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The former is a template config.d.in that's generated at configure-time. The latter is an empty module that has all builtins injected into it at compile-time, its contents may vary depending on compiler flags.

enum has64BitCAS = GNU_Have_64Bit_Atomics;
enum has128BitCAS = GNU_Have_LibAtomic;
}
else
{
enum has64BitCAS = false;
Expand Down Expand Up @@ -1404,6 +1410,328 @@ else version (AsmX86_64)
}
}
}
else version (GNU)
{
import gcc.builtins;

/// Atomically evaluates `val op mod` on a shared lvalue.
///
/// Plain binary/comparison operators act on a raw-ordered snapshot of
/// `val`; read-modify-write operators retry through `cas` until the
/// update is published without interference.
TailShared!T atomicOp(string op, T, V1)( ref shared T val, V1 mod ) pure nothrow @nogc @trusted
    if ( __traits( compiles, mixin( "*cast(T*)&val" ~ op ~ "mod" ) ) )
{
    // Purely-reading operators:
    //   + - * / % ^^ & | ^ << >> >>> ~ == != < <= > >=   ("in" unsupported)
    static if ( op == "+"  || op == "-"  || op == "*"  || op == "/"   ||
                op == "%"  || op == "^^" || op == "&"  || op == "|"   ||
                op == "^"  || op == "<<" || op == ">>" || op == ">>>" ||
                op == "~"  ||
                op == "==" || op == "!=" || op == "<"  || op == "<="  ||
                op == ">"  || op == ">=" )
    {
        // One raw atomic load, then evaluate on the private copy.
        TailShared!T snapshot = atomicLoad!(MemoryOrder.raw)( val );
        return mixin( "snapshot " ~ op ~ " mod" );
    }
    // Read-modify-write operators:
    //   += -= *= /= %= ^^= &= |= ^= <<= >>= >>>=   ("~=" unsupported)
    // Review note: where available, GDC could use __atomic_add_fetch and
    // friends for "+=" etc. instead of this compare-and-swap loop.
    else static if ( op == "+="  || op == "-="  || op == "*="  || op == "/="  ||
                     op == "%="  || op == "^^=" || op == "&="  || op == "|="  ||
                     op == "^="  || op == "<<=" || op == ">>=" || op == ">>>=" )
    {
        TailShared!T expected, updated;
        do
        {
            // Re-read, apply the operation to a copy, and attempt to
            // publish; retry if another thread won the race.
            expected = updated = atomicLoad!(MemoryOrder.raw)( val );
            mixin( "updated " ~ op ~ " mod;" );
        } while ( !cas( &val, expected, updated ) );
        return updated;
    }
    else
    {
        static assert( false, "Operation not supported." );
    }
}


/// Compare-and-swap for non-class, non-pointer types: if `*here == ifThis`,
/// atomically stores `writeThis` and returns `true`; otherwise returns `false`.
bool cas(T,V1,V2)( shared(T)* here, const V1 ifThis, V2 writeThis ) pure nothrow @nogc @safe
if ( !is(T == class) && !is(T U : U*) && __traits( compiles, { *here = writeThis; } ) )
{
return casImpl(here, ifThis, writeThis);
}

/// Compare-and-swap overload for class references (compares/replaces the
/// shared reference itself, not the object's contents).
bool cas(T,V1,V2)( shared(T)* here, const shared(V1) ifThis, shared(V2) writeThis ) pure nothrow @nogc @safe
if ( is(T == class) && __traits( compiles, { *here = writeThis; } ) )
{
return casImpl(here, ifThis, writeThis);
}

/// Compare-and-swap overload for pointer types (compares/replaces the
/// shared pointer value).
bool cas(T,V1,V2)( shared(T)* here, const shared(V1)* ifThis, shared(V2)* writeThis ) pure nothrow @nogc @safe
if ( is(T U : U*) && __traits( compiles, { *here = writeThis; } ) )
{
return casImpl(here, ifThis, writeThis);
}

// Shared implementation behind the public cas() overloads.  Dispatches on
// T.sizeof to the matching __atomic_compare_exchange_N builtin (seq_cst on
// both success and failure paths), falls back to libatomic's generic entry
// point for odd sizes, and finally to the global mutex when the target has
// no atomic support at all.
private bool casImpl(T,V1,V2)( shared(T)* here, V1 ifThis, V2 writeThis ) pure nothrow @nogc @trusted
{
bool res = void;

static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
{
// Builtin signature: (ptr, expected*, desired, weak, success_order,
// failure_order).  `false` requests the strong variant (no spurious
// failure), matching the semantics callers of cas() expect.
static if (T.sizeof == byte.sizeof)
{
res = __atomic_compare_exchange_1(here, cast(void*) &ifThis, *cast(ubyte*) &writeThis,
false, MemoryOrder.seq, MemoryOrder.seq);
}
else static if (T.sizeof == short.sizeof)
{
res = __atomic_compare_exchange_2(here, cast(void*) &ifThis, *cast(ushort*) &writeThis,
false, MemoryOrder.seq, MemoryOrder.seq);
}
else static if (T.sizeof == int.sizeof)
{
res = __atomic_compare_exchange_4(here, cast(void*) &ifThis, *cast(uint*) &writeThis,
false, MemoryOrder.seq, MemoryOrder.seq);
}
else static if (T.sizeof == long.sizeof && GNU_Have_64Bit_Atomics)
{
res = __atomic_compare_exchange_8(here, cast(void*) &ifThis, *cast(ulong*) &writeThis,
false, MemoryOrder.seq, MemoryOrder.seq);
}
else static if (GNU_Have_LibAtomic)
{
// Generic libatomic path for sizes with no native builtin.
res = __atomic_compare_exchange(T.sizeof, here, cast(void*) &ifThis, cast(void*) &writeThis,
MemoryOrder.seq, MemoryOrder.seq);
}
else
static assert(0, "Invalid template type specified.");
}
else
{
// Mutex-based fallback: pick an integer type of the same size so the
// comparison is a plain bitwise equality check.
// NOTE(review): for struct T with padding, bitwise comparison may
// differ from logical equality — presumed acceptable here; verify.
static if (T.sizeof == byte.sizeof)
alias U = byte;
else static if (T.sizeof == short.sizeof)
alias U = short;
else static if (T.sizeof == int.sizeof)
alias U = int;
else static if (T.sizeof == long.sizeof)
alias U = long;
else
static assert(0, "Invalid template type specified.");

getAtomicMutex.lock();
scope(exit) getAtomicMutex.unlock();

if (*cast(U*)here == *cast(U*)&ifThis)
{
*here = writeThis;
res = true;
}
else
res = false;
}

return res;
}

// Memory model types for the __atomic* builtins.
// Values mirror GCC's __ATOMIC_* constants (relaxed=0, acquire=2,
// release=3, seq_cst=5) — TODO confirm against gcc.builtins.
enum MemoryOrder
{
raw = 0, // no ordering constraints (relaxed)
acq = 2, // acquire: no reads/writes reordered before this load
rel = 3, // release: no reads/writes reordered after this store
seq = 5, // full sequential consistency (the default everywhere here)
}

// Legacy name kept for source compatibility with older druntime code.
deprecated("Please use MemoryOrder instead.")
alias MemoryOrder msync;

/// Atomically loads and returns the value of `val` with ordering `ms`.
/// Floating-point types are excluded here; they are handled by the
/// type-punning ABI adapter elsewhere in this module.
TailShared!T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)( ref const shared T val ) pure nothrow @nogc @trusted
if (!__traits(isFloating, T))
{
// A release ordering makes no sense for a load.
static assert(ms != MemoryOrder.rel, "Invalid MemoryOrder for atomicLoad");
static assert(__traits(isPOD, T), "argument to atomicLoad() must be POD");

static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
{
// Dispatch on size to the matching __atomic_load_N builtin; the raw
// bits are loaded into a local and reinterpreted back as T.
static if (T.sizeof == ubyte.sizeof)
{
ubyte value = __atomic_load_1(&val, ms);
return *cast(TailShared!T*) &value;
}
else static if (T.sizeof == ushort.sizeof)
{
ushort value = __atomic_load_2(&val, ms);
return *cast(TailShared!T*) &value;
}
else static if (T.sizeof == uint.sizeof)
{
uint value = __atomic_load_4(&val, ms);
return *cast(TailShared!T*) &value;
}
else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
{
ulong value = __atomic_load_8(&val, ms);
return *cast(TailShared!T*) &value;
}
else static if (GNU_Have_LibAtomic)
{
// Generic libatomic path for sizes with no native builtin.
T value;
__atomic_load(T.sizeof, &val, cast(void*)&value, ms);
return *cast(TailShared!T*) &value;
}
else
static assert(0, "Invalid template type specified.");
}
else
{
// No atomic support: serialize the read through the global mutex.
getAtomicMutex.lock();
scope(exit) getAtomicMutex.unlock();
return *cast(TailShared!T*)&val;
}
}

/// Atomically stores `newval` into `val` with memory ordering `ms`.
///
/// Params:
///  ms     = memory ordering for the store; `MemoryOrder.acq` is rejected
///           at compile time (an acquire ordering makes no sense for a store).
///  val    = the shared variable written to.
///  newval = the value to store; must be assignable to `val`.
void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V1)( ref shared T val, V1 newval ) pure nothrow @nogc @trusted
    if ( __traits( compiles, { val = newval; } ) )
{
    static assert(ms != MemoryOrder.acq, "Invalid MemoryOrder for atomicStore");
    // Fixed copy/paste bug: the message previously referred to atomicLoad().
    static assert(__traits(isPOD, T), "argument to atomicStore() must be POD");

    static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
    {
        // Dispatch on size to the matching __atomic_store_N builtin,
        // reinterpreting newval's bits as the unsigned type of that width.
        static if (T.sizeof == ubyte.sizeof)
        {
            __atomic_store_1(&val, *cast(ubyte*) &newval, ms);
        }
        else static if (T.sizeof == ushort.sizeof)
        {
            __atomic_store_2(&val, *cast(ushort*) &newval, ms);
        }
        else static if (T.sizeof == uint.sizeof)
        {
            __atomic_store_4(&val, *cast(uint*) &newval, ms);
        }
        else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
        {
            __atomic_store_8(&val, *cast(ulong*) &newval, ms);
        }
        else static if (GNU_Have_LibAtomic)
        {
            // Generic libatomic path for sizes with no native builtin.
            __atomic_store(T.sizeof, &val, cast(void*)&newval, ms);
        }
        else
            static assert(0, "Invalid template type specified.");
    }
    else
    {
        // No atomic support: serialize through the global mutex.
        // scope(exit) matches atomicLoad's unlock style (equivalent here
        // since the function is nothrow, but consistent and safer).
        getAtomicMutex.lock();
        scope(exit) getAtomicMutex.unlock();
        val = newval;
    }
}

/// Issues a full sequentially-consistent thread fence.
void atomicFence() nothrow @nogc
{
    static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
    {
        __atomic_thread_fence(MemoryOrder.seq);
    }
    else
    {
        // Without atomic builtins, acquiring and immediately releasing
        // the global atomics mutex serves as the barrier.
        auto mutex = getAtomicMutex;
        mutex.lock();
        mutex.unlock();
    }
}

static if (!GNU_Have_Atomics && !GNU_Have_LibAtomic)
{
// Use system mutex for atomics, faking the purity of the functions so
// that they can be used in pure/nothrow/@safe code.
// Fake-pure, fake-@safe bindings to the platform mutex primitives so the
// lock-based fallback can be used from pure/nothrow/@safe code.  The
// pragma(mangle) trick binds each declaration to the real C symbol while
// letting us claim attributes the C prototypes do not carry.
extern (C) private pure @trusted @nogc nothrow
{
    static if (GNU_Thread_Model == ThreadModel.Posix)
    {
        import core.sys.posix.pthread;
        alias atomicMutexHandle = pthread_mutex_t;

        // The pthread mutex functions genuinely return int (0 on success).
        pragma(mangle, "pthread_mutex_init") int fakePureMutexInit(pthread_mutex_t*, pthread_mutexattr_t*);
        pragma(mangle, "pthread_mutex_lock") int fakePureMutexLock(pthread_mutex_t*);
        pragma(mangle, "pthread_mutex_unlock") int fakePureMutexUnlock(pthread_mutex_t*);
    }
    else static if (GNU_Thread_Model == ThreadModel.Win32)
    {
        import core.sys.windows.winbase;
        alias atomicMutexHandle = CRITICAL_SECTION;

        // Fixed: InitializeCriticalSection and LeaveCriticalSection return
        // void, not int (EnterCriticalSection was already declared void);
        // declaring an int result read a nonexistent return value.  All
        // call sites ignore these results, so the change is internal.
        // NOTE(review): on Win32 x86 these APIs are stdcall; confirm that
        // extern (C) linkage with a forced mangle resolves correctly on
        // that target, or switch this branch to extern (Windows).
        pragma(mangle, "InitializeCriticalSection") void fakePureMutexInit(CRITICAL_SECTION*);
        pragma(mangle, "EnterCriticalSection") void fakePureMutexLock(CRITICAL_SECTION*);
        pragma(mangle, "LeaveCriticalSection") void fakePureMutexUnlock(CRITICAL_SECTION*);
    }
    else
    {
        // Unknown thread model: keep a dummy handle type so AtomicMutex
        // still compiles; lock/unlock become no-ops.
        alias atomicMutexHandle = int;
    }
}

// Implements lock/unlock operations.
private struct AtomicMutex
{
int lock() pure @trusted @nogc nothrow
{
static if (GNU_Thread_Model == ThreadModel.Posix)
{
if (!_inited)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not sure if it is critical but this looks a bit dangerous if initialization is requested from two threads concurrently.
What platforms match the condition (!GNU_Have_Atomics && !GNU_Have_LibAtomic)? I guess Windows will always have atomics.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think the only way that could happen is if a C program calls rt_init in two threads concurrently. Using pragma(crt_constructor) is another option, though I don't recall adding that feature to the dmd-cxx branch.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Because there is an atomic operation somewhere called from rt_init? A comment to that respect would be nice in that case.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hmm, I did write this at the 11th hour, so wasn't strictly thinking too clear about this. Also this was committed before @jpf91 added gcc.gthreads, so I guess we could remove some duplication here.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think something like this would be more succinct, and doesn't have the same pitfall.

static if (!GNU_Have_Atomics && !GNU_Have_LibAtomic)
{
    import gcc.gthread;

    // Internal static mutex reference.
    private __gthread_mutex_t* _getAtomicMutex() @trusted @nogc nothrow @property
    {
        __gshared static __gthread_mutex_t mutex = GTHREAD_MUTEX_INIT;
        return &mutex;
    }

    // Implements lock/unlock operations.
    private int _atomicMutexLock(__gthread_mutex_t* mutex) @trusted @nogc nothrow
    {
        return __gthread_mutex_lock(mutex);
    }

    private int _atomicMutexUnlock(__gthread_mutex_t* mutex) @trusted @nogc nothrow
    {
        return __gthread_mutex_unlock(mutex);
    }

    // Fake the purity of the functions so that they can be used in pure/nothrow/@safe code.
    pragma(mangle, _getAtomicMutex.mangleof)
    private __gthread_mutex_t* getAtomicMutex() pure @trusted @nogc nothrow @property;

    pragma(mangle, _atomicMutexLock.mangleof)
    private int atomicMutexLock(__gthread_mutex_t*) pure @trusted @nogc nothrow;

    pragma(mangle, _atomicMutexUnlock.mangleof)
    private int atomicMutexUnlock(__gthread_mutex_t*) pure @trusted @nogc nothrow;
}

Once __gthread_mutex_t and functions have been added to gcc.gthread, that is.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This should work. However, I don't really like that we duplicate core.sync.mutex here. Is this only required because core.sync.mutex is not pure? If so, can we change it to be pure?

{
fakePureMutexInit(&_handle, null);
_inited = true;
}
return fakePureMutexLock(&_handle);
}
else
{
static if (GNU_Thread_Model == ThreadModel.Win32)
{
if (!_inited)
{
fakePureMutexInit(&_handle);
_inited = true;
}
fakePureMutexLock(&_handle);
}
return 0;
}
}

/// Releases the mutex taken by lock().
/// Returns: the pthread error code on Posix; always 0 elsewhere
/// (LeaveCriticalSection yields no meaningful result).
int unlock() pure @trusted @nogc nothrow
{
static if (GNU_Thread_Model == ThreadModel.Posix)
return fakePureMutexUnlock(&_handle);
else
{
static if (GNU_Thread_Model == ThreadModel.Win32)
fakePureMutexUnlock(&_handle);
return 0;
}
}

private:
atomicMutexHandle _handle;
bool _inited;
}

// Internal static mutex reference.  Returns the address of a single
// __gshared AtomicMutex shared by all lock-based fallback paths.
private AtomicMutex* _getAtomicMutex() @trusted @nogc nothrow
{
__gshared static AtomicMutex mutex;
return &mutex;
}

// Pure alias for _getAtomicMutex: pragma(mangle) binds this bodiless,
// pure-attributed declaration to the impure implementation above, so the
// atomics code can fetch the mutex from pure/nothrow/@nogc contexts.
pragma(mangle, _getAtomicMutex.mangleof)
private AtomicMutex* getAtomicMutex() pure @trusted @nogc nothrow @property;
}
}

// This is an ABI adapter that works on all architectures. It type puns
// floats and doubles to ints and longs, atomically loads them, then puns
Expand Down