 #include "pycore_code.h"          // CO_FAST_FREE
 #include "pycore_dict.h"          // _PyDict_KeysSize()
 #include "pycore_frame.h"         // _PyInterpreterFrame
+#include "pycore_lock.h"          // _PySeqLock_*
 #include "pycore_long.h"          // _PyLong_IsNegative()
 #include "pycore_memoryobject.h"  // _PyMemoryView_FromBufferProc()
 #include "pycore_modsupport.h"    // _PyArg_NoKwnames()
@@ -4895,43 +4896,21 @@ static void
 update_cache_gil_disabled(struct type_cache_entry *entry, PyObject *name,
                           unsigned int version_tag, PyObject *value)
 {
-    // Similar to linux seqlock: https://en.wikipedia.org/wiki/Seqlock
-    // We use a sequence number to lock the writer, an even sequence means we're unlocked, an odd
-    // sequence means we're locked.
-    // Differs a little bit in that we use CAS on sequence as the lock, instead of a seperate spin lock.
-    // If our writer detects that another thread has already done the same write we'll just bail
-    // and restore the previous sequence number without doing any updates.
-
-    // lock the entry by setting by moving to an odd sequence number
-    int prev = entry->sequence;
-    while (1) {
-        if (TYPE_CACHE_IS_UPDATING(prev)) {
-            // Someone else is currently updating the cache
-            _Py_yield();
-            prev = _Py_atomic_load_int32_relaxed(&entry->sequence);
-        } else if (_Py_atomic_compare_exchange_int32(&entry->sequence, &prev, prev + 1)) {
-            // We've locked the cache
-            break;
-        } else {
-            _Py_yield();
-        }
-    }
+    _PySeqLock_LockWrite(&entry->sequence);

     // update the entry
     if (entry->name == name &&
         entry->value == value &&
         entry->version == version_tag) {
-        // We reaced with another update, bail and restore previous sequence.
-        _Py_atomic_exchange_int32(&entry->sequence, prev);
+        // We raced with another update, bail and restore previous sequence.
+        _PySeqLock_AbandonWrite(&entry->sequence);
         return;
     }

     update_cache(entry, name, version_tag, value);

     // Then update sequence to the next valid value
-    int new_sequence = prev + 2;
-    assert(!TYPE_CACHE_IS_UPDATING(new_sequence));
-    _Py_atomic_exchange_int32(&entry->sequence, new_sequence);
+    _PySeqLock_UnlockWrite(&entry->sequence);
 }

 #endif
@@ -4941,10 +4920,8 @@ void _PyTypes_AfterFork() {
     struct type_cache *cache = get_type_cache();
     for (int i = 0; i < 1 << MCACHE_SIZE_EXP; i++) {
         struct type_cache_entry *entry = &cache->hashtable[i];
-        int sequence = _Py_atomic_load_int_acquire(&entry->sequence);
-        if (TYPE_CACHE_IS_UPDATING(sequence)) {
+        if (_PySeqLock_AfterFork(&entry->sequence)) {
             // Entry was in the process of updating while forking, clear it...
-            entry->sequence = 0;
             entry->value = NULL;
             entry->name = NULL;
             entry->version = 0;
@@ -4968,27 +4945,22 @@ _PyType_Lookup(PyTypeObject *type, PyObject *name)
 #ifdef Py_GIL_DISABLED
     // synchronize-with other writing threads by doing an acquire load on the sequence
     while (1) {
-        int sequence = _Py_atomic_load_int_acquire(&entry->sequence);
-        if (!TYPE_CACHE_IS_UPDATING(sequence)) {
-            if (_Py_atomic_load_uint32_relaxed(&entry->version) == type->tp_version_tag &&
-                _Py_atomic_load_ptr_relaxed(&entry->name) == name) {
-                assert(_PyType_HasFeature(type, Py_TPFLAGS_VALID_VERSION_TAG));
-                OBJECT_STAT_INC_COND(type_cache_hits, !is_dunder_name(name));
-                OBJECT_STAT_INC_COND(type_cache_dunder_hits, is_dunder_name(name));
-                PyObject *value = _Py_atomic_load_ptr_relaxed(&entry->value);
-
-                // Synchronize again and validate that the entry hasn't been updated
-                // while we were readying the values.
-                if (_Py_atomic_load_int_acquire(&entry->sequence) == sequence) {
-                    return value;
-                }
-            } else {
-                // Cache miss
-                break;
-            }
+        int sequence = _PySeqLock_BeginRead(&entry->sequence);
+        if (_Py_atomic_load_uint32_relaxed(&entry->version) == type->tp_version_tag &&
+            _Py_atomic_load_ptr_relaxed(&entry->name) == name) {
+            assert(_PyType_HasFeature(type, Py_TPFLAGS_VALID_VERSION_TAG));
+            OBJECT_STAT_INC_COND(type_cache_hits, !is_dunder_name(name));
+            OBJECT_STAT_INC_COND(type_cache_dunder_hits, is_dunder_name(name));
+            PyObject *value = _Py_atomic_load_ptr_relaxed(&entry->value);
+
+            // If the sequence is still valid then we're done
+            if (_PySeqLock_EndRead(&entry->sequence, sequence)) {
+                return value;
+            }
+        } else {
+            // cache miss
+            break;
         }
-        // We are in progress of updating the cache, retry
-        _Py_yield();
     }
 #else
     if (entry->version == type->tp_version_tag &&
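The sketch below (not part of the patch) shows how the _PySeqLock write/read pairing used above fits together on its own: a writer brackets its stores with _PySeqLock_LockWrite / _PySeqLock_UnlockWrite (or _PySeqLock_AbandonWrite when it detects a duplicate write), and a reader loops on _PySeqLock_BeginRead / _PySeqLock_EndRead, retrying if a writer raced with it. The cached_pair struct, its field names, and both helper functions are hypothetical; the _PySeqLock type name is assumed from pycore_lock.h, which is a CPython-internal header, so this only builds inside the CPython tree.

#include "Python.h"
#include "pycore_lock.h"    // _PySeqLock_* (internal API, assumed available)

// Hypothetical two-field cache slot protected by a seqlock.
struct cached_pair {
    _PySeqLock sequence;    // assumed type name for the sequence word
    PyObject *key;
    PyObject *value;
};

// Writer: take the sequence lock, publish both fields, then unlock so the
// sequence becomes valid (even) again.
static void
cached_pair_store(struct cached_pair *p, PyObject *key, PyObject *value)
{
    _PySeqLock_LockWrite(&p->sequence);
    if (p->key == key && p->value == value) {
        // Another thread already stored the same data; restore the previous
        // sequence without publishing anything.
        _PySeqLock_AbandonWrite(&p->sequence);
        return;
    }
    _Py_atomic_store_ptr_relaxed(&p->key, key);
    _Py_atomic_store_ptr_relaxed(&p->value, value);
    _PySeqLock_UnlockWrite(&p->sequence);
}

// Reader: optimistic read that retries whenever the sequence changed while
// the fields were being loaded (i.e. a writer raced with us).
static PyObject *
cached_pair_load(struct cached_pair *p, PyObject *key)
{
    while (1) {
        int sequence = _PySeqLock_BeginRead(&p->sequence);
        if (_Py_atomic_load_ptr_relaxed(&p->key) != key) {
            return NULL;    // cache miss
        }
        PyObject *value = _Py_atomic_load_ptr_relaxed(&p->value);
        if (_PySeqLock_EndRead(&p->sequence, sequence)) {
            return value;   // the entry did not change while we read it
        }
        // Sequence moved on; retry from the top.
    }
}

As in the patch, the reader never blocks the writer: it only re-reads when _PySeqLock_EndRead reports that the sequence changed, which is why the loads of the individual fields can stay relaxed.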