From a3ebde6936f925779bcac4f872e89b83ecc83419 Mon Sep 17 00:00:00 2001
From: Susan Hinrichs
Date: Mon, 4 Feb 2019 21:33:14 +0000
Subject: [PATCH] Make mutex lock routines deal with null mutex and general clean up.

---
 iocore/eventsystem/I_Lock.h        | 185 +++--------------------------
 iocore/hostdb/HostDB.cc            |  37 +++---
 iocore/hostdb/P_HostDB.h           |   2 +-
 iocore/hostdb/P_RefCountCache.h    |   6 +-
 iocore/net/UnixNetAccept.cc        |  10 +-
 proxy/ProxySession.cc              |  27 ++---
 proxy/http/HttpSM.cc               |  44 ++-----
 proxy/http/HttpSessionManager.cc   |   9 +-
 src/traffic_server/InkAPI.cc       |  28 ++---
 src/traffic_server/InkIOCoreAPI.cc |  23 ++--
 10 files changed, 98 insertions(+), 273 deletions(-)

diff --git a/iocore/eventsystem/I_Lock.h b/iocore/eventsystem/I_Lock.h
index 9664606e55e..00177253de0 100644
--- a/iocore/eventsystem/I_Lock.h
+++ b/iocore/eventsystem/I_Lock.h
@@ -242,7 +242,7 @@ Mutex_trylock(
 #ifdef DEBUG
   const SourceLocation &location, const char *ahandler,
 #endif
-  ProxyMutex *m, EThread *t)
+  Ptr<ProxyMutex> &m, EThread *t)
 {
   ink_assert(t != nullptr);
   ink_assert(t == (EThread *)this_thread());
@@ -281,90 +281,12 @@ Mutex_trylock(
   return true;
 }
 
-inline bool
-Mutex_trylock(
-#ifdef DEBUG
-  const SourceLocation &location, const char *ahandler,
-#endif
-  Ptr<ProxyMutex> &m, EThread *t)
-{
-  return Mutex_trylock(
-#ifdef DEBUG
-    location, ahandler,
-#endif
-    m.get(), t);
-}
-
-inline bool
-Mutex_trylock_spin(
-#ifdef DEBUG
-  const SourceLocation &location, const char *ahandler,
-#endif
-  ProxyMutex *m, EThread *t, int spincnt = 1)
-{
-  ink_assert(t != nullptr);
-  if (m->thread_holding != t) {
-    int locked;
-    do {
-      if ((locked = ink_mutex_try_acquire(&m->the_mutex))) {
-        break;
-      }
-    } while (--spincnt);
-    if (!locked) {
-#ifdef DEBUG
-      lock_waiting(m->srcloc, m->handler);
-#ifdef LOCK_CONTENTION_PROFILING
-      m->unsuccessful_nonblocking_acquires++;
-      m->nonblocking_acquires++;
-      m->total_acquires++;
-      m->print_lock_stats(0);
-#endif // LOCK_CONTENTION_PROFILING
-#endif // DEBUG
-      return false;
-    }
-    m->thread_holding = t;
-    ink_assert(m->thread_holding);
-#ifdef DEBUG
-    m->srcloc    = location;
-    m->handler   = ahandler;
-    m->hold_time = Thread::get_hrtime();
-#ifdef MAX_LOCK_TAKEN
-    m->taken++;
-#endif // MAX_LOCK_TAKEN
-#endif // DEBUG
-  }
-#ifdef DEBUG
-#ifdef LOCK_CONTENTION_PROFILING
-  m->successful_nonblocking_acquires++;
-  m->nonblocking_acquires++;
-  m->total_acquires++;
-  m->print_lock_stats(0);
-#endif // LOCK_CONTENTION_PROFILING
-#endif // DEBUG
-  m->nthread_holding++;
-  return true;
-}
-
-inline bool
-Mutex_trylock_spin(
-#ifdef DEBUG
-  const SourceLocation &location, const char *ahandler,
-#endif
-  Ptr<ProxyMutex> &m, EThread *t, int spincnt = 1)
-{
-  return Mutex_trylock_spin(
-#ifdef DEBUG
-    location, ahandler,
-#endif
-    m.get(), t, spincnt);
-}
-
 inline int
 Mutex_lock(
 #ifdef DEBUG
   const SourceLocation &location, const char *ahandler,
 #endif
-  ProxyMutex *m, EThread *t)
+  Ptr<ProxyMutex> &m, EThread *t)
 {
   ink_assert(t != nullptr);
   if (m->thread_holding != t) {
@@ -391,22 +313,8 @@ Mutex_lock(
   return true;
 }
 
-inline int
-Mutex_lock(
-#ifdef DEBUG
-  const SourceLocation &location, const char *ahandler,
-#endif
-  Ptr<ProxyMutex> &m, EThread *t)
-{
-  return Mutex_lock(
-#ifdef DEBUG
-    location, ahandler,
-#endif
-    m.get(), t);
-}
-
 inline void
-Mutex_unlock(ProxyMutex *m, EThread *t)
+Mutex_unlock(Ptr<ProxyMutex> &m, EThread *t)
 {
   if (m->nthread_holding) {
     ink_assert(t == m->thread_holding);
@@ -429,12 +337,6 @@ Mutex_unlock(ProxyMutex *m, EThread *t)
   }
 }
 
-inline void
-Mutex_unlock(Ptr<ProxyMutex> &m, EThread *t)
-{
-  Mutex_unlock(m.get(), t);
-}
-
 /** Scoped lock class for ProxyMutex */
 class MutexLock
@@ -444,20 +346,6 @@ class MutexLock
   bool locked_p;
 
 public:
-  MutexLock(
-#ifdef DEBUG
-    const SourceLocation &location, const char *ahandler,
-#endif // DEBUG
-    ProxyMutex *am, EThread *t)
-    : m(am), locked_p(true)
-  {
-    Mutex_lock(
-#ifdef DEBUG
-      location, ahandler,
-#endif // DEBUG
-      m.get(), t);
-  }
-
   MutexLock(
 #ifdef DEBUG
     const SourceLocation &location, const char *ahandler,
 #endif // DEBUG
@@ -469,7 +357,7 @@ class MutexLock
 #ifdef DEBUG
       location, ahandler,
 #endif // DEBUG
-      m.get(), t);
+      m, t);
   }
 
   void
@@ -493,20 +381,6 @@ class MutexTryLock
   bool lock_acquired;
 
 public:
-  MutexTryLock(
-#ifdef DEBUG
-    const SourceLocation &location, const char *ahandler,
-#endif // DEBUG
-    ProxyMutex *am, EThread *t)
-    : m(am)
-  {
-    lock_acquired = Mutex_trylock(
-#ifdef DEBUG
-      location, ahandler,
-#endif // DEBUG
-      m.get(), t);
-  }
-
   MutexTryLock(
 #ifdef DEBUG
     const SourceLocation &location, const char *ahandler,
 #endif // DEBUG
@@ -514,45 +388,21 @@ class MutexTryLock
     Ptr<ProxyMutex> &am, EThread *t)
     : m(am)
   {
-    lock_acquired = Mutex_trylock(
 #ifdef DEBUG
-      location, ahandler,
 #endif // DEBUG
-      m.get(), t);
-  }
-
-  MutexTryLock(
 #ifdef DEBUG
-    const SourceLocation &location, const char *ahandler,
 #endif // DEBUG
-    ProxyMutex *am, EThread *t, int sp)
-    : m(am)
-  {
-    lock_acquired = Mutex_trylock_spin(
+    if (am) {
+      lock_acquired = Mutex_trylock(
 #ifdef DEBUG
-      location, ahandler,
-#endif // DEBUG
-      m.get(), t, sp);
-  }
-
-  MutexTryLock(
-#ifdef DEBUG
-    const SourceLocation &location, const char *ahandler,
-#endif // DEBUG
-    Ptr<ProxyMutex> &am, EThread *t, int sp)
-    : m(am)
-  {
-    lock_acquired = Mutex_trylock_spin(
-#ifdef DEBUG
-      location, ahandler,
+        location, ahandler,
 #endif // DEBUG
-      m.get(), t, sp);
+        m, t);
+    } else {
+      lock_acquired = true;
+    }
   }
 
   ~MutexTryLock()
   {
-    if (lock_acquired) {
-      Mutex_unlock(m.get(), m->thread_holding);
+    if (lock_acquired && m.get()) {
+      Mutex_unlock(m, m->thread_holding);
     }
   }
 
@@ -561,16 +411,17 @@ class MutexTryLock
   void
   acquire(EThread *t)
   {
-    MUTEX_TAKE_LOCK(m.get(), t);
     lock_acquired = true;
+    if (m.get()) {
+      MUTEX_TAKE_LOCK(m, t);
+    }
   }
 
   void
   release()
   {
-    ink_assert(lock_acquired); // generate a warning because it shouldn't be done.
-    if (lock_acquired) {
-      Mutex_unlock(m.get(), m->thread_holding);
+    if (lock_acquired && m.get()) {
+      Mutex_unlock(m, m->thread_holding);
     }
     lock_acquired = false;
   }
diff --git a/iocore/hostdb/HostDB.cc b/iocore/hostdb/HostDB.cc
index 947b2c234b2..dd80c58ecc6 100644
--- a/iocore/hostdb/HostDB.cc
+++ b/iocore/hostdb/HostDB.cc
@@ -449,13 +449,13 @@ HostDBContinuation::init(HostDBHash const &the_hash, Options const &opt)
 void
 HostDBContinuation::refresh_hash()
 {
-  ProxyMutex *old_bucket_mutex = hostDB.refcountcache->lock_for_key(hash.hash.fold());
+  Ptr<ProxyMutex> old_bucket_mutex = hostDB.refcountcache->lock_for_key(hash.hash.fold());
   // We're not pending DNS anymore.
   remove_trigger_pending_dns();
   hash.refresh();
   // Update the mutex if it's from the bucket.
   // Some call sites modify this after calling @c init so need to check.
-  if (mutex.get() == old_bucket_mutex) {
+  if (mutex == old_bucket_mutex) {
     mutex = hostDB.refcountcache->lock_for_key(hash.hash.fold());
   }
 }
@@ -533,7 +533,7 @@ db_mark_for(IpAddr const &ip)
 }
 
 Ptr<HostDBInfo>
-probe(ProxyMutex *mutex, HostDBHash const &hash, bool ignore_timeout)
+probe(Ptr<ProxyMutex> mutex, HostDBHash const &hash, bool ignore_timeout)
 {
   // If hostdb is disabled, don't return anything
   if (!hostdb_enable) {
@@ -609,8 +609,8 @@ HostDBProcessor::getby(Continuation *cont, const char *hostname, int len, sockad
                        HostResStyle host_res_style, int dns_lookup_timeout)
 {
   HostDBHash hash;
-  EThread *thread = this_ethread();
-  ProxyMutex *mutex = thread->mutex.get();
+  EThread *thread       = this_ethread();
+  Ptr<ProxyMutex> mutex = thread->mutex;
   ip_text_buffer ipb;
 
   HOSTDB_INCREMENT_DYN_STAT(hostdb_total_lookups_stat);
@@ -640,7 +640,7 @@ HostDBProcessor::getby(Continuation *cont, const char *hostname, int len, sockad
     // find the partition lock
     //
     // TODO: Could we reuse the "mutex" above safely? I think so but not sure.
-    ProxyMutex *bmutex = hostDB.refcountcache->lock_for_key(hash.hash.fold());
+    Ptr<ProxyMutex> bmutex = hostDB.refcountcache->lock_for_key(hash.hash.fold());
     MUTEX_TRY_LOCK(lock, bmutex, thread);
     MUTEX_TRY_LOCK(lock2, cont->mutex, thread);
 
@@ -763,7 +763,7 @@ HostDBProcessor::getSRVbyname_imm(Continuation *cont, process_srv_info_pfn proce
   // Attempt to find the result in-line, for level 1 hits
   if (!force_dns) {
     // find the partition lock
-    ProxyMutex *bucket_mutex = hostDB.refcountcache->lock_for_key(hash.hash.fold());
+    Ptr<ProxyMutex> bucket_mutex = hostDB.refcountcache->lock_for_key(hash.hash.fold());
     MUTEX_TRY_LOCK(lock, bucket_mutex, thread);
 
     // If we can get the lock and a level 1 probe succeeds, return
@@ -836,7 +836,7 @@ HostDBProcessor::getbyname_imm(Continuation *cont, process_hostdb_info_pfn proce
   do {
     loop = false; // loop only on explicit set for retry
     // find the partition lock
-    ProxyMutex *bucket_mutex = hostDB.refcountcache->lock_for_key(hash.hash.fold());
+    Ptr<ProxyMutex> bucket_mutex = hostDB.refcountcache->lock_for_key(hash.hash.fold());
     SCOPED_MUTEX_LOCK(lock, bucket_mutex, thread);
     // do a level 1 probe for immediate result.
     Ptr<HostDBInfo> r = probe(bucket_mutex, hash, false);
@@ -952,8 +952,8 @@ HostDBProcessor::setby(const char *hostname, int len, sockaddr const *ip, HostDB
 
   // Attempt to find the result in-line, for level 1 hits
 
-  ProxyMutex *mutex = hostDB.refcountcache->lock_for_key(hash.hash.fold());
-  EThread *thread = this_ethread();
+  Ptr<ProxyMutex> mutex = hostDB.refcountcache->lock_for_key(hash.hash.fold());
+  EThread *thread       = this_ethread();
   MUTEX_TRY_LOCK(lock, mutex, thread);
 
   if (lock.is_locked()) {
@@ -999,7 +999,7 @@ HostDBProcessor::setby_srv(const char *hostname, int len, const char *target, Ho
 int
 HostDBContinuation::setbyEvent(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
 {
-  Ptr<HostDBInfo> r = probe(mutex.get(), hash, false);
+  Ptr<HostDBInfo> r = probe(mutex, hash, false);
 
   if (r) {
     do_setby(r.get(), &app, hash.host_name, hash.ip, is_srv());
@@ -1056,8 +1056,11 @@ int
 HostDBContinuation::removeEvent(int /* event ATS_UNUSED */, Event *e)
 {
   Continuation *cont = action.continuation;
-
-  MUTEX_TRY_LOCK(lock, cont ? cont->mutex.get() : (ProxyMutex *)nullptr, e->ethread);
+  Ptr<ProxyMutex> proxy_mutex;
+  if (cont) {
+    proxy_mutex = cont->mutex;
+  }
+  MUTEX_TRY_LOCK(lock, proxy_mutex, e->ethread);
   if (!lock.is_locked()) {
     e->schedule_in(HOST_DB_RETRY_PERIOD);
     return EVENT_CONT;
   }
@@ -1068,7 +1071,7 @@ HostDBContinuation::removeEvent(int /* event ATS_UNUSED */, Event *e)
       cont->handleEvent(EVENT_HOST_DB_IP_REMOVED, (void *)nullptr);
     }
   } else {
-    Ptr<HostDBInfo> r = probe(mutex.get(), hash, false);
+    Ptr<HostDBInfo> r = probe(mutex, hash, false);
     bool res = remove_round_robin(r.get(), hash.host_name, hash.ip);
     if (cont) {
       cont->handleEvent(EVENT_HOST_DB_IP_REMOVED, res ? static_cast<void *>(&hash.ip) : static_cast<void *>(nullptr));
     }
@@ -1277,7 +1280,7 @@ HostDBContinuation::dnsEvent(int event, HostEnt *e)
     ttl             = failed ? 0 : e->ttl / 60;
     int ttl_seconds = failed ? 0 : e->ttl; // ebalsa: moving to second accuracy
 
-    Ptr<HostDBInfo> old_r = probe(mutex.get(), hash, false);
+    Ptr<HostDBInfo> old_r = probe(mutex, hash, false);
     // If the DNS lookup failed with NXDOMAIN, remove the old record
     if (e && e->isNameError() && old_r) {
       hostDB.refcountcache->erase(old_r->key);
@@ -1535,7 +1538,7 @@ HostDBContinuation::iterateEvent(int event, Event *e)
   // let's iterate through another record and then reschedule ourself.
   if (current_iterate_pos < hostDB.refcountcache->partition_count()) {
     // TODO: configurable number at a time?
-    ProxyMutex *bucket_mutex = hostDB.refcountcache->get_partition(current_iterate_pos).lock.get();
+    Ptr<ProxyMutex> bucket_mutex = hostDB.refcountcache->get_partition(current_iterate_pos).lock;
     MUTEX_TRY_LOCK(lock_bucket, bucket_mutex, t);
     if (!lock_bucket.is_locked()) {
       // we couldn't get the bucket lock, let's just reschedule and try later.
@@ -1612,7 +1615,7 @@ HostDBContinuation::probeEvent(int /* event ATS_UNUSED */, Event *e)
   if (!force_dns) {
     // Do the probe
     //
    Ptr<HostDBInfo> r = probe(mutex.get(), hash, false);
-    Ptr<HostDBInfo> r = probe(mutex.get(), hash, false);
+    Ptr<HostDBInfo> r = probe(mutex, hash, false);
 
     if (r) {
       HOSTDB_INCREMENT_DYN_STAT(hostdb_total_hits_stat);
diff --git a/iocore/hostdb/P_HostDB.h b/iocore/hostdb/P_HostDB.h
index c29d3c613e7..43c5a5ba4bc 100644
--- a/iocore/hostdb/P_HostDB.h
+++ b/iocore/hostdb/P_HostDB.h
@@ -47,6 +47,6 @@ static constexpr ts::ModuleVersion HOSTDB_MODULE_INTERNAL_VERSION{HOSTDB_MODULE_PUBLIC_VERSION, ts::ModuleVersion::PRIVATE};
 
-Ptr<HostDBInfo> probe(ProxyMutex *mutex, CryptoHash const &hash, bool ignore_timeout);
+Ptr<HostDBInfo> probe(Ptr<ProxyMutex> mutex, CryptoHash const &hash, bool ignore_timeout);
 
 void make_crypto_hash(CryptoHash &hash, const char *hostname, int len, int port, const char *pDNSServers, HostDBMark mark);
diff --git a/iocore/hostdb/P_RefCountCache.h b/iocore/hostdb/P_RefCountCache.h
index 8655d345607..9ca3764e807 100644
--- a/iocore/hostdb/P_RefCountCache.h
+++ b/iocore/hostdb/P_RefCountCache.h
@@ -408,7 +408,7 @@ template <class C> class RefCountCache
   // Some methods to get some internal state
   int partition_for_key(uint64_t key);
-  ProxyMutex *lock_for_key(uint64_t key);
+  Ptr<ProxyMutex> lock_for_key(uint64_t key);
   size_t partition_count() const;
   RefCountCachePartition<C> &get_partition(int pnum);
   size_t count() const;
@@ -510,10 +510,10 @@ RefCountCache<C>::get_header()
 }
 
 template <class C>
-ProxyMutex *
+Ptr<ProxyMutex>
 RefCountCache<C>::lock_for_key(uint64_t key)
 {
-  return this->partitions[this->partition_for_key(key)]->lock.get();
+  return this->partitions[this->partition_for_key(key)]->lock;
 }
 
 template <class C>
diff --git a/iocore/net/UnixNetAccept.cc b/iocore/net/UnixNetAccept.cc
index b730c86ca77..0164d64b4b3 100644
--- a/iocore/net/UnixNetAccept.cc
+++ b/iocore/net/UnixNetAccept.cc
@@ -78,7 +78,7 @@ net_accept(NetAccept *na, void *ep, bool blockable)
   Connection con;
 
   if (!blockable) {
-    if (!MUTEX_TAKE_TRY_LOCK(na->action_->mutex.get(), e->ethread)) {
+    if (!MUTEX_TAKE_TRY_LOCK(na->action_->mutex, e->ethread)) {
       return 0;
     }
   }
@@ -149,7 +149,7 @@ net_accept(NetAccept *na, void *ep, bool blockable)
 
 Ldone:
   if (!blockable) {
-    MUTEX_UNTAKE_LOCK(na->action_->mutex.get(), e->ethread);
+    MUTEX_UNTAKE_LOCK(na->action_->mutex, e->ethread);
   }
   return count;
 }
@@ -379,12 +379,12 @@ NetAccept::acceptEvent(int event, void *ep)
   (void)event;
   Event *e = (Event *)ep;
   // PollDescriptor *pd = get_PollDescriptor(e->ethread);
-  ProxyMutex *m = nullptr;
+  Ptr<ProxyMutex> m;
 
   if (action_->mutex) {
-    m = action_->mutex.get();
+    m = action_->mutex;
   } else {
-    m = mutex.get();
+    m = mutex;
   }
 
   MUTEX_TRY_LOCK(lock, m, e->ethread);
diff --git a/proxy/ProxySession.cc b/proxy/ProxySession.cc
index be78f6bc4ab..6afb04f65ea 100644
--- a/proxy/ProxySession.cc
+++ b/proxy/ProxySession.cc
@@ -119,29 +119,20 @@ ProxySession::state_api_callout(int event, void *data)
   }
 
   if (this->api_current) {
-    bool plugin_lock = false;
-    APIHook *hook    = this->api_current;
-    Ptr<ProxyMutex> plugin_mutex;
-
-    if (hook->m_cont->mutex) {
-      plugin_mutex = hook->m_cont->mutex;
-      plugin_lock  = MUTEX_TAKE_TRY_LOCK(hook->m_cont->mutex, mutex->thread_holding);
-      if (!plugin_lock) {
-        SET_HANDLER(&ProxySession::state_api_callout);
-        if (!schedule_event) { // Don't bother to schedule is there is already one out.
-          schedule_event = mutex->thread_holding->schedule_in(this, HRTIME_MSECONDS(10));
-        }
-        return 0;
+    APIHook *hook = this->api_current;
+
+    MUTEX_TRY_LOCK(lock, hook->m_cont->mutex, mutex->thread_holding);
+    // Have a mutex but didn't get the lock, reschedule
+    if (!lock.is_locked()) {
+      SET_HANDLER(&ProxySession::state_api_callout);
+      if (!schedule_event) { // Don't bother to schedule if there is already one out.
+        schedule_event = mutex->thread_holding->schedule_in(this, HRTIME_MSECONDS(10));
       }
+      return 0;
     }
 
     this->api_current = this->api_current->next();
     hook->invoke(eventmap[this->api_hookid], this);
-
-    if (plugin_lock) {
-      Mutex_unlock(plugin_mutex, this_ethread());
-    }
-
     return 0;
   }
 }
diff --git a/proxy/http/HttpSM.cc b/proxy/http/HttpSM.cc
index 57caf2ba24f..e9233c1d36e 100644
--- a/proxy/http/HttpSM.cc
+++ b/proxy/http/HttpSM.cc
@@ -1428,34 +1428,19 @@ plugins required to work with sni_routing.
         callout_state = HTTP_API_IN_CALLOUT;
       }
 
-      /* The MUTEX_TRY_LOCK macro was changed so
-         that it can't handle NULL mutex'es.  The plugins
-         can use null mutexes so we have to do this manually.
-         We need to take a smart pointer to the mutex since
-         the plugin could release it's mutex while we're on
-         the callout
-       */
-      bool plugin_lock;
-      Ptr<ProxyMutex> plugin_mutex;
-      if (cur_hook->m_cont->mutex) {
-        plugin_mutex = cur_hook->m_cont->mutex;
-        plugin_lock  = MUTEX_TAKE_TRY_LOCK(cur_hook->m_cont->mutex, mutex->thread_holding);
-
-        if (!plugin_lock) {
-          api_timer = -Thread::get_hrtime_updated();
-          HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_api_callout);
-          ink_assert(pending_action == nullptr);
-          pending_action = mutex->thread_holding->schedule_in(this, HRTIME_MSECONDS(10));
-          // Should @a callout_state be reset back to HTTP_API_NO_CALLOUT here? Because the default
-          // handler has been changed the value isn't important to the rest of the state machine
-          // but not resetting means there is no way to reliably detect re-entrance to this state with an
-          // outstanding callout.
-          return 0;
-        }
-      } else {
-        plugin_lock = false;
+      MUTEX_TRY_LOCK(lock, cur_hook->m_cont->mutex, mutex->thread_holding);
+      // Have a mutex but didn't get the lock, reschedule
+      if (!lock.is_locked()) {
+        api_timer = -Thread::get_hrtime_updated();
+        HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_api_callout);
+        ink_assert(pending_action == nullptr);
+        pending_action = mutex->thread_holding->schedule_in(this, HRTIME_MSECONDS(10));
+        // Should @a callout_state be reset back to HTTP_API_NO_CALLOUT here? Because the default
+        // handler has been changed the value isn't important to the rest of the state machine
+        // but not resetting means there is no way to reliably detect re-entrance to this state with an
+        // outstanding callout.
+        return 0;
       }
-
       SMDebug("http", "[%" PRId64 "] calling plugin on hook %s at hook %p", sm_id, HttpDebugNames::get_api_hook_name(cur_hook_id), cur_hook);
@@ -1473,11 +1458,6 @@ plugins required to work with sni_routing.
       // tracking a non-complete callout from a chain so just let it ride. It will get cleaned
      // up in state_api_callback when the plugin re-enables this transaction.
       // up in state_api_callback when the plugin re-enables this transaction.
     }
-
-    if (plugin_lock) {
-      Mutex_unlock(plugin_mutex, mutex->thread_holding);
-    }
-
     return 0;
   }
 }
diff --git a/proxy/http/HttpSessionManager.cc b/proxy/http/HttpSessionManager.cc
index 399f0354936..33033b2d48d 100644
--- a/proxy/http/HttpSessionManager.cc
+++ b/proxy/http/HttpSessionManager.cc
@@ -320,10 +320,11 @@ HttpSessionManager::acquire_session(Continuation * /* cont ATS_UNUSED */, sockad
   // client session
   {
     // Now check to see if we have a connection in our shared connection pool
-    EThread *ethread = this_ethread();
-    ProxyMutex *pool_mutex = (TS_SERVER_SESSION_SHARING_POOL_THREAD == sm->t_state.http_config_param->server_session_sharing_pool) ?
-                               ethread->server_session_pool->mutex.get() :
-                               m_g_pool->mutex.get();
+    EThread *ethread = this_ethread();
+    Ptr<ProxyMutex> pool_mutex =
+      (TS_SERVER_SESSION_SHARING_POOL_THREAD == sm->t_state.http_config_param->server_session_sharing_pool) ?
+        ethread->server_session_pool->mutex :
+        m_g_pool->mutex;
     MUTEX_TRY_LOCK(lock, pool_mutex, ethread);
     if (lock.is_locked()) {
       if (TS_SERVER_SESSION_SHARING_POOL_THREAD == sm->t_state.http_config_param->server_session_sharing_pool) {
diff --git a/src/traffic_server/InkAPI.cc b/src/traffic_server/InkAPI.cc
index c0682c239c5..cbe88ce9397 100644
--- a/src/traffic_server/InkAPI.cc
+++ b/src/traffic_server/InkAPI.cc
@@ -1295,16 +1295,12 @@ APIHook::invoke(int event, void *edata)
       ink_assert(!"not reached");
     }
   }
-  if (m_cont->mutex != nullptr) {
-    MUTEX_TRY_LOCK(lock, m_cont->mutex, this_ethread());
-    if (!lock.is_locked()) {
-      // If we cannot get the lock, the caller needs to restructure to handle rescheduling
-      ink_release_assert(0);
-    }
-    return m_cont->handleEvent(event, edata);
-  } else {
-    return m_cont->handleEvent(event, edata);
+  MUTEX_TRY_LOCK(lock, m_cont->mutex, this_ethread());
+  if (!lock.is_locked()) {
+    // If we cannot get the lock, the caller needs to restructure to handle rescheduling
+    ink_release_assert(0);
   }
+  return m_cont->handleEvent(event, edata);
 }
 
 APIHook *
@@ -4682,16 +4678,12 @@ int
 TSContCall(TSCont contp, TSEvent event, void *edata)
 {
   Continuation *c = (Continuation *)contp;
-  if (c->mutex != nullptr) {
-    MUTEX_TRY_LOCK(lock, c->mutex, this_ethread());
-    if (!lock.is_locked()) {
-      // If we cannot get the lock, the caller needs to restructure to handle rescheduling
-      ink_release_assert(0);
-    }
-    return c->handleEvent((int)event, edata);
-  } else {
-    return c->handleEvent((int)event, edata);
+  MUTEX_TRY_LOCK(lock, c->mutex, this_ethread());
+  if (!lock.is_locked()) {
+    // If we cannot get the lock, the caller needs to restructure to handle rescheduling
+    ink_release_assert(0);
   }
+  return c->handleEvent((int)event, edata);
 }
 
 TSMutex
diff --git a/src/traffic_server/InkIOCoreAPI.cc b/src/traffic_server/InkIOCoreAPI.cc
index 221aa368ee5..d8c3fda4452 100644
--- a/src/traffic_server/InkIOCoreAPI.cc
+++ b/src/traffic_server/InkIOCoreAPI.cc
@@ -54,7 +54,7 @@ sdk_sanity_check_mutex(TSMutex mutex)
     return TS_ERROR;
   }
 
-  ProxyMutex *mutexp = (ProxyMutex *)mutex;
+  ProxyMutex *mutexp = reinterpret_cast<ProxyMutex *>(mutex);
 
   if (mutexp->refcount() < 0) {
     return TS_ERROR;
@@ -242,6 +242,7 @@ TSMutex
 TSMutexCreate()
 {
   ProxyMutex *mutexp = new_ProxyMutex();
+  mutexp->refcount_inc();
 
   // TODO: Remove this when allocations can never fail.
   sdk_assert(sdk_sanity_check_mutex((TSMutex)mutexp) == TS_SUCCESS);
@@ -253,9 +254,12 @@ void
 TSMutexDestroy(TSMutex m)
 {
   sdk_assert(sdk_sanity_check_mutex(m) == TS_SUCCESS);
-  ink_release_assert(((ProxyMutex *)m)->refcount() == 0);
-
-  ((ProxyMutex *)m)->free();
+  ProxyMutex *mutexp = reinterpret_cast<ProxyMutex *>(m);
+  // Decrement the refcount added in TSMutexCreate. Delete if this
+  // was the last ref count
+  if (mutexp && mutexp->refcount_dec() == 0) {
+    mutexp->free();
+  }
 }
 
 /* The following two APIs are for Into work, actually, APIs of Mutex
@@ -292,21 +296,24 @@ void
 TSMutexLock(TSMutex mutexp)
 {
   sdk_assert(sdk_sanity_check_mutex(mutexp) == TS_SUCCESS);
-  MUTEX_TAKE_LOCK((ProxyMutex *)mutexp, this_ethread());
+  Ptr<ProxyMutex> proxy_mutex(reinterpret_cast<ProxyMutex *>(mutexp));
+  MUTEX_TAKE_LOCK(proxy_mutex, this_ethread());
 }
 
 TSReturnCode
 TSMutexLockTry(TSMutex mutexp)
 {
   sdk_assert(sdk_sanity_check_mutex(mutexp) == TS_SUCCESS);
-  return (MUTEX_TAKE_TRY_LOCK((ProxyMutex *)mutexp, this_ethread()) ? TS_SUCCESS : TS_ERROR);
+  Ptr<ProxyMutex> proxy_mutex(reinterpret_cast<ProxyMutex *>(mutexp));
+  return (MUTEX_TAKE_TRY_LOCK(proxy_mutex, this_ethread()) ? TS_SUCCESS : TS_ERROR);
 }
 
 void
 TSMutexUnlock(TSMutex mutexp)
 {
   sdk_assert(sdk_sanity_check_mutex(mutexp) == TS_SUCCESS);
-  MUTEX_UNTAKE_LOCK((ProxyMutex *)mutexp, this_ethread());
+  Ptr<ProxyMutex> proxy_mutex(reinterpret_cast<ProxyMutex *>(mutexp));
+  MUTEX_UNTAKE_LOCK(proxy_mutex, this_ethread());
 }
 
 /* VIOs */
@@ -409,7 +416,7 @@ TSVIOMutexGet(TSVIO viop)
   sdk_assert(sdk_sanity_check_iocore_structure(viop) == TS_SUCCESS);
 
   VIO *vio = (VIO *)viop;
-  return (TSMutex)(vio->mutex.get());
+  return reinterpret_cast<TSMutex>(vio->mutex.get());
 }
 
 /* High Resolution Time */
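
For illustration only (not part of the patch): a minimal sketch of what the null-tolerant scoped lock gives a caller after this change. call_plugin() and its arguments are hypothetical, while MUTEX_TRY_LOCK, Ptr<ProxyMutex>, Continuation, and this_ethread() are the existing event-system primitives touched above; the behavior shown follows from the new MutexTryLock constructor, which treats a missing mutex as trivially acquired.

// Illustrative sketch, assuming the usual iocore event-system headers.
#include "I_EventSystem.h"

// Hypothetical helper: invoke a plugin continuation that may have no mutex.
void
call_plugin(Continuation *plugin_cont, int event, void *edata)
{
  // With this patch a nullptr continuation mutex no longer needs a special
  // case: the MutexTryLock built by MUTEX_TRY_LOCK reports is_locked() == true
  // when there is no mutex to take, so the handler can be invoked directly.
  MUTEX_TRY_LOCK(lock, plugin_cont->mutex, this_ethread());
  if (!lock.is_locked()) {
    // Real contention on a real mutex: the caller must reschedule, as the
    // ProxySession and HttpSM hunks above do with schedule_in().
    return;
  }
  plugin_cont->handleEvent(event, edata);
}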