/*
 * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
michael@0: */ michael@0: michael@0: #include "event2/event-config.h" michael@0: michael@0: #ifndef _EVENT_DISABLE_THREAD_SUPPORT michael@0: michael@0: #include "event2/thread.h" michael@0: michael@0: #include michael@0: #include michael@0: michael@0: #include "log-internal.h" michael@0: #include "mm-internal.h" michael@0: #include "util-internal.h" michael@0: #include "evthread-internal.h" michael@0: michael@0: #ifdef EVTHREAD_EXPOSE_STRUCTS michael@0: #define GLOBAL michael@0: #else michael@0: #define GLOBAL static michael@0: #endif michael@0: michael@0: /* globals */ michael@0: GLOBAL int _evthread_lock_debugging_enabled = 0; michael@0: GLOBAL struct evthread_lock_callbacks _evthread_lock_fns = { michael@0: 0, 0, NULL, NULL, NULL, NULL michael@0: }; michael@0: GLOBAL unsigned long (*_evthread_id_fn)(void) = NULL; michael@0: GLOBAL struct evthread_condition_callbacks _evthread_cond_fns = { michael@0: 0, NULL, NULL, NULL, NULL michael@0: }; michael@0: michael@0: /* Used for debugging */ michael@0: static struct evthread_lock_callbacks _original_lock_fns = { michael@0: 0, 0, NULL, NULL, NULL, NULL michael@0: }; michael@0: static struct evthread_condition_callbacks _original_cond_fns = { michael@0: 0, NULL, NULL, NULL, NULL michael@0: }; michael@0: michael@0: void michael@0: evthread_set_id_callback(unsigned long (*id_fn)(void)) michael@0: { michael@0: _evthread_id_fn = id_fn; michael@0: } michael@0: michael@0: int michael@0: evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs) michael@0: { michael@0: struct evthread_lock_callbacks *target = michael@0: _evthread_lock_debugging_enabled michael@0: ? 
&_original_lock_fns : &_evthread_lock_fns; michael@0: michael@0: if (!cbs) { michael@0: if (target->alloc) michael@0: event_warnx("Trying to disable lock functions after " michael@0: "they have been set up will probaby not work."); michael@0: memset(target, 0, sizeof(_evthread_lock_fns)); michael@0: return 0; michael@0: } michael@0: if (target->alloc) { michael@0: /* Uh oh; we already had locking callbacks set up.*/ michael@0: if (target->lock_api_version == cbs->lock_api_version && michael@0: target->supported_locktypes == cbs->supported_locktypes && michael@0: target->alloc == cbs->alloc && michael@0: target->free == cbs->free && michael@0: target->lock == cbs->lock && michael@0: target->unlock == cbs->unlock) { michael@0: /* no change -- allow this. */ michael@0: return 0; michael@0: } michael@0: event_warnx("Can't change lock callbacks once they have been " michael@0: "initialized."); michael@0: return -1; michael@0: } michael@0: if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) { michael@0: memcpy(target, cbs, sizeof(_evthread_lock_fns)); michael@0: return event_global_setup_locks_(1); michael@0: } else { michael@0: return -1; michael@0: } michael@0: } michael@0: michael@0: int michael@0: evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs) michael@0: { michael@0: struct evthread_condition_callbacks *target = michael@0: _evthread_lock_debugging_enabled michael@0: ? 
&_original_cond_fns : &_evthread_cond_fns; michael@0: michael@0: if (!cbs) { michael@0: if (target->alloc_condition) michael@0: event_warnx("Trying to disable condition functions " michael@0: "after they have been set up will probaby not " michael@0: "work."); michael@0: memset(target, 0, sizeof(_evthread_cond_fns)); michael@0: return 0; michael@0: } michael@0: if (target->alloc_condition) { michael@0: /* Uh oh; we already had condition callbacks set up.*/ michael@0: if (target->condition_api_version == cbs->condition_api_version && michael@0: target->alloc_condition == cbs->alloc_condition && michael@0: target->free_condition == cbs->free_condition && michael@0: target->signal_condition == cbs->signal_condition && michael@0: target->wait_condition == cbs->wait_condition) { michael@0: /* no change -- allow this. */ michael@0: return 0; michael@0: } michael@0: event_warnx("Can't change condition callbacks once they " michael@0: "have been initialized."); michael@0: return -1; michael@0: } michael@0: if (cbs->alloc_condition && cbs->free_condition && michael@0: cbs->signal_condition && cbs->wait_condition) { michael@0: memcpy(target, cbs, sizeof(_evthread_cond_fns)); michael@0: } michael@0: if (_evthread_lock_debugging_enabled) { michael@0: _evthread_cond_fns.alloc_condition = cbs->alloc_condition; michael@0: _evthread_cond_fns.free_condition = cbs->free_condition; michael@0: _evthread_cond_fns.signal_condition = cbs->signal_condition; michael@0: } michael@0: return 0; michael@0: } michael@0: michael@0: struct debug_lock { michael@0: unsigned locktype; michael@0: unsigned long held_by; michael@0: /* XXXX if we ever use read-write locks, we will need a separate michael@0: * lock to protect count. 
*/ michael@0: int count; michael@0: void *lock; michael@0: }; michael@0: michael@0: static void * michael@0: debug_lock_alloc(unsigned locktype) michael@0: { michael@0: struct debug_lock *result = mm_malloc(sizeof(struct debug_lock)); michael@0: if (!result) michael@0: return NULL; michael@0: if (_original_lock_fns.alloc) { michael@0: if (!(result->lock = _original_lock_fns.alloc( michael@0: locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) { michael@0: mm_free(result); michael@0: return NULL; michael@0: } michael@0: } else { michael@0: result->lock = NULL; michael@0: } michael@0: result->locktype = locktype; michael@0: result->count = 0; michael@0: result->held_by = 0; michael@0: return result; michael@0: } michael@0: michael@0: static void michael@0: debug_lock_free(void *lock_, unsigned locktype) michael@0: { michael@0: struct debug_lock *lock = lock_; michael@0: EVUTIL_ASSERT(lock->count == 0); michael@0: EVUTIL_ASSERT(locktype == lock->locktype); michael@0: if (_original_lock_fns.free) { michael@0: _original_lock_fns.free(lock->lock, michael@0: lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE); michael@0: } michael@0: lock->lock = NULL; michael@0: lock->count = -100; michael@0: mm_free(lock); michael@0: } michael@0: michael@0: static void michael@0: evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock) michael@0: { michael@0: ++lock->count; michael@0: if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) michael@0: EVUTIL_ASSERT(lock->count == 1); michael@0: if (_evthread_id_fn) { michael@0: unsigned long me; michael@0: me = _evthread_id_fn(); michael@0: if (lock->count > 1) michael@0: EVUTIL_ASSERT(lock->held_by == me); michael@0: lock->held_by = me; michael@0: } michael@0: } michael@0: michael@0: static int michael@0: debug_lock_lock(unsigned mode, void *lock_) michael@0: { michael@0: struct debug_lock *lock = lock_; michael@0: int res = 0; michael@0: if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE) michael@0: EVUTIL_ASSERT(mode & 
(EVTHREAD_READ|EVTHREAD_WRITE)); michael@0: else michael@0: EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0); michael@0: if (_original_lock_fns.lock) michael@0: res = _original_lock_fns.lock(mode, lock->lock); michael@0: if (!res) { michael@0: evthread_debug_lock_mark_locked(mode, lock); michael@0: } michael@0: return res; michael@0: } michael@0: michael@0: static void michael@0: evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock) michael@0: { michael@0: if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE) michael@0: EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE)); michael@0: else michael@0: EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0); michael@0: if (_evthread_id_fn) { michael@0: EVUTIL_ASSERT(lock->held_by == _evthread_id_fn()); michael@0: if (lock->count == 1) michael@0: lock->held_by = 0; michael@0: } michael@0: --lock->count; michael@0: EVUTIL_ASSERT(lock->count >= 0); michael@0: } michael@0: michael@0: static int michael@0: debug_lock_unlock(unsigned mode, void *lock_) michael@0: { michael@0: struct debug_lock *lock = lock_; michael@0: int res = 0; michael@0: evthread_debug_lock_mark_unlocked(mode, lock); michael@0: if (_original_lock_fns.unlock) michael@0: res = _original_lock_fns.unlock(mode, lock->lock); michael@0: return res; michael@0: } michael@0: michael@0: static int michael@0: debug_cond_wait(void *_cond, void *_lock, const struct timeval *tv) michael@0: { michael@0: int r; michael@0: struct debug_lock *lock = _lock; michael@0: EVUTIL_ASSERT(lock); michael@0: EVLOCK_ASSERT_LOCKED(_lock); michael@0: evthread_debug_lock_mark_unlocked(0, lock); michael@0: r = _original_cond_fns.wait_condition(_cond, lock->lock, tv); michael@0: evthread_debug_lock_mark_locked(0, lock); michael@0: return r; michael@0: } michael@0: michael@0: void michael@0: evthread_enable_lock_debuging(void) michael@0: { michael@0: struct evthread_lock_callbacks cbs = { michael@0: EVTHREAD_LOCK_API_VERSION, michael@0: 
EVTHREAD_LOCKTYPE_RECURSIVE, michael@0: debug_lock_alloc, michael@0: debug_lock_free, michael@0: debug_lock_lock, michael@0: debug_lock_unlock michael@0: }; michael@0: if (_evthread_lock_debugging_enabled) michael@0: return; michael@0: memcpy(&_original_lock_fns, &_evthread_lock_fns, michael@0: sizeof(struct evthread_lock_callbacks)); michael@0: memcpy(&_evthread_lock_fns, &cbs, michael@0: sizeof(struct evthread_lock_callbacks)); michael@0: michael@0: memcpy(&_original_cond_fns, &_evthread_cond_fns, michael@0: sizeof(struct evthread_condition_callbacks)); michael@0: _evthread_cond_fns.wait_condition = debug_cond_wait; michael@0: _evthread_lock_debugging_enabled = 1; michael@0: michael@0: /* XXX return value should get checked. */ michael@0: event_global_setup_locks_(0); michael@0: } michael@0: michael@0: int michael@0: _evthread_is_debug_lock_held(void *lock_) michael@0: { michael@0: struct debug_lock *lock = lock_; michael@0: if (! lock->count) michael@0: return 0; michael@0: if (_evthread_id_fn) { michael@0: unsigned long me = _evthread_id_fn(); michael@0: if (lock->held_by != me) michael@0: return 0; michael@0: } michael@0: return 1; michael@0: } michael@0: michael@0: void * michael@0: _evthread_debug_get_real_lock(void *lock_) michael@0: { michael@0: struct debug_lock *lock = lock_; michael@0: return lock->lock; michael@0: } michael@0: michael@0: void * michael@0: evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks) michael@0: { michael@0: /* there are four cases here: michael@0: 1) we're turning on debugging; locking is not on. michael@0: 2) we're turning on debugging; locking is on. michael@0: 3) we're turning on locking; debugging is not on. michael@0: 4) we're turning on locking; debugging is on. */ michael@0: michael@0: if (!enable_locks && _original_lock_fns.alloc == NULL) { michael@0: /* Case 1: allocate a debug lock. 
*/ michael@0: EVUTIL_ASSERT(lock_ == NULL); michael@0: return debug_lock_alloc(locktype); michael@0: } else if (!enable_locks && _original_lock_fns.alloc != NULL) { michael@0: /* Case 2: wrap the lock in a debug lock. */ michael@0: struct debug_lock *lock; michael@0: EVUTIL_ASSERT(lock_ != NULL); michael@0: michael@0: if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) { michael@0: /* We can't wrap it: We need a recursive lock */ michael@0: _original_lock_fns.free(lock_, locktype); michael@0: return debug_lock_alloc(locktype); michael@0: } michael@0: lock = mm_malloc(sizeof(struct debug_lock)); michael@0: if (!lock) { michael@0: _original_lock_fns.free(lock_, locktype); michael@0: return NULL; michael@0: } michael@0: lock->lock = lock_; michael@0: lock->locktype = locktype; michael@0: lock->count = 0; michael@0: lock->held_by = 0; michael@0: return lock; michael@0: } else if (enable_locks && ! _evthread_lock_debugging_enabled) { michael@0: /* Case 3: allocate a regular lock */ michael@0: EVUTIL_ASSERT(lock_ == NULL); michael@0: return _evthread_lock_fns.alloc(locktype); michael@0: } else { michael@0: /* Case 4: Fill in a debug lock with a real lock */ michael@0: struct debug_lock *lock = lock_; michael@0: EVUTIL_ASSERT(enable_locks && michael@0: _evthread_lock_debugging_enabled); michael@0: EVUTIL_ASSERT(lock->locktype == locktype); michael@0: EVUTIL_ASSERT(lock->lock == NULL); michael@0: lock->lock = _original_lock_fns.alloc( michael@0: locktype|EVTHREAD_LOCKTYPE_RECURSIVE); michael@0: if (!lock->lock) { michael@0: lock->count = -200; michael@0: mm_free(lock); michael@0: return NULL; michael@0: } michael@0: return lock; michael@0: } michael@0: } michael@0: michael@0: michael@0: #ifndef EVTHREAD_EXPOSE_STRUCTS michael@0: unsigned long michael@0: _evthreadimpl_get_id() michael@0: { michael@0: return _evthread_id_fn ? 
_evthread_id_fn() : 1; michael@0: } michael@0: void * michael@0: _evthreadimpl_lock_alloc(unsigned locktype) michael@0: { michael@0: return _evthread_lock_fns.alloc ? michael@0: _evthread_lock_fns.alloc(locktype) : NULL; michael@0: } michael@0: void michael@0: _evthreadimpl_lock_free(void *lock, unsigned locktype) michael@0: { michael@0: if (_evthread_lock_fns.free) michael@0: _evthread_lock_fns.free(lock, locktype); michael@0: } michael@0: int michael@0: _evthreadimpl_lock_lock(unsigned mode, void *lock) michael@0: { michael@0: if (_evthread_lock_fns.lock) michael@0: return _evthread_lock_fns.lock(mode, lock); michael@0: else michael@0: return 0; michael@0: } michael@0: int michael@0: _evthreadimpl_lock_unlock(unsigned mode, void *lock) michael@0: { michael@0: if (_evthread_lock_fns.unlock) michael@0: return _evthread_lock_fns.unlock(mode, lock); michael@0: else michael@0: return 0; michael@0: } michael@0: void * michael@0: _evthreadimpl_cond_alloc(unsigned condtype) michael@0: { michael@0: return _evthread_cond_fns.alloc_condition ? 
michael@0: _evthread_cond_fns.alloc_condition(condtype) : NULL; michael@0: } michael@0: void michael@0: _evthreadimpl_cond_free(void *cond) michael@0: { michael@0: if (_evthread_cond_fns.free_condition) michael@0: _evthread_cond_fns.free_condition(cond); michael@0: } michael@0: int michael@0: _evthreadimpl_cond_signal(void *cond, int broadcast) michael@0: { michael@0: if (_evthread_cond_fns.signal_condition) michael@0: return _evthread_cond_fns.signal_condition(cond, broadcast); michael@0: else michael@0: return 0; michael@0: } michael@0: int michael@0: _evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv) michael@0: { michael@0: if (_evthread_cond_fns.wait_condition) michael@0: return _evthread_cond_fns.wait_condition(cond, lock, tv); michael@0: else michael@0: return 0; michael@0: } michael@0: int michael@0: _evthreadimpl_is_lock_debugging_enabled(void) michael@0: { michael@0: return _evthread_lock_debugging_enabled; michael@0: } michael@0: michael@0: int michael@0: _evthreadimpl_locking_enabled(void) michael@0: { michael@0: return _evthread_lock_fns.lock != NULL; michael@0: } michael@0: #endif michael@0: michael@0: #endif