ipc/chromium/src/third_party/libevent/evthread.c

changeset 0
6474c204b198
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ipc/chromium/src/third_party/libevent/evthread.c	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,445 @@
+/*
+ * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
+
+#include "event2/thread.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "util-internal.h"
+#include "evthread-internal.h"
+
+#ifdef EVTHREAD_EXPOSE_STRUCTS
+#define GLOBAL
+#else
+#define GLOBAL static
+#endif
+
+/* globals */
+GLOBAL int _evthread_lock_debugging_enabled = 0;
+GLOBAL struct evthread_lock_callbacks _evthread_lock_fns = {
+	0, 0, NULL, NULL, NULL, NULL
+};
+GLOBAL unsigned long (*_evthread_id_fn)(void) = NULL;
+GLOBAL struct evthread_condition_callbacks _evthread_cond_fns = {
+	0, NULL, NULL, NULL, NULL
+};
+
+/* Used for debugging */
+static struct evthread_lock_callbacks _original_lock_fns = {
+	0, 0, NULL, NULL, NULL, NULL
+};
+static struct evthread_condition_callbacks _original_cond_fns = {
+	0, NULL, NULL, NULL, NULL
+};
+
+void
+evthread_set_id_callback(unsigned long (*id_fn)(void))
+{
+	_evthread_id_fn = id_fn;
+}
+
+int
+evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
+{
+	struct evthread_lock_callbacks *target =
+	    _evthread_lock_debugging_enabled
+	    ? &_original_lock_fns : &_evthread_lock_fns;
+
+	if (!cbs) {
+		if (target->alloc)
+			event_warnx("Trying to disable lock functions after "
+			    "they have been set up will probably not work.");
+		memset(target, 0, sizeof(_evthread_lock_fns));
+		return 0;
+	}
+	if (target->alloc) {
+		/* Uh oh; we already had locking callbacks set up.*/
+		if (target->lock_api_version == cbs->lock_api_version &&
+			target->supported_locktypes == cbs->supported_locktypes &&
+			target->alloc == cbs->alloc &&
+			target->free == cbs->free &&
+			target->lock == cbs->lock &&
+			target->unlock == cbs->unlock) {
+			/* no change -- allow this. */
+			return 0;
+		}
+		event_warnx("Can't change lock callbacks once they have been "
+		    "initialized.");
+		return -1;
+	}
+	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
+		memcpy(target, cbs, sizeof(_evthread_lock_fns));
+		return event_global_setup_locks_(1);
+	} else {
+		return -1;
+	}
+}
+
+int
+evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
+{
+	struct evthread_condition_callbacks *target =
+	    _evthread_lock_debugging_enabled
+	    ? &_original_cond_fns : &_evthread_cond_fns;
+
+	if (!cbs) {
+		if (target->alloc_condition)
+			event_warnx("Trying to disable condition functions "
+			    "after they have been set up will probably not "
+			    "work.");
+		memset(target, 0, sizeof(_evthread_cond_fns));
+		return 0;
+	}
+	if (target->alloc_condition) {
+		/* Uh oh; we already had condition callbacks set up.*/
+		if (target->condition_api_version == cbs->condition_api_version &&
+			target->alloc_condition == cbs->alloc_condition &&
+			target->free_condition == cbs->free_condition &&
+			target->signal_condition == cbs->signal_condition &&
+			target->wait_condition == cbs->wait_condition) {
+			/* no change -- allow this. */
+			return 0;
+		}
+		event_warnx("Can't change condition callbacks once they "
+		    "have been initialized.");
+		return -1;
+	}
+	if (cbs->alloc_condition && cbs->free_condition &&
+	    cbs->signal_condition && cbs->wait_condition) {
+		memcpy(target, cbs, sizeof(_evthread_cond_fns));
+	}
+	if (_evthread_lock_debugging_enabled) {
+		_evthread_cond_fns.alloc_condition = cbs->alloc_condition;
+		_evthread_cond_fns.free_condition = cbs->free_condition;
+		_evthread_cond_fns.signal_condition = cbs->signal_condition;
+	}
+	return 0;
+}
+
+struct debug_lock {
+	unsigned locktype;
+	unsigned long held_by;
+	/* XXXX if we ever use read-write locks, we will need a separate
+	 * lock to protect count. */
+	int count;
+	void *lock;
+};
+
+static void *
+debug_lock_alloc(unsigned locktype)
+{
+	struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
+	if (!result)
+		return NULL;
+	if (_original_lock_fns.alloc) {
+		if (!(result->lock = _original_lock_fns.alloc(
+				locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
+			mm_free(result);
+			return NULL;
+		}
+	} else {
+		result->lock = NULL;
+	}
+	result->locktype = locktype;
+	result->count = 0;
+	result->held_by = 0;
+	return result;
+}
+
+static void
+debug_lock_free(void *lock_, unsigned locktype)
+{
+	struct debug_lock *lock = lock_;
+	EVUTIL_ASSERT(lock->count == 0);
+	EVUTIL_ASSERT(locktype == lock->locktype);
+	if (_original_lock_fns.free) {
+		_original_lock_fns.free(lock->lock,
+		    lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
+	}
+	lock->lock = NULL;
+	lock->count = -100;
+	mm_free(lock);
+}
+
+static void
+evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
+{
+	++lock->count;
+	if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
+		EVUTIL_ASSERT(lock->count == 1);
+	if (_evthread_id_fn) {
+		unsigned long me;
+		me = _evthread_id_fn();
+		if (lock->count > 1)
+			EVUTIL_ASSERT(lock->held_by == me);
+		lock->held_by = me;
+	}
+}
+
+static int
+debug_lock_lock(unsigned mode, void *lock_)
+{
+	struct debug_lock *lock = lock_;
+	int res = 0;
+	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
+		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
+	else
+		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
+	if (_original_lock_fns.lock)
+		res = _original_lock_fns.lock(mode, lock->lock);
+	if (!res) {
+		evthread_debug_lock_mark_locked(mode, lock);
+	}
+	return res;
+}
+
+static void
+evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
+{
+	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
+		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
+	else
+		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
+	if (_evthread_id_fn) {
+		EVUTIL_ASSERT(lock->held_by == _evthread_id_fn());
+		if (lock->count == 1)
+			lock->held_by = 0;
+	}
+	--lock->count;
+	EVUTIL_ASSERT(lock->count >= 0);
+}
+
+static int
+debug_lock_unlock(unsigned mode, void *lock_)
+{
+	struct debug_lock *lock = lock_;
+	int res = 0;
+	evthread_debug_lock_mark_unlocked(mode, lock);
+	if (_original_lock_fns.unlock)
+		res = _original_lock_fns.unlock(mode, lock->lock);
+	return res;
+}
+
+static int
+debug_cond_wait(void *_cond, void *_lock, const struct timeval *tv)
+{
+	int r;
+	struct debug_lock *lock = _lock;
+	EVUTIL_ASSERT(lock);
+	EVLOCK_ASSERT_LOCKED(_lock);
+	evthread_debug_lock_mark_unlocked(0, lock);
+	r = _original_cond_fns.wait_condition(_cond, lock->lock, tv);
+	evthread_debug_lock_mark_locked(0, lock);
+	return r;
+}
+
+void
+evthread_enable_lock_debuging(void)
+{
+	struct evthread_lock_callbacks cbs = {
+		EVTHREAD_LOCK_API_VERSION,
+		EVTHREAD_LOCKTYPE_RECURSIVE,
+		debug_lock_alloc,
+		debug_lock_free,
+		debug_lock_lock,
+		debug_lock_unlock
+	};
+	if (_evthread_lock_debugging_enabled)
+		return;
+	memcpy(&_original_lock_fns, &_evthread_lock_fns,
+	    sizeof(struct evthread_lock_callbacks));
+	memcpy(&_evthread_lock_fns, &cbs,
+	    sizeof(struct evthread_lock_callbacks));
+
+	memcpy(&_original_cond_fns, &_evthread_cond_fns,
+	    sizeof(struct evthread_condition_callbacks));
+	_evthread_cond_fns.wait_condition = debug_cond_wait;
+	_evthread_lock_debugging_enabled = 1;
+
+	/* XXX return value should get checked. */
+	event_global_setup_locks_(0);
+}
+
+int
+_evthread_is_debug_lock_held(void *lock_)
+{
+	struct debug_lock *lock = lock_;
+	if (! lock->count)
+		return 0;
+	if (_evthread_id_fn) {
+		unsigned long me = _evthread_id_fn();
+		if (lock->held_by != me)
+			return 0;
+	}
+	return 1;
+}
+
+void *
+_evthread_debug_get_real_lock(void *lock_)
+{
+	struct debug_lock *lock = lock_;
+	return lock->lock;
+}
+
+void *
+evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
+{
+	/* there are four cases here:
+	   1) we're turning on debugging; locking is not on.
+	   2) we're turning on debugging; locking is on.
+	   3) we're turning on locking; debugging is not on.
+	   4) we're turning on locking; debugging is on. */
+
+	if (!enable_locks && _original_lock_fns.alloc == NULL) {
+		/* Case 1: allocate a debug lock. */
+		EVUTIL_ASSERT(lock_ == NULL);
+		return debug_lock_alloc(locktype);
+	} else if (!enable_locks && _original_lock_fns.alloc != NULL) {
+		/* Case 2: wrap the lock in a debug lock. */
+		struct debug_lock *lock;
+		EVUTIL_ASSERT(lock_ != NULL);
+
+		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
+			/* We can't wrap it: We need a recursive lock */
+			_original_lock_fns.free(lock_, locktype);
+			return debug_lock_alloc(locktype);
+		}
+		lock = mm_malloc(sizeof(struct debug_lock));
+		if (!lock) {
+			_original_lock_fns.free(lock_, locktype);
+			return NULL;
+		}
+		lock->lock = lock_;
+		lock->locktype = locktype;
+		lock->count = 0;
+		lock->held_by = 0;
+		return lock;
+	} else if (enable_locks && ! _evthread_lock_debugging_enabled) {
+		/* Case 3: allocate a regular lock */
+		EVUTIL_ASSERT(lock_ == NULL);
+		return _evthread_lock_fns.alloc(locktype);
+	} else {
+		/* Case 4: Fill in a debug lock with a real lock */
+		struct debug_lock *lock = lock_;
+		EVUTIL_ASSERT(enable_locks &&
+		              _evthread_lock_debugging_enabled);
+		EVUTIL_ASSERT(lock->locktype == locktype);
+		EVUTIL_ASSERT(lock->lock == NULL);
+		lock->lock = _original_lock_fns.alloc(
+			locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
+		if (!lock->lock) {
+			lock->count = -200;
+			mm_free(lock);
+			return NULL;
+		}
+		return lock;
+	}
+}
+
+
+#ifndef EVTHREAD_EXPOSE_STRUCTS
+unsigned long
+_evthreadimpl_get_id()
+{
+	return _evthread_id_fn ? _evthread_id_fn() : 1;
+}
+void *
+_evthreadimpl_lock_alloc(unsigned locktype)
+{
+	return _evthread_lock_fns.alloc ?
+	    _evthread_lock_fns.alloc(locktype) : NULL;
+}
+void
+_evthreadimpl_lock_free(void *lock, unsigned locktype)
+{
+	if (_evthread_lock_fns.free)
+		_evthread_lock_fns.free(lock, locktype);
+}
+int
+_evthreadimpl_lock_lock(unsigned mode, void *lock)
+{
+	if (_evthread_lock_fns.lock)
+		return _evthread_lock_fns.lock(mode, lock);
+	else
+		return 0;
+}
+int
+_evthreadimpl_lock_unlock(unsigned mode, void *lock)
+{
+	if (_evthread_lock_fns.unlock)
+		return _evthread_lock_fns.unlock(mode, lock);
+	else
+		return 0;
+}
+void *
+_evthreadimpl_cond_alloc(unsigned condtype)
+{
+	return _evthread_cond_fns.alloc_condition ?
+	    _evthread_cond_fns.alloc_condition(condtype) : NULL;
+}
+void
+_evthreadimpl_cond_free(void *cond)
+{
+	if (_evthread_cond_fns.free_condition)
+		_evthread_cond_fns.free_condition(cond);
+}
+int
+_evthreadimpl_cond_signal(void *cond, int broadcast)
+{
+	if (_evthread_cond_fns.signal_condition)
+		return _evthread_cond_fns.signal_condition(cond, broadcast);
+	else
+		return 0;
+}
+int
+_evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv)
+{
+	if (_evthread_cond_fns.wait_condition)
+		return _evthread_cond_fns.wait_condition(cond, lock, tv);
+	else
+		return 0;
+}
+int
+_evthreadimpl_is_lock_debugging_enabled(void)
+{
+	return _evthread_lock_debugging_enabled;
+}
+
+int
+_evthreadimpl_locking_enabled(void)
+{
+	return _evthread_lock_fns.lock != NULL;
+}
+#endif
+
+#endif
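
A minimal usage sketch of the API this file implements, assuming a POSIX build of libevent 2.0 linked against event_pthreads: evthread_use_pthreads() installs pthread-based lock, condition, and thread-id callbacks through the setters above, and evthread_enable_lock_debuging() swaps in the debug_lock wrappers so ownership and recursion mistakes trip assertions.

/* Sketch only: assumes libevent 2.0 with pthreads support, linked
 * against -levent -levent_pthreads. */
#include <event2/thread.h>
#include <event2/event.h>
#include <stdio.h>

int
main(void)
{
	struct event_base *base;

	/* Install pthread-based callbacks via evthread_set_lock_callbacks()
	 * and friends; must happen before the first event_base is created. */
	if (evthread_use_pthreads() < 0) {
		fprintf(stderr, "could not enable threading for libevent\n");
		return 1;
	}

	/* Optional: wrap every lock in the debug_lock checks above so that
	 * unlocking an unheld lock or re-locking a non-recursive lock is
	 * caught at runtime. */
	evthread_enable_lock_debuging();

	base = event_base_new();
	if (!base)
		return 1;
	/* ... add events, then run the loop ... */
	event_base_dispatch(base);
	event_base_free(base);
	return 0;
}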
