ipc/chromium/src/third_party/libevent/evthread-internal.h

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/ipc/chromium/src/third_party/libevent/evthread-internal.h	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,382 @@
     1.4 +/*
     1.5 + * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
     1.6 + *
     1.7 + * Redistribution and use in source and binary forms, with or without
     1.8 + * modification, are permitted provided that the following conditions
     1.9 + * are met:
    1.10 + * 1. Redistributions of source code must retain the above copyright
    1.11 + *    notice, this list of conditions and the following disclaimer.
    1.12 + * 2. Redistributions in binary form must reproduce the above copyright
    1.13 + *    notice, this list of conditions and the following disclaimer in the
    1.14 + *    documentation and/or other materials provided with the distribution.
    1.15 + * 3. The name of the author may not be used to endorse or promote products
    1.16 + *    derived from this software without specific prior written permission.
    1.17 + *
    1.18 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
    1.19 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
    1.20 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
    1.21 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
    1.22 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
    1.23 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    1.24 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    1.25 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    1.26 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
    1.27 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    1.28 + */
    1.29 +#ifndef _EVTHREAD_INTERNAL_H_
    1.30 +#define _EVTHREAD_INTERNAL_H_
    1.31 +
    1.32 +#ifdef __cplusplus
    1.33 +extern "C" {
    1.34 +#endif
    1.35 +
    1.36 +#include "event2/thread.h"
    1.37 +#include "event2/event-config.h"
    1.38 +#include "util-internal.h"
    1.39 +
    1.40 +struct event_base;
    1.41 +
/* Decide whether the lock/condition callbacks may be reached directly through
 * shared global structures (the fast path), or must go through wrapper
 * functions. */
#ifndef WIN32
/* On Windows, the way we currently make DLLs, it's not allowed for us to
 * have shared global structures.  Thus, we only do the direct-call-to-function
 * code path if we know that the local shared library system supports it.
 */
#define EVTHREAD_EXPOSE_STRUCTS
#endif
    1.49 +
#if ! defined(_EVENT_DISABLE_THREAD_SUPPORT) && defined(EVTHREAD_EXPOSE_STRUCTS)
/* Global function pointers to lock-related functions. NULL if locking isn't
   enabled. */
extern struct evthread_lock_callbacks _evthread_lock_fns;
extern struct evthread_condition_callbacks _evthread_cond_fns;
/* Callback returning the calling thread's id; NULL when no thread-id
 * function has been configured (the macros below all test it for NULL). */
extern unsigned long (*_evthread_id_fn)(void);
/* Nonzero iff debugging wrappers have been installed around the real locks. */
extern int _evthread_lock_debugging_enabled;

/** Return the ID of the current thread, or 1 if threading isn't enabled. */
#define EVTHREAD_GET_ID() \
	(_evthread_id_fn ? _evthread_id_fn() : 1)
    1.61 +
/** Return true iff we're in the thread that is currently (or most recently)
 * running a given event_base's loop. Requires lock.  (With no thread-id
 * function configured we assume a single-threaded program, so the answer is
 * always true.) */
#define EVBASE_IN_THREAD(base)				 \
	(_evthread_id_fn == NULL ||			 \
	(base)->th_owner_id == _evthread_id_fn())

/** Return true iff we need to notify the base's main thread about changes to
 * its state, because it's currently running the main loop in another
 * thread. Requires lock.  (Without a thread-id function no notification is
 * ever needed.) */
#define EVBASE_NEED_NOTIFY(base)			 \
	(_evthread_id_fn != NULL &&			 \
	    (base)->running_loop &&			 \
	    (base)->th_owner_id != _evthread_id_fn())
    1.75 +
/** Allocate a new lock, and store it in lockvar, a void*.  Sets lockvar to
    NULL if locking is not enabled. */
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype)		\
	((lockvar) = _evthread_lock_fns.alloc ?		\
	    _evthread_lock_fns.alloc(locktype) : NULL)

/** Free a given lock, if it is present and locking is enabled.
 * (lockvar is copied into a temporary so it is evaluated only once.) */
#define EVTHREAD_FREE_LOCK(lockvar, locktype)				\
	do {								\
		void *_lock_tmp_ = (lockvar);				\
		if (_lock_tmp_ && _evthread_lock_fns.free)		\
			_evthread_lock_fns.free(_lock_tmp_, (locktype)); \
	} while (0)

/** Acquire a lock.  A NULL lockvar means locking is disabled for this
 * object, so this is a no-op.  NOTE: lockvar is evaluated twice here; pass
 * a side-effect-free expression. */
#define EVLOCK_LOCK(lockvar,mode)					\
	do {								\
		if (lockvar)						\
			_evthread_lock_fns.lock(mode, lockvar);		\
	} while (0)

/** Release a lock.  No-op when lockvar is NULL; lockvar is evaluated twice. */
#define EVLOCK_UNLOCK(lockvar,mode)					\
	do {								\
		if (lockvar)						\
			_evthread_lock_fns.unlock(mode, lockvar);	\
	} while (0)
   1.103 +
   1.104 +/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
   1.105 +#define _EVLOCK_SORTLOCKS(lockvar1, lockvar2)				\
   1.106 +	do {								\
   1.107 +		if (lockvar1 && lockvar2 && lockvar1 > lockvar2) {	\
   1.108 +			void *tmp = lockvar1;				\
   1.109 +			lockvar1 = lockvar2;				\
   1.110 +			lockvar2 = tmp;					\
   1.111 +		}							\
   1.112 +	} while (0)
   1.113 +
/** Lock an event_base, if it is set up for locking.  Acquires the lock
    in the base structure whose field is named 'lockvar'. */
#define EVBASE_ACQUIRE_LOCK(base, lockvar) do {				\
		EVLOCK_LOCK((base)->lockvar, 0);			\
	} while (0)

/** Unlock an event_base, if it is set up for locking. */
#define EVBASE_RELEASE_LOCK(base, lockvar) do {				\
		EVLOCK_UNLOCK((base)->lockvar, 0);			\
	} while (0)

/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
 * locked and held by us.  (Only checks anything when the debug-lock wrappers
 * are active, since only they can answer _evthread_is_debug_lock_held().) */
#define EVLOCK_ASSERT_LOCKED(lock)					\
	do {								\
		if ((lock) && _evthread_lock_debugging_enabled) {	\
			EVUTIL_ASSERT(_evthread_is_debug_lock_held(lock)); \
		}							\
	} while (0)
   1.133 +
   1.134 +/** Try to grab the lock for 'lockvar' without blocking, and return 1 if we
   1.135 + * manage to get it. */
   1.136 +static inline int EVLOCK_TRY_LOCK(void *lock);
   1.137 +static inline int
   1.138 +EVLOCK_TRY_LOCK(void *lock)
   1.139 +{
   1.140 +	if (lock && _evthread_lock_fns.lock) {
   1.141 +		int r = _evthread_lock_fns.lock(EVTHREAD_TRY, lock);
   1.142 +		return !r;
   1.143 +	} else {
   1.144 +		/* Locking is disabled either globally or for this thing;
   1.145 +		 * of course we count as having the lock. */
   1.146 +		return 1;
   1.147 +	}
   1.148 +}
   1.149 +
/** Allocate a new condition variable and store it in the void *, condvar.
 * Sets condvar to NULL if condition variables are not enabled. */
#define EVTHREAD_ALLOC_COND(condvar)					\
	do {								\
		(condvar) = _evthread_cond_fns.alloc_condition ?	\
		    _evthread_cond_fns.alloc_condition(0) : NULL;	\
	} while (0)
/** Deallocate and free a condition variable in condvar.  No-op on NULL. */
#define EVTHREAD_FREE_COND(cond)					\
	do {								\
		if (cond)						\
			_evthread_cond_fns.free_condition((cond));	\
	} while (0)
/** Signal one thread waiting on cond.  Evaluates to 0 when cond is NULL. */
#define EVTHREAD_COND_SIGNAL(cond)					\
	( (cond) ? _evthread_cond_fns.signal_condition((cond), 0) : 0 )
/** Signal all threads waiting on cond */
#define EVTHREAD_COND_BROADCAST(cond)					\
	( (cond) ? _evthread_cond_fns.signal_condition((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled.  Must be called while
 * holding 'lock'.  The lock will be released until the condition is
 * signalled, at which point it will be acquired again.  Returns 0 for
 * success, -1 for failure. */
#define EVTHREAD_COND_WAIT(cond, lock)					\
	( (cond) ? _evthread_cond_fns.wait_condition((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed.  Returns 1
 * on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv)			\
	( (cond) ? _evthread_cond_fns.wait_condition((cond), (lock), (tv)) : 0 )

/** True iff locking functions have been configured. */
#define EVTHREAD_LOCKING_ENABLED()		\
	(_evthread_lock_fns.lock != NULL)
   1.182 +
#elif ! defined(_EVENT_DISABLE_THREAD_SUPPORT)

/* Thread support is enabled but the callback structures cannot be exposed
 * across module boundaries (see the EVTHREAD_EXPOSE_STRUCTS note above, e.g.
 * Windows DLL builds): route everything through these wrapper functions
 * instead of the global function-pointer tables. */
unsigned long _evthreadimpl_get_id(void);
int _evthreadimpl_is_lock_debugging_enabled(void);
void *_evthreadimpl_lock_alloc(unsigned locktype);
void _evthreadimpl_lock_free(void *lock, unsigned locktype);
int _evthreadimpl_lock_lock(unsigned mode, void *lock);
int _evthreadimpl_lock_unlock(unsigned mode, void *lock);
void *_evthreadimpl_cond_alloc(unsigned condtype);
void _evthreadimpl_cond_free(void *cond);
int _evthreadimpl_cond_signal(void *cond, int broadcast);
int _evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv);
int _evthreadimpl_locking_enabled(void);

/** Return the ID of the current thread. */
#define EVTHREAD_GET_ID() _evthreadimpl_get_id()
/** Return true iff we're in the thread that is currently (or most recently)
 * running a given event_base's loop. Requires lock. */
#define EVBASE_IN_THREAD(base)				\
	((base)->th_owner_id == _evthreadimpl_get_id())
/** Return true iff we need to notify the base's main thread about changes to
 * its state, because its loop is running in another thread. Requires lock. */
#define EVBASE_NEED_NOTIFY(base)			 \
	((base)->running_loop &&			 \
	    ((base)->th_owner_id != _evthreadimpl_get_id()))
   1.203 +
/** Allocate a new lock, and store it in lockvar, a void*. */
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype)		\
	((lockvar) = _evthreadimpl_lock_alloc(locktype))

/** Free a given lock, if it is present.  (lockvar is copied into a temporary
 * so it is evaluated only once.) */
#define EVTHREAD_FREE_LOCK(lockvar, locktype)				\
	do {								\
		void *_lock_tmp_ = (lockvar);				\
		if (_lock_tmp_)						\
			_evthreadimpl_lock_free(_lock_tmp_, (locktype)); \
	} while (0)

/** Acquire a lock.  No-op when lockvar is NULL; lockvar is evaluated twice. */
#define EVLOCK_LOCK(lockvar,mode)					\
	do {								\
		if (lockvar)						\
			_evthreadimpl_lock_lock(mode, lockvar);		\
	} while (0)

/** Release a lock.  No-op when lockvar is NULL; lockvar is evaluated twice. */
#define EVLOCK_UNLOCK(lockvar,mode)					\
	do {								\
		if (lockvar)						\
			_evthreadimpl_lock_unlock(mode, lockvar);	\
	} while (0)
   1.227 +
/** Lock an event_base, if it is set up for locking.  Acquires the lock
    in the base structure whose field is named 'lockvar'. */
#define EVBASE_ACQUIRE_LOCK(base, lockvar) do {				\
		EVLOCK_LOCK((base)->lockvar, 0);			\
	} while (0)

/** Unlock an event_base, if it is set up for locking. */
#define EVBASE_RELEASE_LOCK(base, lockvar) do {				\
		EVLOCK_UNLOCK((base)->lockvar, 0);			\
	} while (0)

/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
 * locked and held by us. */
#define EVLOCK_ASSERT_LOCKED(lock)					\
	do {								\
		if ((lock) && _evthreadimpl_is_lock_debugging_enabled()) { \
			EVUTIL_ASSERT(_evthread_is_debug_lock_held(lock)); \
		}							\
	} while (0)
   1.247 +
   1.248 +/** Try to grab the lock for 'lockvar' without blocking, and return 1 if we
   1.249 + * manage to get it. */
   1.250 +static inline int EVLOCK_TRY_LOCK(void *lock);
   1.251 +static inline int
   1.252 +EVLOCK_TRY_LOCK(void *lock)
   1.253 +{
   1.254 +	if (lock) {
   1.255 +		int r = _evthreadimpl_lock_lock(EVTHREAD_TRY, lock);
   1.256 +		return !r;
   1.257 +	} else {
   1.258 +		/* Locking is disabled either globally or for this thing;
   1.259 +		 * of course we count as having the lock. */
   1.260 +		return 1;
   1.261 +	}
   1.262 +}
   1.263 +
/** Allocate a new condition variable and store it in the void *, condvar */
#define EVTHREAD_ALLOC_COND(condvar)					\
	do {								\
		(condvar) = _evthreadimpl_cond_alloc(0);		\
	} while (0)
/** Deallocate and free a condition variable in condvar.  No-op on NULL. */
#define EVTHREAD_FREE_COND(cond)					\
	do {								\
		if (cond)						\
			_evthreadimpl_cond_free((cond));		\
	} while (0)
/** Signal one thread waiting on cond.  Evaluates to 0 when cond is NULL. */
#define EVTHREAD_COND_SIGNAL(cond)					\
	( (cond) ? _evthreadimpl_cond_signal((cond), 0) : 0 )
/** Signal all threads waiting on cond */
#define EVTHREAD_COND_BROADCAST(cond)					\
	( (cond) ? _evthreadimpl_cond_signal((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled.  Must be called while
 * holding 'lock'.  The lock will be released until the condition is
 * signalled, at which point it will be acquired again.  Returns 0 for
 * success, -1 for failure. */
#define EVTHREAD_COND_WAIT(cond, lock)					\
	( (cond) ? _evthreadimpl_cond_wait((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed.  Returns 1
 * on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv)			\
	( (cond) ? _evthreadimpl_cond_wait((cond), (lock), (tv)) : 0 )

/** True iff locking functions have been configured. */
#define EVTHREAD_LOCKING_ENABLED()		\
	(_evthreadimpl_locking_enabled())
   1.294 +
#else /* _EVENT_DISABLE_THREAD_SUPPORT */

/* Thread support is compiled out entirely: every threading primitive
 * collapses to a no-op statement or a constant. */
#define EVTHREAD_GET_ID()	1
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) _EVUTIL_NIL_STMT
#define EVTHREAD_FREE_LOCK(lockvar, locktype) _EVUTIL_NIL_STMT

#define EVLOCK_LOCK(lockvar, mode) _EVUTIL_NIL_STMT
#define EVLOCK_UNLOCK(lockvar, mode) _EVUTIL_NIL_STMT
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) _EVUTIL_NIL_STMT
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) _EVUTIL_NIL_STMT

/* Single-threaded: we are always "in" the base's thread and never need to
 * notify it. */
#define EVBASE_IN_THREAD(base)	1
#define EVBASE_NEED_NOTIFY(base) 0
#define EVBASE_ACQUIRE_LOCK(base, lock) _EVUTIL_NIL_STMT
#define EVBASE_RELEASE_LOCK(base, lock) _EVUTIL_NIL_STMT
#define EVLOCK_ASSERT_LOCKED(lock) _EVUTIL_NIL_STMT

/* Trying a lock always succeeds when there are no locks. */
#define EVLOCK_TRY_LOCK(lock) 1

/* NOTE(review): in the threaded branches EVTHREAD_COND_SIGNAL/BROADCAST/WAIT*
 * expand to int-valued expressions, but here they expand to a nil statement;
 * any caller that uses their value would fail to compile with threads
 * disabled — verify no caller does. */
#define EVTHREAD_ALLOC_COND(condvar) _EVUTIL_NIL_STMT
#define EVTHREAD_FREE_COND(cond) _EVUTIL_NIL_STMT
#define EVTHREAD_COND_SIGNAL(cond) _EVUTIL_NIL_STMT
#define EVTHREAD_COND_BROADCAST(cond) _EVUTIL_NIL_STMT
#define EVTHREAD_COND_WAIT(cond, lock) _EVUTIL_NIL_STMT
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, howlong) _EVUTIL_NIL_STMT

#define EVTHREAD_LOCKING_ENABLED() 0

#endif
   1.324 +
/* This code is shared between both lock impls */
#if ! defined(_EVENT_DISABLE_THREAD_SUPPORT)
/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order, so
 * that EVLOCK_LOCK2/EVLOCK_UNLOCK2 always acquire locks in one canonical
 * order.  (NOTE(review): relational comparison of pointers to distinct
 * objects is not defined by strict ISO C, though it behaves as expected on
 * the flat-address-space platforms this targets.) */
#define _EVLOCK_SORTLOCKS(lockvar1, lockvar2)				\
	do {								\
		if (lockvar1 && lockvar2 && lockvar1 > lockvar2) {	\
			void *tmp = lockvar1;				\
			lockvar1 = lockvar2;				\
			lockvar2 = tmp;					\
		}							\
	} while (0)
   1.336 +
/** Acquire both lock1 and lock2.  Always acquires the locks in the same
 * (pointerwise ascending) order, so that two threads locking two locks with
 * LOCK2 will not deadlock.  If both arguments name the same lock it is
 * acquired only once. */
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2)				\
	do {								\
		void *_lock1_tmplock = (lock1);				\
		void *_lock2_tmplock = (lock2);				\
		_EVLOCK_SORTLOCKS(_lock1_tmplock,_lock2_tmplock);	\
		EVLOCK_LOCK(_lock1_tmplock,mode1);			\
		if (_lock2_tmplock != _lock1_tmplock)			\
			EVLOCK_LOCK(_lock2_tmplock,mode2);		\
	} while (0)
/** Release both lock1 and lock2, in the reverse of the order in which
 * EVLOCK_LOCK2 acquires them.  */
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2)				\
	do {								\
		void *_lock1_tmplock = (lock1);				\
		void *_lock2_tmplock = (lock2);				\
		_EVLOCK_SORTLOCKS(_lock1_tmplock,_lock2_tmplock);	\
		if (_lock2_tmplock != _lock1_tmplock)			\
			EVLOCK_UNLOCK(_lock2_tmplock,mode2);		\
		EVLOCK_UNLOCK(_lock1_tmplock,mode1);			\
	} while (0)
   1.358 +
   1.359 +int _evthread_is_debug_lock_held(void *lock);
   1.360 +void *_evthread_debug_get_real_lock(void *lock);
   1.361 +
   1.362 +void *evthread_setup_global_lock_(void *lock_, unsigned locktype,
   1.363 +    int enable_locks);
   1.364 +
   1.365 +#define EVTHREAD_SETUP_GLOBAL_LOCK(lockvar, locktype)			\
   1.366 +	do {								\
   1.367 +		lockvar = evthread_setup_global_lock_(lockvar,		\
   1.368 +		    (locktype), enable_locks);				\
   1.369 +		if (!lockvar) {						\
   1.370 +			event_warn("Couldn't allocate %s", #lockvar);	\
   1.371 +			return -1;					\
   1.372 +		}							\
   1.373 +	} while (0);
   1.374 +
   1.375 +int event_global_setup_locks_(const int enable_locks);
   1.376 +int evsig_global_setup_locks_(const int enable_locks);
   1.377 +int evutil_secure_rng_global_setup_locks_(const int enable_locks);
   1.378 +
   1.379 +#endif
   1.380 +
   1.381 +#ifdef __cplusplus
   1.382 +}
   1.383 +#endif
   1.384 +
   1.385 +#endif /* _EVTHREAD_INTERNAL_H_ */

mercurial