ipc/chromium/src/third_party/libevent/evthread-internal.h

Wed, 31 Dec 2014 06:09:35 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Wed, 31 Dec 2014 06:09:35 +0100
changeset 0
6474c204b198
permissions
-rw-r--r--

Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

michael@0 1 /*
michael@0 2 * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
michael@0 3 *
michael@0 4 * Redistribution and use in source and binary forms, with or without
michael@0 5 * modification, are permitted provided that the following conditions
michael@0 6 * are met:
michael@0 7 * 1. Redistributions of source code must retain the above copyright
michael@0 8 * notice, this list of conditions and the following disclaimer.
michael@0 9 * 2. Redistributions in binary form must reproduce the above copyright
michael@0 10 * notice, this list of conditions and the following disclaimer in the
michael@0 11 * documentation and/or other materials provided with the distribution.
michael@0 12 * 3. The name of the author may not be used to endorse or promote products
michael@0 13 * derived from this software without specific prior written permission.
michael@0 14 *
michael@0 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
michael@0 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
michael@0 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
michael@0 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
michael@0 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
michael@0 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
michael@0 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
michael@0 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
michael@0 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
michael@0 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
michael@0 25 */
michael@0 26 #ifndef _EVTHREAD_INTERNAL_H_
michael@0 27 #define _EVTHREAD_INTERNAL_H_
michael@0 28
michael@0 29 #ifdef __cplusplus
michael@0 30 extern "C" {
michael@0 31 #endif
michael@0 32
michael@0 33 #include "event2/thread.h"
michael@0 34 #include "event2/event-config.h"
michael@0 35 #include "util-internal.h"
michael@0 36
michael@0 37 struct event_base;
michael@0 38
michael@0 39 #ifndef WIN32
michael@0 40 /* On Windows, the way we currently make DLLs, it's not allowed for us to
michael@0 41 * have shared global structures. Thus, we only do the direct-call-to-function
michael@0 42 * code path if we know that the local shared library system supports it.
michael@0 43 */
michael@0 44 #define EVTHREAD_EXPOSE_STRUCTS
michael@0 45 #endif
michael@0 46
michael@0 47 #if ! defined(_EVENT_DISABLE_THREAD_SUPPORT) && defined(EVTHREAD_EXPOSE_STRUCTS)
michael@0 48 /* Global function pointers to lock-related functions. NULL if locking isn't
michael@0 49 enabled. */
michael@0 50 extern struct evthread_lock_callbacks _evthread_lock_fns;
michael@0 51 extern struct evthread_condition_callbacks _evthread_cond_fns;
michael@0 52 extern unsigned long (*_evthread_id_fn)(void);
michael@0 53 extern int _evthread_lock_debugging_enabled;
michael@0 54
michael@0 55 /** Return the ID of the current thread, or 1 if threading isn't enabled. */
michael@0 56 #define EVTHREAD_GET_ID() \
michael@0 57 (_evthread_id_fn ? _evthread_id_fn() : 1)
michael@0 58
michael@0 59 /** Return true iff we're in the thread that is currently (or most recently)
michael@0 60 * running a given event_base's loop. Requires lock. */
michael@0 61 #define EVBASE_IN_THREAD(base) \
michael@0 62 (_evthread_id_fn == NULL || \
michael@0 63 (base)->th_owner_id == _evthread_id_fn())
michael@0 64
michael@0 65 /** Return true iff we need to notify the base's main thread about changes to
michael@0 66 * its state, because it's currently running the main loop in another
michael@0 67 * thread. Requires lock. */
michael@0 68 #define EVBASE_NEED_NOTIFY(base) \
michael@0 69 (_evthread_id_fn != NULL && \
michael@0 70 (base)->running_loop && \
michael@0 71 (base)->th_owner_id != _evthread_id_fn())
michael@0 72
michael@0 73 /** Allocate a new lock, and store it in lockvar, a void*. Sets lockvar to
michael@0 74 NULL if locking is not enabled. */
michael@0 75 #define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
michael@0 76 ((lockvar) = _evthread_lock_fns.alloc ? \
michael@0 77 _evthread_lock_fns.alloc(locktype) : NULL)
michael@0 78
michael@0 79 /** Free a given lock, if it is present and locking is enabled. */
michael@0 80 #define EVTHREAD_FREE_LOCK(lockvar, locktype) \
michael@0 81 do { \
michael@0 82 void *_lock_tmp_ = (lockvar); \
michael@0 83 if (_lock_tmp_ && _evthread_lock_fns.free) \
michael@0 84 _evthread_lock_fns.free(_lock_tmp_, (locktype)); \
michael@0 85 } while (0)
michael@0 86
michael@0 87 /** Acquire a lock. */
michael@0 88 #define EVLOCK_LOCK(lockvar,mode) \
michael@0 89 do { \
michael@0 90 if (lockvar) \
michael@0 91 _evthread_lock_fns.lock(mode, lockvar); \
michael@0 92 } while (0)
michael@0 93
michael@0 94 /** Release a lock */
michael@0 95 #define EVLOCK_UNLOCK(lockvar,mode) \
michael@0 96 do { \
michael@0 97 if (lockvar) \
michael@0 98 _evthread_lock_fns.unlock(mode, lockvar); \
michael@0 99 } while (0)
michael@0 100
michael@0 101 /** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
michael@0 102 #define _EVLOCK_SORTLOCKS(lockvar1, lockvar2) \
michael@0 103 do { \
michael@0 104 if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
michael@0 105 void *tmp = lockvar1; \
michael@0 106 lockvar1 = lockvar2; \
michael@0 107 lockvar2 = tmp; \
michael@0 108 } \
michael@0 109 } while (0)
michael@0 110
michael@0 111 /** Lock an event_base, if it is set up for locking. Acquires the lock
michael@0 112 in the base structure whose field is named 'lockvar'. */
michael@0 113 #define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \
michael@0 114 EVLOCK_LOCK((base)->lockvar, 0); \
michael@0 115 } while (0)
michael@0 116
michael@0 117 /** Unlock an event_base, if it is set up for locking. */
michael@0 118 #define EVBASE_RELEASE_LOCK(base, lockvar) do { \
michael@0 119 EVLOCK_UNLOCK((base)->lockvar, 0); \
michael@0 120 } while (0)
michael@0 121
michael@0 122 /** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
michael@0 123 * locked and held by us. */
michael@0 124 #define EVLOCK_ASSERT_LOCKED(lock) \
michael@0 125 do { \
michael@0 126 if ((lock) && _evthread_lock_debugging_enabled) { \
michael@0 127 EVUTIL_ASSERT(_evthread_is_debug_lock_held(lock)); \
michael@0 128 } \
michael@0 129 } while (0)
michael@0 130
michael@0 131 /** Try to grab the lock for 'lockvar' without blocking, and return 1 if we
michael@0 132 * manage to get it. */
michael@0 133 static inline int EVLOCK_TRY_LOCK(void *lock);
michael@0 134 static inline int
michael@0 135 EVLOCK_TRY_LOCK(void *lock)
michael@0 136 {
michael@0 137 if (lock && _evthread_lock_fns.lock) {
michael@0 138 int r = _evthread_lock_fns.lock(EVTHREAD_TRY, lock);
michael@0 139 return !r;
michael@0 140 } else {
michael@0 141 /* Locking is disabled either globally or for this thing;
michael@0 142 * of course we count as having the lock. */
michael@0 143 return 1;
michael@0 144 }
michael@0 145 }
michael@0 146
michael@0 147 /** Allocate a new condition variable and store it in the void *, condvar */
michael@0 148 #define EVTHREAD_ALLOC_COND(condvar) \
michael@0 149 do { \
michael@0 150 (condvar) = _evthread_cond_fns.alloc_condition ? \
michael@0 151 _evthread_cond_fns.alloc_condition(0) : NULL; \
michael@0 152 } while (0)
michael@0 153 /** Deallocate and free a condition variable in condvar */
michael@0 154 #define EVTHREAD_FREE_COND(cond) \
michael@0 155 do { \
michael@0 156 if (cond) \
michael@0 157 _evthread_cond_fns.free_condition((cond)); \
michael@0 158 } while (0)
michael@0 159 /** Signal one thread waiting on cond */
michael@0 160 #define EVTHREAD_COND_SIGNAL(cond) \
michael@0 161 ( (cond) ? _evthread_cond_fns.signal_condition((cond), 0) : 0 )
michael@0 162 /** Signal all threads waiting on cond */
michael@0 163 #define EVTHREAD_COND_BROADCAST(cond) \
michael@0 164 ( (cond) ? _evthread_cond_fns.signal_condition((cond), 1) : 0 )
michael@0 165 /** Wait until the condition 'cond' is signalled. Must be called while
michael@0 166 * holding 'lock'. The lock will be released until the condition is
michael@0 167 * signalled, at which point it will be acquired again. Returns 0 for
michael@0 168 * success, -1 for failure. */
michael@0 169 #define EVTHREAD_COND_WAIT(cond, lock) \
michael@0 170 ( (cond) ? _evthread_cond_fns.wait_condition((cond), (lock), NULL) : 0 )
michael@0 171 /** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
michael@0 172 * on timeout. */
michael@0 173 #define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
michael@0 174 ( (cond) ? _evthread_cond_fns.wait_condition((cond), (lock), (tv)) : 0 )
michael@0 175
michael@0 176 /** True iff locking functions have been configured. */
michael@0 177 #define EVTHREAD_LOCKING_ENABLED() \
michael@0 178 (_evthread_lock_fns.lock != NULL)
michael@0 179
michael@0 180 #elif ! defined(_EVENT_DISABLE_THREAD_SUPPORT)
michael@0 181
michael@0 182 unsigned long _evthreadimpl_get_id(void);
michael@0 183 int _evthreadimpl_is_lock_debugging_enabled(void);
michael@0 184 void *_evthreadimpl_lock_alloc(unsigned locktype);
michael@0 185 void _evthreadimpl_lock_free(void *lock, unsigned locktype);
michael@0 186 int _evthreadimpl_lock_lock(unsigned mode, void *lock);
michael@0 187 int _evthreadimpl_lock_unlock(unsigned mode, void *lock);
michael@0 188 void *_evthreadimpl_cond_alloc(unsigned condtype);
michael@0 189 void _evthreadimpl_cond_free(void *cond);
michael@0 190 int _evthreadimpl_cond_signal(void *cond, int broadcast);
michael@0 191 int _evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv);
michael@0 192 int _evthreadimpl_locking_enabled(void);
michael@0 193
michael@0 194 #define EVTHREAD_GET_ID() _evthreadimpl_get_id()
michael@0 195 #define EVBASE_IN_THREAD(base) \
michael@0 196 ((base)->th_owner_id == _evthreadimpl_get_id())
michael@0 197 #define EVBASE_NEED_NOTIFY(base) \
michael@0 198 ((base)->running_loop && \
michael@0 199 ((base)->th_owner_id != _evthreadimpl_get_id()))
michael@0 200
michael@0 201 #define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
michael@0 202 ((lockvar) = _evthreadimpl_lock_alloc(locktype))
michael@0 203
michael@0 204 #define EVTHREAD_FREE_LOCK(lockvar, locktype) \
michael@0 205 do { \
michael@0 206 void *_lock_tmp_ = (lockvar); \
michael@0 207 if (_lock_tmp_) \
michael@0 208 _evthreadimpl_lock_free(_lock_tmp_, (locktype)); \
michael@0 209 } while (0)
michael@0 210
michael@0 211 /** Acquire a lock. */
michael@0 212 #define EVLOCK_LOCK(lockvar,mode) \
michael@0 213 do { \
michael@0 214 if (lockvar) \
michael@0 215 _evthreadimpl_lock_lock(mode, lockvar); \
michael@0 216 } while (0)
michael@0 217
michael@0 218 /** Release a lock */
michael@0 219 #define EVLOCK_UNLOCK(lockvar,mode) \
michael@0 220 do { \
michael@0 221 if (lockvar) \
michael@0 222 _evthreadimpl_lock_unlock(mode, lockvar); \
michael@0 223 } while (0)
michael@0 224
michael@0 225 /** Lock an event_base, if it is set up for locking. Acquires the lock
michael@0 226 in the base structure whose field is named 'lockvar'. */
michael@0 227 #define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \
michael@0 228 EVLOCK_LOCK((base)->lockvar, 0); \
michael@0 229 } while (0)
michael@0 230
michael@0 231 /** Unlock an event_base, if it is set up for locking. */
michael@0 232 #define EVBASE_RELEASE_LOCK(base, lockvar) do { \
michael@0 233 EVLOCK_UNLOCK((base)->lockvar, 0); \
michael@0 234 } while (0)
michael@0 235
michael@0 236 /** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
michael@0 237 * locked and held by us. */
michael@0 238 #define EVLOCK_ASSERT_LOCKED(lock) \
michael@0 239 do { \
michael@0 240 if ((lock) && _evthreadimpl_is_lock_debugging_enabled()) { \
michael@0 241 EVUTIL_ASSERT(_evthread_is_debug_lock_held(lock)); \
michael@0 242 } \
michael@0 243 } while (0)
michael@0 244
michael@0 245 /** Try to grab the lock for 'lockvar' without blocking, and return 1 if we
michael@0 246 * manage to get it. */
michael@0 247 static inline int EVLOCK_TRY_LOCK(void *lock);
michael@0 248 static inline int
michael@0 249 EVLOCK_TRY_LOCK(void *lock)
michael@0 250 {
michael@0 251 if (lock) {
michael@0 252 int r = _evthreadimpl_lock_lock(EVTHREAD_TRY, lock);
michael@0 253 return !r;
michael@0 254 } else {
michael@0 255 /* Locking is disabled either globally or for this thing;
michael@0 256 * of course we count as having the lock. */
michael@0 257 return 1;
michael@0 258 }
michael@0 259 }
michael@0 260
michael@0 261 /** Allocate a new condition variable and store it in the void *, condvar */
michael@0 262 #define EVTHREAD_ALLOC_COND(condvar) \
michael@0 263 do { \
michael@0 264 (condvar) = _evthreadimpl_cond_alloc(0); \
michael@0 265 } while (0)
michael@0 266 /** Deallocate and free a condition variable in condvar */
michael@0 267 #define EVTHREAD_FREE_COND(cond) \
michael@0 268 do { \
michael@0 269 if (cond) \
michael@0 270 _evthreadimpl_cond_free((cond)); \
michael@0 271 } while (0)
michael@0 272 /** Signal one thread waiting on cond */
michael@0 273 #define EVTHREAD_COND_SIGNAL(cond) \
michael@0 274 ( (cond) ? _evthreadimpl_cond_signal((cond), 0) : 0 )
michael@0 275 /** Signal all threads waiting on cond */
michael@0 276 #define EVTHREAD_COND_BROADCAST(cond) \
michael@0 277 ( (cond) ? _evthreadimpl_cond_signal((cond), 1) : 0 )
michael@0 278 /** Wait until the condition 'cond' is signalled. Must be called while
michael@0 279 * holding 'lock'. The lock will be released until the condition is
michael@0 280 * signalled, at which point it will be acquired again. Returns 0 for
michael@0 281 * success, -1 for failure. */
michael@0 282 #define EVTHREAD_COND_WAIT(cond, lock) \
michael@0 283 ( (cond) ? _evthreadimpl_cond_wait((cond), (lock), NULL) : 0 )
michael@0 284 /** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
michael@0 285 * on timeout. */
michael@0 286 #define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
michael@0 287 ( (cond) ? _evthreadimpl_cond_wait((cond), (lock), (tv)) : 0 )
michael@0 288
michael@0 289 #define EVTHREAD_LOCKING_ENABLED() \
michael@0 290 (_evthreadimpl_locking_enabled())
michael@0 291
michael@0 292 #else /* _EVENT_DISABLE_THREAD_SUPPORT */
michael@0 293
michael@0 294 #define EVTHREAD_GET_ID() 1
michael@0 295 #define EVTHREAD_ALLOC_LOCK(lockvar, locktype) _EVUTIL_NIL_STMT
michael@0 296 #define EVTHREAD_FREE_LOCK(lockvar, locktype) _EVUTIL_NIL_STMT
michael@0 297
michael@0 298 #define EVLOCK_LOCK(lockvar, mode) _EVUTIL_NIL_STMT
michael@0 299 #define EVLOCK_UNLOCK(lockvar, mode) _EVUTIL_NIL_STMT
michael@0 300 #define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) _EVUTIL_NIL_STMT
michael@0 301 #define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) _EVUTIL_NIL_STMT
michael@0 302
michael@0 303 #define EVBASE_IN_THREAD(base) 1
michael@0 304 #define EVBASE_NEED_NOTIFY(base) 0
michael@0 305 #define EVBASE_ACQUIRE_LOCK(base, lock) _EVUTIL_NIL_STMT
michael@0 306 #define EVBASE_RELEASE_LOCK(base, lock) _EVUTIL_NIL_STMT
michael@0 307 #define EVLOCK_ASSERT_LOCKED(lock) _EVUTIL_NIL_STMT
michael@0 308
michael@0 309 #define EVLOCK_TRY_LOCK(lock) 1
michael@0 310
michael@0 311 #define EVTHREAD_ALLOC_COND(condvar) _EVUTIL_NIL_STMT
michael@0 312 #define EVTHREAD_FREE_COND(cond) _EVUTIL_NIL_STMT
michael@0 313 #define EVTHREAD_COND_SIGNAL(cond) _EVUTIL_NIL_STMT
michael@0 314 #define EVTHREAD_COND_BROADCAST(cond) _EVUTIL_NIL_STMT
michael@0 315 #define EVTHREAD_COND_WAIT(cond, lock) _EVUTIL_NIL_STMT
michael@0 316 #define EVTHREAD_COND_WAIT_TIMED(cond, lock, howlong) _EVUTIL_NIL_STMT
michael@0 317
michael@0 318 #define EVTHREAD_LOCKING_ENABLED() 0
michael@0 319
michael@0 320 #endif
michael@0 321
michael@0 322 /* This code is shared between both lock impls */
michael@0 323 #if ! defined(_EVENT_DISABLE_THREAD_SUPPORT)
michael@0 324 /** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
michael@0 325 #define _EVLOCK_SORTLOCKS(lockvar1, lockvar2) \
michael@0 326 do { \
michael@0 327 if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
michael@0 328 void *tmp = lockvar1; \
michael@0 329 lockvar1 = lockvar2; \
michael@0 330 lockvar2 = tmp; \
michael@0 331 } \
michael@0 332 } while (0)
michael@0 333
michael@0 334 /** Acquire both lock1 and lock2. Always allocates locks in the same order,
michael@0 335 * so that two threads locking two locks with LOCK2 will not deadlock. */
michael@0 336 #define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) \
michael@0 337 do { \
michael@0 338 void *_lock1_tmplock = (lock1); \
michael@0 339 void *_lock2_tmplock = (lock2); \
michael@0 340 _EVLOCK_SORTLOCKS(_lock1_tmplock,_lock2_tmplock); \
michael@0 341 EVLOCK_LOCK(_lock1_tmplock,mode1); \
michael@0 342 if (_lock2_tmplock != _lock1_tmplock) \
michael@0 343 EVLOCK_LOCK(_lock2_tmplock,mode2); \
michael@0 344 } while (0)
michael@0 345 /** Release both lock1 and lock2. */
michael@0 346 #define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) \
michael@0 347 do { \
michael@0 348 void *_lock1_tmplock = (lock1); \
michael@0 349 void *_lock2_tmplock = (lock2); \
michael@0 350 _EVLOCK_SORTLOCKS(_lock1_tmplock,_lock2_tmplock); \
michael@0 351 if (_lock2_tmplock != _lock1_tmplock) \
michael@0 352 EVLOCK_UNLOCK(_lock2_tmplock,mode2); \
michael@0 353 EVLOCK_UNLOCK(_lock1_tmplock,mode1); \
michael@0 354 } while (0)
michael@0 355
michael@0 356 int _evthread_is_debug_lock_held(void *lock);
michael@0 357 void *_evthread_debug_get_real_lock(void *lock);
michael@0 358
michael@0 359 void *evthread_setup_global_lock_(void *lock_, unsigned locktype,
michael@0 360 int enable_locks);
michael@0 361
/** Helper used by the *_global_setup_locks_() functions: pass the global
 * lock 'lockvar' (of lock type 'locktype') through
 * evthread_setup_global_lock_(), using the 'enable_locks' variable that
 * must be in scope at the call site.  On allocation failure, warns and
 * makes the enclosing function return -1.
 *
 * NOTE(review): the original ended with "} while (0);" — the trailing
 * semicolon defeats the do/while(0) idiom: the caller's own ';' then adds
 * an empty statement, which breaks use inside an unbraced if/else.  It is
 * removed here, matching every other do/while(0) macro in this header;
 * call sites that already write their own ';' are unaffected. */
#define EVTHREAD_SETUP_GLOBAL_LOCK(lockvar, locktype)			\
	do {								\
		lockvar = evthread_setup_global_lock_(lockvar,		\
		    (locktype), enable_locks);				\
		if (!lockvar) {						\
			event_warn("Couldn't allocate %s", #lockvar);	\
			return -1;					\
		}							\
	} while (0)
michael@0 371
michael@0 372 int event_global_setup_locks_(const int enable_locks);
michael@0 373 int evsig_global_setup_locks_(const int enable_locks);
michael@0 374 int evutil_secure_rng_global_setup_locks_(const int enable_locks);
michael@0 375
michael@0 376 #endif
michael@0 377
michael@0 378 #ifdef __cplusplus
michael@0 379 }
michael@0 380 #endif
michael@0 381
michael@0 382 #endif /* _EVTHREAD_INTERNAL_H_ */

mercurial