ipc/chromium/src/third_party/libevent/evthread.c

author      Michael Schloh von Bennewitz <michael@schloh.com>
date        Wed, 31 Dec 2014 06:09:35 +0100
changeset   0:6474c204b198
permissions -rw-r--r--

Cloned from the upstream tor-browser origin at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

/*
 * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"

#ifndef _EVENT_DISABLE_THREAD_SUPPORT

#include "event2/thread.h"

#include <stdlib.h>
#include <string.h>

#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"

#ifdef EVTHREAD_EXPOSE_STRUCTS
#define GLOBAL
#else
#define GLOBAL static
#endif

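/*
 * With EVTHREAD_EXPOSE_STRUCTS defined, the callback tables below are
 * visible to the rest of the library and invoked directly through macros;
 * otherwise they remain static and are only reached through the
 * _evthreadimpl_* wrappers at the bottom of this file.
 */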
/* globals */
GLOBAL int _evthread_lock_debugging_enabled = 0;
GLOBAL struct evthread_lock_callbacks _evthread_lock_fns = {
	0, 0, NULL, NULL, NULL, NULL
};
GLOBAL unsigned long (*_evthread_id_fn)(void) = NULL;
GLOBAL struct evthread_condition_callbacks _evthread_cond_fns = {
	0, NULL, NULL, NULL, NULL
};

/* Used for debugging */
static struct evthread_lock_callbacks _original_lock_fns = {
	0, 0, NULL, NULL, NULL, NULL
};
static struct evthread_condition_callbacks _original_cond_fns = {
	0, NULL, NULL, NULL, NULL
};

void
evthread_set_id_callback(unsigned long (*id_fn)(void))
{
	_evthread_id_fn = id_fn;
}

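/*
 * Install (or clear, when cbs is NULL) the application's lock callbacks.
 * Once a complete set is installed it can only be replaced by an identical
 * set; any other change fails with -1.  When lock debugging is already
 * enabled, the new callbacks become the "real" set that the debug wrappers
 * forward to.
 */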
int
evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
{
	struct evthread_lock_callbacks *target =
	    _evthread_lock_debugging_enabled
	    ? &_original_lock_fns : &_evthread_lock_fns;

	if (!cbs) {
		if (target->alloc)
			event_warnx("Trying to disable lock functions after "
			    "they have been set up will probably not work.");
		memset(target, 0, sizeof(_evthread_lock_fns));
		return 0;
	}
	if (target->alloc) {
		/* Uh oh; we already had locking callbacks set up. */
		if (target->lock_api_version == cbs->lock_api_version &&
		    target->supported_locktypes == cbs->supported_locktypes &&
		    target->alloc == cbs->alloc &&
		    target->free == cbs->free &&
		    target->lock == cbs->lock &&
		    target->unlock == cbs->unlock) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change lock callbacks once they have been "
		    "initialized.");
		return -1;
	}
	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
		memcpy(target, cbs, sizeof(_evthread_lock_fns));
		return event_global_setup_locks_(1);
	} else {
		return -1;
	}
}

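/*
 * Install (or clear) the condition-variable callbacks, with the same
 * change-once semantics as the lock callbacks.  Under lock debugging,
 * wait_condition stays pointed at debug_cond_wait so waits keep the debug
 * bookkeeping consistent.
 */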
int
evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
{
	struct evthread_condition_callbacks *target =
	    _evthread_lock_debugging_enabled
	    ? &_original_cond_fns : &_evthread_cond_fns;

	if (!cbs) {
		if (target->alloc_condition)
			event_warnx("Trying to disable condition functions "
			    "after they have been set up will probably not "
			    "work.");
		memset(target, 0, sizeof(_evthread_cond_fns));
		return 0;
	}
	if (target->alloc_condition) {
		/* Uh oh; we already had condition callbacks set up. */
		if (target->condition_api_version == cbs->condition_api_version &&
		    target->alloc_condition == cbs->alloc_condition &&
		    target->free_condition == cbs->free_condition &&
		    target->signal_condition == cbs->signal_condition &&
		    target->wait_condition == cbs->wait_condition) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change condition callbacks once they "
		    "have been initialized.");
		return -1;
	}
	if (cbs->alloc_condition && cbs->free_condition &&
	    cbs->signal_condition && cbs->wait_condition) {
		memcpy(target, cbs, sizeof(_evthread_cond_fns));
	}
	if (_evthread_lock_debugging_enabled) {
		/* Keep wait_condition pointed at debug_cond_wait; forward
		 * the rest to the new callbacks. */
		_evthread_cond_fns.alloc_condition = cbs->alloc_condition;
		_evthread_cond_fns.free_condition = cbs->free_condition;
		_evthread_cond_fns.signal_condition = cbs->signal_condition;
	}
	return 0;
}

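/*
 * Debugging wrapper around a real lock.  It tracks the owning thread and a
 * recursion count so that unbalanced unlocks, unlocks from the wrong
 * thread, and re-locking a non-recursive lock trip assertions instead of
 * deadlocking or corrupting state.
 */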
struct debug_lock {
	unsigned locktype;
	unsigned long held_by;
	/* XXXX if we ever use read-write locks, we will need a separate
	 * lock to protect count. */
	int count;
	void *lock;
};

static void *
debug_lock_alloc(unsigned locktype)
{
	struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
	if (!result)
		return NULL;
	if (_original_lock_fns.alloc) {
		/* The underlying lock is always allocated recursive, so a
		 * recursive debug lock can forward repeated acquires to it. */
		if (!(result->lock = _original_lock_fns.alloc(
			    locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
			mm_free(result);
			return NULL;
		}
	} else {
		result->lock = NULL;
	}
	result->locktype = locktype;
	result->count = 0;
	result->held_by = 0;
	return result;
}

static void
debug_lock_free(void *lock_, unsigned locktype)
{
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock->count == 0);
	EVUTIL_ASSERT(locktype == lock->locktype);
	if (_original_lock_fns.free) {
		_original_lock_fns.free(lock->lock,
		    lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
	}
	lock->lock = NULL;
	lock->count = -100; /* Poison so a use-after-free trips the asserts. */
	mm_free(lock);
}

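/* Bookkeeping for an acquire: enforce that non-recursive locks are never
 * re-entered and that recursive acquires come from the owning thread. */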
static void
evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
{
	++lock->count;
	if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
		EVUTIL_ASSERT(lock->count == 1);
	if (_evthread_id_fn) {
		unsigned long me;
		me = _evthread_id_fn();
		if (lock->count > 1)
			EVUTIL_ASSERT(lock->held_by == me);
		lock->held_by = me;
	}
}

static int
debug_lock_lock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (_original_lock_fns.lock)
		res = _original_lock_fns.lock(mode, lock->lock);
	if (!res) {
		evthread_debug_lock_mark_locked(mode, lock);
	}
	return res;
}

static void
evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
{
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (_evthread_id_fn) {
		EVUTIL_ASSERT(lock->held_by == _evthread_id_fn());
		if (lock->count == 1)
			lock->held_by = 0;
	}
	--lock->count;
	EVUTIL_ASSERT(lock->count >= 0);
}

static int
debug_lock_unlock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	evthread_debug_lock_mark_unlocked(mode, lock);
	if (_original_lock_fns.unlock)
		res = _original_lock_fns.unlock(mode, lock->lock);
	return res;
}

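/* Waiting on a condition releases the underlying lock, so drop and then
 * restore the debug bookkeeping around the real wait to keep the
 * ownership checks consistent. */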
static int
debug_cond_wait(void *_cond, void *_lock, const struct timeval *tv)
{
	int r;
	struct debug_lock *lock = _lock;
	EVUTIL_ASSERT(lock);
	EVLOCK_ASSERT_LOCKED(_lock);
	evthread_debug_lock_mark_unlocked(0, lock);
	r = _original_cond_fns.wait_condition(_cond, lock->lock, tv);
	evthread_debug_lock_mark_locked(0, lock);
	return r;
}

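/*
 * Note: "debuging" is the spelling this public symbol shipped with, so it
 * is kept for compatibility.  A typical setup (sketch, assuming libevent
 * was built with its pthreads support) would be:
 *
 *	evthread_use_pthreads();
 *	evthread_enable_lock_debuging();
 *
 * both called before the first event_base is created.
 */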
void
evthread_enable_lock_debuging(void)
{
	struct evthread_lock_callbacks cbs = {
		EVTHREAD_LOCK_API_VERSION,
		EVTHREAD_LOCKTYPE_RECURSIVE,
		debug_lock_alloc,
		debug_lock_free,
		debug_lock_lock,
		debug_lock_unlock
	};
	if (_evthread_lock_debugging_enabled)
		return;
	memcpy(&_original_lock_fns, &_evthread_lock_fns,
	    sizeof(struct evthread_lock_callbacks));
	memcpy(&_evthread_lock_fns, &cbs,
	    sizeof(struct evthread_lock_callbacks));

	memcpy(&_original_cond_fns, &_evthread_cond_fns,
	    sizeof(struct evthread_condition_callbacks));
	_evthread_cond_fns.wait_condition = debug_cond_wait;
	_evthread_lock_debugging_enabled = 1;

	/* XXX return value should get checked. */
	event_global_setup_locks_(0);
}

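/* Return true iff the calling thread currently holds this debug lock;
 * this is what the EVLOCK_ASSERT_LOCKED() checks consult. */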
int
_evthread_is_debug_lock_held(void *lock_)
{
	struct debug_lock *lock = lock_;
	if (! lock->count)
		return 0;
	if (_evthread_id_fn) {
		unsigned long me = _evthread_id_fn();
		if (lock->held_by != me)
			return 0;
	}
	return 1;
}

void *
_evthread_debug_get_real_lock(void *lock_)
{
	struct debug_lock *lock = lock_;
	return lock->lock;
}

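/*
 * Create or upgrade one of the library's global locks.  Reached via
 * event_global_setup_locks_() whenever locking or lock debugging is
 * switched on, so locks made under the old regime can be adapted.
 */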
void *
evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
{
	/* there are four cases here:
	   1) we're turning on debugging; locking is not on.
	   2) we're turning on debugging; locking is on.
	   3) we're turning on locking; debugging is not on.
	   4) we're turning on locking; debugging is on. */

	if (!enable_locks && _original_lock_fns.alloc == NULL) {
		/* Case 1: allocate a debug lock. */
		EVUTIL_ASSERT(lock_ == NULL);
		return debug_lock_alloc(locktype);
	} else if (!enable_locks && _original_lock_fns.alloc != NULL) {
		/* Case 2: wrap the lock in a debug lock. */
		struct debug_lock *lock;
		EVUTIL_ASSERT(lock_ != NULL);

		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
			/* We can't wrap it: We need a recursive lock */
			_original_lock_fns.free(lock_, locktype);
			return debug_lock_alloc(locktype);
		}
		lock = mm_malloc(sizeof(struct debug_lock));
		if (!lock) {
			_original_lock_fns.free(lock_, locktype);
			return NULL;
		}
		lock->lock = lock_;
		lock->locktype = locktype;
		lock->count = 0;
		lock->held_by = 0;
		return lock;
	} else if (enable_locks && ! _evthread_lock_debugging_enabled) {
		/* Case 3: allocate a regular lock */
		EVUTIL_ASSERT(lock_ == NULL);
		return _evthread_lock_fns.alloc(locktype);
	} else {
		/* Case 4: Fill in a debug lock with a real lock */
		struct debug_lock *lock = lock_;
		EVUTIL_ASSERT(enable_locks &&
		    _evthread_lock_debugging_enabled);
		EVUTIL_ASSERT(lock->locktype == locktype);
		EVUTIL_ASSERT(lock->lock == NULL);
		lock->lock = _original_lock_fns.alloc(
		    locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock->lock) {
			lock->count = -200; /* Poison against reuse. */
			mm_free(lock);
			return NULL;
		}
		return lock;
	}
}

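/*
 * Out-of-line equivalents of the EVTHREAD_* helpers, used when the callback
 * tables above are static.  Each one forwards to the installed callback if
 * any, and otherwise behaves as though threading were disabled.
 */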
#ifndef EVTHREAD_EXPOSE_STRUCTS
unsigned long
_evthreadimpl_get_id(void)
{
	return _evthread_id_fn ? _evthread_id_fn() : 1;
}
void *
_evthreadimpl_lock_alloc(unsigned locktype)
{
	return _evthread_lock_fns.alloc ?
	    _evthread_lock_fns.alloc(locktype) : NULL;
}
void
_evthreadimpl_lock_free(void *lock, unsigned locktype)
{
	if (_evthread_lock_fns.free)
		_evthread_lock_fns.free(lock, locktype);
}
int
_evthreadimpl_lock_lock(unsigned mode, void *lock)
{
	if (_evthread_lock_fns.lock)
		return _evthread_lock_fns.lock(mode, lock);
	else
		return 0;
}
int
_evthreadimpl_lock_unlock(unsigned mode, void *lock)
{
	if (_evthread_lock_fns.unlock)
		return _evthread_lock_fns.unlock(mode, lock);
	else
		return 0;
}
void *
_evthreadimpl_cond_alloc(unsigned condtype)
{
	return _evthread_cond_fns.alloc_condition ?
	    _evthread_cond_fns.alloc_condition(condtype) : NULL;
}
void
_evthreadimpl_cond_free(void *cond)
{
	if (_evthread_cond_fns.free_condition)
		_evthread_cond_fns.free_condition(cond);
}
int
_evthreadimpl_cond_signal(void *cond, int broadcast)
{
	if (_evthread_cond_fns.signal_condition)
		return _evthread_cond_fns.signal_condition(cond, broadcast);
	else
		return 0;
}
int
_evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv)
{
	if (_evthread_cond_fns.wait_condition)
		return _evthread_cond_fns.wait_condition(cond, lock, tv);
	else
		return 0;
}
int
_evthreadimpl_is_lock_debugging_enabled(void)
{
	return _evthread_lock_debugging_enabled;
}

int
_evthreadimpl_locking_enabled(void)
{
	return _evthread_lock_fns.lock != NULL;
}
#endif /* !EVTHREAD_EXPOSE_STRUCTS */

#endif /* !_EVENT_DISABLE_THREAD_SUPPORT */
