ipc/chromium/src/third_party/libevent/event.c

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Wed, 31 Dec 2014 06:09:35 +0100
changeset    0:6474c204b198
permissions  -rw-r--r--

Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

michael@0 1 /*
michael@0 2 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
michael@0 3 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
michael@0 4 *
michael@0 5 * Redistribution and use in source and binary forms, with or without
michael@0 6 * modification, are permitted provided that the following conditions
michael@0 7 * are met:
michael@0 8 * 1. Redistributions of source code must retain the above copyright
michael@0 9 * notice, this list of conditions and the following disclaimer.
michael@0 10 * 2. Redistributions in binary form must reproduce the above copyright
michael@0 11 * notice, this list of conditions and the following disclaimer in the
michael@0 12 * documentation and/or other materials provided with the distribution.
michael@0 13 * 3. The name of the author may not be used to endorse or promote products
michael@0 14 * derived from this software without specific prior written permission.
michael@0 15 *
michael@0 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
michael@0 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
michael@0 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
michael@0 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
michael@0 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
michael@0 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
michael@0 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
michael@0 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
michael@0 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
michael@0 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
michael@0 26 */
michael@0 27 #include "event2/event-config.h"
michael@0 28
michael@0 29 #ifdef WIN32
michael@0 30 #include <winsock2.h>
michael@0 31 #define WIN32_LEAN_AND_MEAN
michael@0 32 #include <windows.h>
michael@0 33 #undef WIN32_LEAN_AND_MEAN
michael@0 34 #endif
michael@0 35 #include <sys/types.h>
michael@0 36 #if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
michael@0 37 #include <sys/time.h>
michael@0 38 #endif
michael@0 39 #include <sys/queue.h>
michael@0 40 #ifdef _EVENT_HAVE_SYS_SOCKET_H
michael@0 41 #include <sys/socket.h>
michael@0 42 #endif
michael@0 43 #include <stdio.h>
michael@0 44 #include <stdlib.h>
michael@0 45 #ifdef _EVENT_HAVE_UNISTD_H
michael@0 46 #include <unistd.h>
michael@0 47 #endif
michael@0 48 #ifdef _EVENT_HAVE_SYS_EVENTFD_H
michael@0 49 #include <sys/eventfd.h>
michael@0 50 #endif
michael@0 51 #include <ctype.h>
michael@0 52 #include <errno.h>
michael@0 53 #include <signal.h>
michael@0 54 #include <string.h>
michael@0 55 #include <time.h>
michael@0 56
michael@0 57 #include "event2/event.h"
michael@0 58 #include "event2/event_struct.h"
michael@0 59 #include "event2/event_compat.h"
michael@0 60 #include "event-internal.h"
michael@0 61 #include "defer-internal.h"
michael@0 62 #include "evthread-internal.h"
michael@0 63 #include "event2/thread.h"
michael@0 64 #include "event2/util.h"
michael@0 65 #include "log-internal.h"
michael@0 66 #include "evmap-internal.h"
michael@0 67 #include "iocp-internal.h"
michael@0 68 #include "changelist-internal.h"
michael@0 69 #include "ht-internal.h"
michael@0 70 #include "util-internal.h"
michael@0 71
michael@0 72 #ifdef _EVENT_HAVE_EVENT_PORTS
michael@0 73 extern const struct eventop evportops;
michael@0 74 #endif
michael@0 75 #ifdef _EVENT_HAVE_SELECT
michael@0 76 extern const struct eventop selectops;
michael@0 77 #endif
michael@0 78 #ifdef _EVENT_HAVE_POLL
michael@0 79 extern const struct eventop pollops;
michael@0 80 #endif
michael@0 81 #ifdef _EVENT_HAVE_EPOLL
michael@0 82 extern const struct eventop epollops;
michael@0 83 #endif
michael@0 84 #ifdef _EVENT_HAVE_WORKING_KQUEUE
michael@0 85 extern const struct eventop kqops;
michael@0 86 #endif
michael@0 87 #ifdef _EVENT_HAVE_DEVPOLL
michael@0 88 extern const struct eventop devpollops;
michael@0 89 #endif
michael@0 90 #ifdef WIN32
michael@0 91 extern const struct eventop win32ops;
michael@0 92 #endif
michael@0 93
michael@0 94 /* Array of backends in order of preference. */
michael@0 95 static const struct eventop *eventops[] = {
michael@0 96 #ifdef _EVENT_HAVE_EVENT_PORTS
michael@0 97 &evportops,
michael@0 98 #endif
michael@0 99 #ifdef _EVENT_HAVE_WORKING_KQUEUE
michael@0 100 &kqops,
michael@0 101 #endif
michael@0 102 #ifdef _EVENT_HAVE_EPOLL
michael@0 103 &epollops,
michael@0 104 #endif
michael@0 105 #ifdef _EVENT_HAVE_DEVPOLL
michael@0 106 &devpollops,
michael@0 107 #endif
michael@0 108 #ifdef _EVENT_HAVE_POLL
michael@0 109 &pollops,
michael@0 110 #endif
michael@0 111 #ifdef _EVENT_HAVE_SELECT
michael@0 112 &selectops,
michael@0 113 #endif
michael@0 114 #ifdef WIN32
michael@0 115 &win32ops,
michael@0 116 #endif
michael@0 117 NULL
michael@0 118 };
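
/*
 * For illustration, a minimal sketch (kept under "#if 0", so it is not
 * compiled here) of how an application can steer this preference order
 * from the outside: event_config_avoid_method() skips a backend by name,
 * and event_config_require_features() limits the choice to backends that
 * advertise certain features.  The feature flag and backend name used
 * below are just example choices.
 */
#if 0
#include <stdio.h>
#include <event2/event.h>

static struct event_base *
make_base_without_select(void)
{
	struct event_config *cfg = event_config_new();
	struct event_base *base;

	if (!cfg)
		return NULL;
	/* Never pick the select backend, even if it is compiled in. */
	event_config_avoid_method(cfg, "select");
	/* Only accept backends with O(1)-ish dispatch (epoll, kqueue, ...). */
	event_config_require_features(cfg, EV_FEATURE_O1);
	base = event_base_new_with_config(cfg);
	event_config_free(cfg);
	if (base)
		printf("using backend: %s\n", event_base_get_method(base));
	return base;
}
#endif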
michael@0 119
michael@0 120 /* Global state; deprecated */
michael@0 121 struct event_base *event_global_current_base_ = NULL;
michael@0 122 #define current_base event_global_current_base_
michael@0 123
michael@0 124 /* Global state */
michael@0 125
michael@0 126 static int use_monotonic;
michael@0 127
michael@0 128 /* Prototypes */
michael@0 129 static inline int event_add_internal(struct event *ev,
michael@0 130 const struct timeval *tv, int tv_is_absolute);
michael@0 131 static inline int event_del_internal(struct event *ev);
michael@0 132
michael@0 133 static void event_queue_insert(struct event_base *, struct event *, int);
michael@0 134 static void event_queue_remove(struct event_base *, struct event *, int);
michael@0 135 static int event_haveevents(struct event_base *);
michael@0 136
michael@0 137 static int event_process_active(struct event_base *);
michael@0 138
michael@0 139 static int timeout_next(struct event_base *, struct timeval **);
michael@0 140 static void timeout_process(struct event_base *);
michael@0 141 static void timeout_correct(struct event_base *, struct timeval *);
michael@0 142
michael@0 143 static inline void event_signal_closure(struct event_base *, struct event *ev);
michael@0 144 static inline void event_persist_closure(struct event_base *, struct event *ev);
michael@0 145
michael@0 146 static int evthread_notify_base(struct event_base *base);
michael@0 147
michael@0 148 #ifndef _EVENT_DISABLE_DEBUG_MODE
michael@0 149 /* These functions implement a hashtable of which 'struct event *' structures
michael@0 150 * have been setup or added. We don't want to trust the content of the struct
michael@0 151 * event itself, since we're trying to work through cases where an event gets
michael@0 152 * clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
michael@0 153 */
michael@0 154
michael@0 155 struct event_debug_entry {
michael@0 156 HT_ENTRY(event_debug_entry) node;
michael@0 157 const struct event *ptr;
michael@0 158 unsigned added : 1;
michael@0 159 };
michael@0 160
michael@0 161 static inline unsigned
michael@0 162 hash_debug_entry(const struct event_debug_entry *e)
michael@0 163 {
michael@0 164 /* We need to do this silliness to convince compilers that we
michael@0 165 * honestly mean to cast e->ptr to an integer, and discard any
michael@0 166 * part of it that doesn't fit in an unsigned.
michael@0 167 */
michael@0 168 unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
michael@0 169 /* Our hashtable implementation is pretty sensitive to low bits,
michael@0 170 * and every struct event is over 64 bytes in size, so we can
michael@0 171 * just say >>6. */
michael@0 172 return (u >> 6);
michael@0 173 }
michael@0 174
michael@0 175 static inline int
michael@0 176 eq_debug_entry(const struct event_debug_entry *a,
michael@0 177 const struct event_debug_entry *b)
michael@0 178 {
michael@0 179 return a->ptr == b->ptr;
michael@0 180 }
michael@0 181
michael@0 182 int _event_debug_mode_on = 0;
michael@0 183 /* Set if it's too late to enable event_debug_mode. */
michael@0 184 static int event_debug_mode_too_late = 0;
michael@0 185 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
michael@0 186 static void *_event_debug_map_lock = NULL;
michael@0 187 #endif
michael@0 188 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
michael@0 189 HT_INITIALIZER();
michael@0 190
michael@0 191 HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
michael@0 192 eq_debug_entry)
michael@0 193 HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
michael@0 194 eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
michael@0 195
michael@0 196 /* Macro: record that ev is now setup (that is, ready for an add) */
michael@0 197 #define _event_debug_note_setup(ev) do { \
michael@0 198 if (_event_debug_mode_on) { \
michael@0 199 struct event_debug_entry *dent,find; \
michael@0 200 find.ptr = (ev); \
michael@0 201 EVLOCK_LOCK(_event_debug_map_lock, 0); \
michael@0 202 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
michael@0 203 if (dent) { \
michael@0 204 dent->added = 0; \
michael@0 205 } else { \
michael@0 206 dent = mm_malloc(sizeof(*dent)); \
michael@0 207 if (!dent) \
michael@0 208 event_err(1, \
michael@0 209 "Out of memory in debugging code"); \
michael@0 210 dent->ptr = (ev); \
michael@0 211 dent->added = 0; \
michael@0 212 HT_INSERT(event_debug_map, &global_debug_map, dent); \
michael@0 213 } \
michael@0 214 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
michael@0 215 } \
michael@0 216 event_debug_mode_too_late = 1; \
michael@0 217 } while (0)
michael@0 218 /* Macro: record that ev is no longer setup */
michael@0 219 #define _event_debug_note_teardown(ev) do { \
michael@0 220 if (_event_debug_mode_on) { \
michael@0 221 struct event_debug_entry *dent,find; \
michael@0 222 find.ptr = (ev); \
michael@0 223 EVLOCK_LOCK(_event_debug_map_lock, 0); \
michael@0 224 dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
michael@0 225 if (dent) \
michael@0 226 mm_free(dent); \
michael@0 227 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
michael@0 228 } \
michael@0 229 event_debug_mode_too_late = 1; \
michael@0 230 } while (0)
michael@0 231 /* Macro: record that ev is now added */
michael@0 232 #define _event_debug_note_add(ev) do { \
michael@0 233 if (_event_debug_mode_on) { \
michael@0 234 struct event_debug_entry *dent,find; \
michael@0 235 find.ptr = (ev); \
michael@0 236 EVLOCK_LOCK(_event_debug_map_lock, 0); \
michael@0 237 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
michael@0 238 if (dent) { \
michael@0 239 dent->added = 1; \
michael@0 240 } else { \
michael@0 241 event_errx(_EVENT_ERR_ABORT, \
michael@0 242 "%s: noting an add on a non-setup event %p" \
michael@0 243 " (events: 0x%x, fd: "EV_SOCK_FMT \
michael@0 244 ", flags: 0x%x)", \
michael@0 245 __func__, (ev), (ev)->ev_events, \
michael@0 246 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
michael@0 247 } \
michael@0 248 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
michael@0 249 } \
michael@0 250 event_debug_mode_too_late = 1; \
michael@0 251 } while (0)
michael@0 252 /* Macro: record that ev is no longer added */
michael@0 253 #define _event_debug_note_del(ev) do { \
michael@0 254 if (_event_debug_mode_on) { \
michael@0 255 struct event_debug_entry *dent,find; \
michael@0 256 find.ptr = (ev); \
michael@0 257 EVLOCK_LOCK(_event_debug_map_lock, 0); \
michael@0 258 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
michael@0 259 if (dent) { \
michael@0 260 dent->added = 0; \
michael@0 261 } else { \
michael@0 262 event_errx(_EVENT_ERR_ABORT, \
michael@0 263 "%s: noting a del on a non-setup event %p" \
michael@0 264 " (events: 0x%x, fd: "EV_SOCK_FMT \
michael@0 265 ", flags: 0x%x)", \
michael@0 266 __func__, (ev), (ev)->ev_events, \
michael@0 267 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
michael@0 268 } \
michael@0 269 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
michael@0 270 } \
michael@0 271 event_debug_mode_too_late = 1; \
michael@0 272 } while (0)
michael@0 273 /* Macro: assert that ev is setup (i.e., okay to add or inspect) */
michael@0 274 #define _event_debug_assert_is_setup(ev) do { \
michael@0 275 if (_event_debug_mode_on) { \
michael@0 276 struct event_debug_entry *dent,find; \
michael@0 277 find.ptr = (ev); \
michael@0 278 EVLOCK_LOCK(_event_debug_map_lock, 0); \
michael@0 279 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
michael@0 280 if (!dent) { \
michael@0 281 event_errx(_EVENT_ERR_ABORT, \
michael@0 282 "%s called on a non-initialized event %p" \
michael@0 283 " (events: 0x%x, fd: "EV_SOCK_FMT\
michael@0 284 ", flags: 0x%x)", \
michael@0 285 __func__, (ev), (ev)->ev_events, \
michael@0 286 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
michael@0 287 } \
michael@0 288 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
michael@0 289 } \
michael@0 290 } while (0)
michael@0 291 /* Macro: assert that ev is not added (i.e., okay to tear down or set
michael@0 292 * up again) */
michael@0 293 #define _event_debug_assert_not_added(ev) do { \
michael@0 294 if (_event_debug_mode_on) { \
michael@0 295 struct event_debug_entry *dent,find; \
michael@0 296 find.ptr = (ev); \
michael@0 297 EVLOCK_LOCK(_event_debug_map_lock, 0); \
michael@0 298 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
michael@0 299 if (dent && dent->added) { \
michael@0 300 event_errx(_EVENT_ERR_ABORT, \
michael@0 301 "%s called on an already added event %p" \
michael@0 302 " (events: 0x%x, fd: "EV_SOCK_FMT", " \
michael@0 303 "flags: 0x%x)", \
michael@0 304 __func__, (ev), (ev)->ev_events, \
michael@0 305 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
michael@0 306 } \
michael@0 307 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
michael@0 308 } \
michael@0 309 } while (0)
michael@0 310 #else
michael@0 311 #define _event_debug_note_setup(ev) \
michael@0 312 ((void)0)
michael@0 313 #define _event_debug_note_teardown(ev) \
michael@0 314 ((void)0)
michael@0 315 #define _event_debug_note_add(ev) \
michael@0 316 ((void)0)
michael@0 317 #define _event_debug_note_del(ev) \
michael@0 318 ((void)0)
michael@0 319 #define _event_debug_assert_is_setup(ev) \
michael@0 320 ((void)0)
michael@0 321 #define _event_debug_assert_not_added(ev) \
michael@0 322 ((void)0)
michael@0 323 #endif
michael@0 324
michael@0 325 #define EVENT_BASE_ASSERT_LOCKED(base) \
michael@0 326 EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
michael@0 327
michael@0 328 /* The first time this function is called, it sets use_monotonic to 1
michael@0 329 * if we have a clock function that supports monotonic time */
michael@0 330 static void
michael@0 331 detect_monotonic(void)
michael@0 332 {
michael@0 333 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
michael@0 334 struct timespec ts;
michael@0 335 static int use_monotonic_initialized = 0;
michael@0 336
michael@0 337 if (use_monotonic_initialized)
michael@0 338 return;
michael@0 339
michael@0 340 if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
michael@0 341 use_monotonic = 1;
michael@0 342
michael@0 343 use_monotonic_initialized = 1;
michael@0 344 #endif
michael@0 345 }
michael@0 346
michael@0 347 /* How often (in seconds) do we check for changes in wall clock time relative
michael@0 348 * to monotonic time? Set this to -1 for 'never.' */
michael@0 349 #define CLOCK_SYNC_INTERVAL -1
michael@0 350
michael@0 351 /** Set 'tp' to the current time according to 'base'. We must hold the lock
michael@0 352 * on 'base'. If there is a cached time, return it. Otherwise, use
michael@0 353 * clock_gettime or gettimeofday as appropriate to find out the right time.
michael@0 354 * Return 0 on success, -1 on failure.
michael@0 355 */
michael@0 356 static int
michael@0 357 gettime(struct event_base *base, struct timeval *tp)
michael@0 358 {
michael@0 359 EVENT_BASE_ASSERT_LOCKED(base);
michael@0 360
michael@0 361 if (base->tv_cache.tv_sec) {
michael@0 362 *tp = base->tv_cache;
michael@0 363 return (0);
michael@0 364 }
michael@0 365
michael@0 366 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
michael@0 367 if (use_monotonic) {
michael@0 368 struct timespec ts;
michael@0 369
michael@0 370 if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
michael@0 371 return (-1);
michael@0 372
michael@0 373 tp->tv_sec = ts.tv_sec;
michael@0 374 tp->tv_usec = ts.tv_nsec / 1000;
michael@0 375 if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
michael@0 376 < ts.tv_sec) {
michael@0 377 struct timeval tv;
michael@0 378 evutil_gettimeofday(&tv,NULL);
michael@0 379 evutil_timersub(&tv, tp, &base->tv_clock_diff);
michael@0 380 base->last_updated_clock_diff = ts.tv_sec;
michael@0 381 }
michael@0 382
michael@0 383 return (0);
michael@0 384 }
michael@0 385 #endif
michael@0 386
michael@0 387 return (evutil_gettimeofday(tp, NULL));
michael@0 388 }
michael@0 389
michael@0 390 int
michael@0 391 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
michael@0 392 {
michael@0 393 int r;
michael@0 394 if (!base) {
michael@0 395 base = current_base;
michael@0 396 if (!current_base)
michael@0 397 return evutil_gettimeofday(tv, NULL);
michael@0 398 }
michael@0 399
michael@0 400 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
michael@0 401 if (base->tv_cache.tv_sec == 0) {
michael@0 402 r = evutil_gettimeofday(tv, NULL);
michael@0 403 } else {
michael@0 404 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
michael@0 405 evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
michael@0 406 #else
michael@0 407 *tv = base->tv_cache;
michael@0 408 #endif
michael@0 409 r = 0;
michael@0 410 }
michael@0 411 EVBASE_RELEASE_LOCK(base, th_base_lock);
michael@0 412 return r;
michael@0 413 }
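
/*
 * For illustration, a minimal sketch (kept under "#if 0", not compiled
 * here) of the intended use of the cached time: inside a callback,
 * event_base_gettimeofday_cached() reports the wall-clock time at which
 * the current round of callbacks started running, without the cost of an
 * extra system call per callback.  The callback shape is just an example.
 */
#if 0
#include <event2/event.h>
#include <event2/util.h>

static void
example_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	struct timeval cached, exact;

	event_base_gettimeofday_cached(base, &cached); /* cached, cheap */
	evutil_gettimeofday(&exact, NULL);             /* always a syscall */
	(void)fd; (void)what; (void)cached; (void)exact;
}
#endif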
michael@0 414
michael@0 415 /** Make 'base' have no current cached time. */
michael@0 416 static inline void
michael@0 417 clear_time_cache(struct event_base *base)
michael@0 418 {
michael@0 419 base->tv_cache.tv_sec = 0;
michael@0 420 }
michael@0 421
michael@0 422 /** Replace the cached time in 'base' with the current time. */
michael@0 423 static inline void
michael@0 424 update_time_cache(struct event_base *base)
michael@0 425 {
michael@0 426 base->tv_cache.tv_sec = 0;
michael@0 427 if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
michael@0 428 gettime(base, &base->tv_cache);
michael@0 429 }
michael@0 430
michael@0 431 struct event_base *
michael@0 432 event_init(void)
michael@0 433 {
michael@0 434 struct event_base *base = event_base_new_with_config(NULL);
michael@0 435
michael@0 436 if (base == NULL) {
michael@0 437 event_errx(1, "%s: Unable to construct event_base", __func__);
michael@0 438 return NULL;
michael@0 439 }
michael@0 440
michael@0 441 current_base = base;
michael@0 442
michael@0 443 return (base);
michael@0 444 }
michael@0 445
michael@0 446 struct event_base *
michael@0 447 event_base_new(void)
michael@0 448 {
michael@0 449 struct event_base *base = NULL;
michael@0 450 struct event_config *cfg = event_config_new();
michael@0 451 if (cfg) {
michael@0 452 base = event_base_new_with_config(cfg);
michael@0 453 event_config_free(cfg);
michael@0 454 }
michael@0 455 return base;
michael@0 456 }
michael@0 457
michael@0 458 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
michael@0 459 * avoid. */
michael@0 460 static int
michael@0 461 event_config_is_avoided_method(const struct event_config *cfg,
michael@0 462 const char *method)
michael@0 463 {
michael@0 464 struct event_config_entry *entry;
michael@0 465
michael@0 466 TAILQ_FOREACH(entry, &cfg->entries, next) {
michael@0 467 if (entry->avoid_method != NULL &&
michael@0 468 strcmp(entry->avoid_method, method) == 0)
michael@0 469 return (1);
michael@0 470 }
michael@0 471
michael@0 472 return (0);
michael@0 473 }
michael@0 474
michael@0 475 /** Return true iff 'method' is disabled according to the environment. */
michael@0 476 static int
michael@0 477 event_is_method_disabled(const char *name)
michael@0 478 {
michael@0 479 char environment[64];
michael@0 480 int i;
michael@0 481
michael@0 482 evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
michael@0 483 for (i = 8; environment[i] != '\0'; ++i)
michael@0 484 environment[i] = EVUTIL_TOUPPER(environment[i]);
michael@0 485 /* Note that evutil_getenv() ignores the environment entirely if
michael@0 486 * we're setuid */
michael@0 487 return (evutil_getenv(environment) != NULL);
michael@0 488 }
michael@0 489
michael@0 490 int
michael@0 491 event_base_get_features(const struct event_base *base)
michael@0 492 {
michael@0 493 return base->evsel->features;
michael@0 494 }
michael@0 495
michael@0 496 void
michael@0 497 event_deferred_cb_queue_init(struct deferred_cb_queue *cb)
michael@0 498 {
michael@0 499 memset(cb, 0, sizeof(struct deferred_cb_queue));
michael@0 500 TAILQ_INIT(&cb->deferred_cb_list);
michael@0 501 }
michael@0 502
michael@0 503 /** Helper for the deferred_cb queue: wake up the event base. */
michael@0 504 static void
michael@0 505 notify_base_cbq_callback(struct deferred_cb_queue *cb, void *baseptr)
michael@0 506 {
michael@0 507 struct event_base *base = baseptr;
michael@0 508 if (EVBASE_NEED_NOTIFY(base))
michael@0 509 evthread_notify_base(base);
michael@0 510 }
michael@0 511
michael@0 512 struct deferred_cb_queue *
michael@0 513 event_base_get_deferred_cb_queue(struct event_base *base)
michael@0 514 {
michael@0 515 return base ? &base->defer_queue : NULL;
michael@0 516 }
michael@0 517
michael@0 518 void
michael@0 519 event_enable_debug_mode(void)
michael@0 520 {
michael@0 521 #ifndef _EVENT_DISABLE_DEBUG_MODE
michael@0 522 if (_event_debug_mode_on)
michael@0 523 event_errx(1, "%s was called twice!", __func__);
michael@0 524 if (event_debug_mode_too_late)
michael@0 525 event_errx(1, "%s must be called *before* creating any events "
michael@0 526 "or event_bases",__func__);
michael@0 527
michael@0 528 _event_debug_mode_on = 1;
michael@0 529
michael@0 530 HT_INIT(event_debug_map, &global_debug_map);
michael@0 531 #endif
michael@0 532 }
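
/*
 * For illustration, a minimal sketch (kept under "#if 0", not compiled
 * here) of the required call order: event_enable_debug_mode() has to run
 * before any event or event_base is created, otherwise the "too late"
 * check above makes event_errx() terminate the process.
 */
#if 0
#include <event2/event.h>

int
main(void)
{
	struct event_base *base;

	event_enable_debug_mode();   /* first: turn on the debug hashtable */
	base = event_base_new();     /* only then create bases and events */
	event_base_dispatch(base);
	event_base_free(base);
	return 0;
}
#endif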
michael@0 533
michael@0 534 #if 0
michael@0 535 void
michael@0 536 event_disable_debug_mode(void)
michael@0 537 {
michael@0 538 struct event_debug_entry **ent, *victim;
michael@0 539
michael@0 540 EVLOCK_LOCK(_event_debug_map_lock, 0);
michael@0 541 for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
michael@0 542 victim = *ent;
michael@0 543 ent = HT_NEXT_RMV(event_debug_map,&global_debug_map, ent);
michael@0 544 mm_free(victim);
michael@0 545 }
michael@0 546 HT_CLEAR(event_debug_map, &global_debug_map);
michael@0 547 EVLOCK_UNLOCK(_event_debug_map_lock , 0);
michael@0 548 }
michael@0 549 #endif
michael@0 550
michael@0 551 struct event_base *
michael@0 552 event_base_new_with_config(const struct event_config *cfg)
michael@0 553 {
michael@0 554 int i;
michael@0 555 struct event_base *base;
michael@0 556 int should_check_environment;
michael@0 557
michael@0 558 #ifndef _EVENT_DISABLE_DEBUG_MODE
michael@0 559 event_debug_mode_too_late = 1;
michael@0 560 #endif
michael@0 561
michael@0 562 if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
michael@0 563 event_warn("%s: calloc", __func__);
michael@0 564 return NULL;
michael@0 565 }
michael@0 566 detect_monotonic();
michael@0 567 gettime(base, &base->event_tv);
michael@0 568
michael@0 569 min_heap_ctor(&base->timeheap);
michael@0 570 TAILQ_INIT(&base->eventqueue);
michael@0 571 base->sig.ev_signal_pair[0] = -1;
michael@0 572 base->sig.ev_signal_pair[1] = -1;
michael@0 573 base->th_notify_fd[0] = -1;
michael@0 574 base->th_notify_fd[1] = -1;
michael@0 575
michael@0 576 event_deferred_cb_queue_init(&base->defer_queue);
michael@0 577 base->defer_queue.notify_fn = notify_base_cbq_callback;
michael@0 578 base->defer_queue.notify_arg = base;
michael@0 579 if (cfg)
michael@0 580 base->flags = cfg->flags;
michael@0 581
michael@0 582 evmap_io_initmap(&base->io);
michael@0 583 evmap_signal_initmap(&base->sigmap);
michael@0 584 event_changelist_init(&base->changelist);
michael@0 585
michael@0 586 base->evbase = NULL;
michael@0 587
michael@0 588 should_check_environment =
michael@0 589 !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
michael@0 590
michael@0 591 for (i = 0; eventops[i] && !base->evbase; i++) {
michael@0 592 if (cfg != NULL) {
michael@0 593 /* determine if this backend should be avoided */
michael@0 594 if (event_config_is_avoided_method(cfg,
michael@0 595 eventops[i]->name))
michael@0 596 continue;
michael@0 597 if ((eventops[i]->features & cfg->require_features)
michael@0 598 != cfg->require_features)
michael@0 599 continue;
michael@0 600 }
michael@0 601
michael@0 602 /* also obey the environment variables */
michael@0 603 if (should_check_environment &&
michael@0 604 event_is_method_disabled(eventops[i]->name))
michael@0 605 continue;
michael@0 606
michael@0 607 base->evsel = eventops[i];
michael@0 608
michael@0 609 base->evbase = base->evsel->init(base);
michael@0 610 }
michael@0 611
michael@0 612 if (base->evbase == NULL) {
michael@0 613 event_warnx("%s: no event mechanism available",
michael@0 614 __func__);
michael@0 615 base->evsel = NULL;
michael@0 616 event_base_free(base);
michael@0 617 return NULL;
michael@0 618 }
michael@0 619
michael@0 620 if (evutil_getenv("EVENT_SHOW_METHOD"))
michael@0 621 event_msgx("libevent using: %s", base->evsel->name);
michael@0 622
michael@0 623 /* allocate a single active event queue */
michael@0 624 if (event_base_priority_init(base, 1) < 0) {
michael@0 625 event_base_free(base);
michael@0 626 return NULL;
michael@0 627 }
michael@0 628
michael@0 629 /* prepare for threading */
michael@0 630
michael@0 631 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
michael@0 632 if (EVTHREAD_LOCKING_ENABLED() &&
michael@0 633 (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
michael@0 634 int r;
michael@0 635 EVTHREAD_ALLOC_LOCK(base->th_base_lock,
michael@0 636 EVTHREAD_LOCKTYPE_RECURSIVE);
michael@0 637 base->defer_queue.lock = base->th_base_lock;
michael@0 638 EVTHREAD_ALLOC_COND(base->current_event_cond);
michael@0 639 r = evthread_make_base_notifiable(base);
michael@0 640 if (r<0) {
michael@0 641 event_warnx("%s: Unable to make base notifiable.", __func__);
michael@0 642 event_base_free(base);
michael@0 643 return NULL;
michael@0 644 }
michael@0 645 }
michael@0 646 #endif
michael@0 647
michael@0 648 #ifdef WIN32
michael@0 649 if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
michael@0 650 event_base_start_iocp(base, cfg->n_cpus_hint);
michael@0 651 #endif
michael@0 652
michael@0 653 return (base);
michael@0 654 }
michael@0 655
michael@0 656 int
michael@0 657 event_base_start_iocp(struct event_base *base, int n_cpus)
michael@0 658 {
michael@0 659 #ifdef WIN32
michael@0 660 if (base->iocp)
michael@0 661 return 0;
michael@0 662 base->iocp = event_iocp_port_launch(n_cpus);
michael@0 663 if (!base->iocp) {
michael@0 664 event_warnx("%s: Couldn't launch IOCP", __func__);
michael@0 665 return -1;
michael@0 666 }
michael@0 667 return 0;
michael@0 668 #else
michael@0 669 return -1;
michael@0 670 #endif
michael@0 671 }
michael@0 672
michael@0 673 void
michael@0 674 event_base_stop_iocp(struct event_base *base)
michael@0 675 {
michael@0 676 #ifdef WIN32
michael@0 677 int rv;
michael@0 678
michael@0 679 if (!base->iocp)
michael@0 680 return;
michael@0 681 rv = event_iocp_shutdown(base->iocp, -1);
michael@0 682 EVUTIL_ASSERT(rv >= 0);
michael@0 683 base->iocp = NULL;
michael@0 684 #endif
michael@0 685 }
michael@0 686
michael@0 687 void
michael@0 688 event_base_free(struct event_base *base)
michael@0 689 {
michael@0 690 int i, n_deleted=0;
michael@0 691 struct event *ev;
michael@0 692 /* XXXX grab the lock? If there is contention when one thread frees
michael@0 693 * the base, then the contending thread will be very sad soon. */
michael@0 694
michael@0 695 /* event_base_free(NULL) is how to free the current_base if we
michael@0 696 * made it with event_init and forgot to hold a reference to it. */
michael@0 697 if (base == NULL && current_base)
michael@0 698 base = current_base;
michael@0 699 /* If we're freeing current_base, there won't be a current_base. */
michael@0 700 if (base == current_base)
michael@0 701 current_base = NULL;
michael@0 702 /* Don't actually free NULL. */
michael@0 703 if (base == NULL) {
michael@0 704 event_warnx("%s: no base to free", __func__);
michael@0 705 return;
michael@0 706 }
michael@0 707 /* XXX(niels) - check for internal events first */
michael@0 708
michael@0 709 #ifdef WIN32
michael@0 710 event_base_stop_iocp(base);
michael@0 711 #endif
michael@0 712
michael@0 713 /* threading fds if we have them */
michael@0 714 if (base->th_notify_fd[0] != -1) {
michael@0 715 event_del(&base->th_notify);
michael@0 716 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
michael@0 717 if (base->th_notify_fd[1] != -1)
michael@0 718 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
michael@0 719 base->th_notify_fd[0] = -1;
michael@0 720 base->th_notify_fd[1] = -1;
michael@0 721 event_debug_unassign(&base->th_notify);
michael@0 722 }
michael@0 723
michael@0 724 /* Delete all non-internal events. */
michael@0 725 for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
michael@0 726 struct event *next = TAILQ_NEXT(ev, ev_next);
michael@0 727 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
michael@0 728 event_del(ev);
michael@0 729 ++n_deleted;
michael@0 730 }
michael@0 731 ev = next;
michael@0 732 }
michael@0 733 while ((ev = min_heap_top(&base->timeheap)) != NULL) {
michael@0 734 event_del(ev);
michael@0 735 ++n_deleted;
michael@0 736 }
michael@0 737 for (i = 0; i < base->n_common_timeouts; ++i) {
michael@0 738 struct common_timeout_list *ctl =
michael@0 739 base->common_timeout_queues[i];
michael@0 740 event_del(&ctl->timeout_event); /* Internal; doesn't count */
michael@0 741 event_debug_unassign(&ctl->timeout_event);
michael@0 742 for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
michael@0 743 struct event *next = TAILQ_NEXT(ev,
michael@0 744 ev_timeout_pos.ev_next_with_common_timeout);
michael@0 745 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
michael@0 746 event_del(ev);
michael@0 747 ++n_deleted;
michael@0 748 }
michael@0 749 ev = next;
michael@0 750 }
michael@0 751 mm_free(ctl);
michael@0 752 }
michael@0 753 if (base->common_timeout_queues)
michael@0 754 mm_free(base->common_timeout_queues);
michael@0 755
michael@0 756 for (i = 0; i < base->nactivequeues; ++i) {
michael@0 757 for (ev = TAILQ_FIRST(&base->activequeues[i]); ev; ) {
michael@0 758 struct event *next = TAILQ_NEXT(ev, ev_active_next);
michael@0 759 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
michael@0 760 event_del(ev);
michael@0 761 ++n_deleted;
michael@0 762 }
michael@0 763 ev = next;
michael@0 764 }
michael@0 765 }
michael@0 766
michael@0 767 if (n_deleted)
michael@0 768 event_debug(("%s: %d events were still set in base",
michael@0 769 __func__, n_deleted));
michael@0 770
michael@0 771 if (base->evsel != NULL && base->evsel->dealloc != NULL)
michael@0 772 base->evsel->dealloc(base);
michael@0 773
michael@0 774 for (i = 0; i < base->nactivequeues; ++i)
michael@0 775 EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
michael@0 776
michael@0 777 EVUTIL_ASSERT(min_heap_empty(&base->timeheap));
michael@0 778 min_heap_dtor(&base->timeheap);
michael@0 779
michael@0 780 mm_free(base->activequeues);
michael@0 781
michael@0 782 EVUTIL_ASSERT(TAILQ_EMPTY(&base->eventqueue));
michael@0 783
michael@0 784 evmap_io_clear(&base->io);
michael@0 785 evmap_signal_clear(&base->sigmap);
michael@0 786 event_changelist_freemem(&base->changelist);
michael@0 787
michael@0 788 EVTHREAD_FREE_LOCK(base->th_base_lock, EVTHREAD_LOCKTYPE_RECURSIVE);
michael@0 789 EVTHREAD_FREE_COND(base->current_event_cond);
michael@0 790
michael@0 791 mm_free(base);
michael@0 792 }
michael@0 793
michael@0 794 /* reinitialize the event base after a fork */
michael@0 795 int
michael@0 796 event_reinit(struct event_base *base)
michael@0 797 {
michael@0 798 const struct eventop *evsel;
michael@0 799 int res = 0;
michael@0 800 struct event *ev;
michael@0 801 int was_notifiable = 0;
michael@0 802
michael@0 803 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
michael@0 804
michael@0 805 evsel = base->evsel;
michael@0 806
michael@0 807 #if 0
michael@0 808 /* Right now, reinit always takes effect, since even if the
michael@0 809 backend doesn't require it, the signal socketpair code does.
michael@0 810
michael@0 811 XXX
michael@0 812 */
michael@0 813 /* check if this event mechanism requires reinit */
michael@0 814 if (!evsel->need_reinit)
michael@0 815 goto done;
michael@0 816 #endif
michael@0 817
michael@0 818 /* prevent internal delete */
michael@0 819 if (base->sig.ev_signal_added) {
michael@0 820 /* we cannot call event_del here because the base has
michael@0 821 * not been reinitialized yet. */
michael@0 822 event_queue_remove(base, &base->sig.ev_signal,
michael@0 823 EVLIST_INSERTED);
michael@0 824 if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
michael@0 825 event_queue_remove(base, &base->sig.ev_signal,
michael@0 826 EVLIST_ACTIVE);
michael@0 827 if (base->sig.ev_signal_pair[0] != -1)
michael@0 828 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
michael@0 829 if (base->sig.ev_signal_pair[1] != -1)
michael@0 830 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
michael@0 831 base->sig.ev_signal_added = 0;
michael@0 832 }
michael@0 833 if (base->th_notify_fd[0] != -1) {
michael@0 834 /* we cannot call event_del here because the base has
michael@0 835 * not been reinitialized yet. */
michael@0 836 was_notifiable = 1;
michael@0 837 event_queue_remove(base, &base->th_notify,
michael@0 838 EVLIST_INSERTED);
michael@0 839 if (base->th_notify.ev_flags & EVLIST_ACTIVE)
michael@0 840 event_queue_remove(base, &base->th_notify,
michael@0 841 EVLIST_ACTIVE);
michael@0 842 base->sig.ev_signal_added = 0;
michael@0 843 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
michael@0 844 if (base->th_notify_fd[1] != -1)
michael@0 845 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
michael@0 846 base->th_notify_fd[0] = -1;
michael@0 847 base->th_notify_fd[1] = -1;
michael@0 848 event_debug_unassign(&base->th_notify);
michael@0 849 }
michael@0 850
michael@0 851 if (base->evsel->dealloc != NULL)
michael@0 852 base->evsel->dealloc(base);
michael@0 853 base->evbase = evsel->init(base);
michael@0 854 if (base->evbase == NULL) {
michael@0 855 event_errx(1, "%s: could not reinitialize event mechanism",
michael@0 856 __func__);
michael@0 857 res = -1;
michael@0 858 goto done;
michael@0 859 }
michael@0 860
michael@0 861 event_changelist_freemem(&base->changelist); /* XXX */
michael@0 862 evmap_io_clear(&base->io);
michael@0 863 evmap_signal_clear(&base->sigmap);
michael@0 864
michael@0 865 TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
michael@0 866 if (ev->ev_events & (EV_READ|EV_WRITE)) {
michael@0 867 if (ev == &base->sig.ev_signal) {
michael@0 868 /* If we run into the ev_signal event, it's only
michael@0 869 * in eventqueue because some signal event was
michael@0 870 * added, which made evsig_add re-add ev_signal.
michael@0 871 * So don't double-add it. */
michael@0 872 continue;
michael@0 873 }
michael@0 874 if (evmap_io_add(base, ev->ev_fd, ev) == -1)
michael@0 875 res = -1;
michael@0 876 } else if (ev->ev_events & EV_SIGNAL) {
michael@0 877 if (evmap_signal_add(base, (int)ev->ev_fd, ev) == -1)
michael@0 878 res = -1;
michael@0 879 }
michael@0 880 }
michael@0 881
michael@0 882 if (was_notifiable && res == 0)
michael@0 883 res = evthread_make_base_notifiable(base);
michael@0 884
michael@0 885 done:
michael@0 886 EVBASE_RELEASE_LOCK(base, th_base_lock);
michael@0 887 return (res);
michael@0 888 }
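
/*
 * For illustration, a minimal sketch (kept under "#if 0", not compiled
 * here) of the intended use: after fork(), a child that keeps using an
 * inherited event_base must call event_reinit() on it so the backend
 * state and the internal signal/notification sockets are rebuilt for the
 * new process.  The helper name is just an example.
 */
#if 0
#include <unistd.h>
#include <event2/event.h>

static int
fork_and_keep_base(struct event_base *base)
{
	pid_t pid = fork();

	if (pid == -1)
		return -1;
	if (pid == 0) {
		/* child: inherited backend state is stale; rebuild it */
		if (event_reinit(base) == -1)
			return -1;
	}
	return 0;
}
#endif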
michael@0 889
michael@0 890 const char **
michael@0 891 event_get_supported_methods(void)
michael@0 892 {
michael@0 893 static const char **methods = NULL;
michael@0 894 const struct eventop **method;
michael@0 895 const char **tmp;
michael@0 896 int i = 0, k;
michael@0 897
michael@0 898 /* count all methods */
michael@0 899 for (method = &eventops[0]; *method != NULL; ++method) {
michael@0 900 ++i;
michael@0 901 }
michael@0 902
michael@0 903 /* allocate one more than we need for the NULL pointer */
michael@0 904 tmp = mm_calloc((i + 1), sizeof(char *));
michael@0 905 if (tmp == NULL)
michael@0 906 return (NULL);
michael@0 907
michael@0 908 /* populate the array with the supported methods */
michael@0 909 for (k = 0, i = 0; eventops[k] != NULL; ++k) {
michael@0 910 tmp[i++] = eventops[k]->name;
michael@0 911 }
michael@0 912 tmp[i] = NULL;
michael@0 913
michael@0 914 if (methods != NULL)
michael@0 915 mm_free((char**)methods);
michael@0 916
michael@0 917 methods = tmp;
michael@0 918
michael@0 919 return (methods);
michael@0 920 }
michael@0 921
michael@0 922 struct event_config *
michael@0 923 event_config_new(void)
michael@0 924 {
michael@0 925 struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
michael@0 926
michael@0 927 if (cfg == NULL)
michael@0 928 return (NULL);
michael@0 929
michael@0 930 TAILQ_INIT(&cfg->entries);
michael@0 931
michael@0 932 return (cfg);
michael@0 933 }
michael@0 934
michael@0 935 static void
michael@0 936 event_config_entry_free(struct event_config_entry *entry)
michael@0 937 {
michael@0 938 if (entry->avoid_method != NULL)
michael@0 939 mm_free((char *)entry->avoid_method);
michael@0 940 mm_free(entry);
michael@0 941 }
michael@0 942
michael@0 943 void
michael@0 944 event_config_free(struct event_config *cfg)
michael@0 945 {
michael@0 946 struct event_config_entry *entry;
michael@0 947
michael@0 948 while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
michael@0 949 TAILQ_REMOVE(&cfg->entries, entry, next);
michael@0 950 event_config_entry_free(entry);
michael@0 951 }
michael@0 952 mm_free(cfg);
michael@0 953 }
michael@0 954
michael@0 955 int
michael@0 956 event_config_set_flag(struct event_config *cfg, int flag)
michael@0 957 {
michael@0 958 if (!cfg)
michael@0 959 return -1;
michael@0 960 cfg->flags |= flag;
michael@0 961 return 0;
michael@0 962 }
michael@0 963
michael@0 964 int
michael@0 965 event_config_avoid_method(struct event_config *cfg, const char *method)
michael@0 966 {
michael@0 967 struct event_config_entry *entry = mm_malloc(sizeof(*entry));
michael@0 968 if (entry == NULL)
michael@0 969 return (-1);
michael@0 970
michael@0 971 if ((entry->avoid_method = mm_strdup(method)) == NULL) {
michael@0 972 mm_free(entry);
michael@0 973 return (-1);
michael@0 974 }
michael@0 975
michael@0 976 TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
michael@0 977
michael@0 978 return (0);
michael@0 979 }
michael@0 980
michael@0 981 int
michael@0 982 event_config_require_features(struct event_config *cfg,
michael@0 983 int features)
michael@0 984 {
michael@0 985 if (!cfg)
michael@0 986 return (-1);
michael@0 987 cfg->require_features = features;
michael@0 988 return (0);
michael@0 989 }
michael@0 990
michael@0 991 int
michael@0 992 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
michael@0 993 {
michael@0 994 if (!cfg)
michael@0 995 return (-1);
michael@0 996 cfg->n_cpus_hint = cpus;
michael@0 997 return (0);
michael@0 998 }
michael@0 999
michael@0 1000 int
michael@0 1001 event_priority_init(int npriorities)
michael@0 1002 {
michael@0 1003 return event_base_priority_init(current_base, npriorities);
michael@0 1004 }
michael@0 1005
michael@0 1006 int
michael@0 1007 event_base_priority_init(struct event_base *base, int npriorities)
michael@0 1008 {
michael@0 1009 int i;
michael@0 1010
michael@0 1011 if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
michael@0 1012 || npriorities >= EVENT_MAX_PRIORITIES)
michael@0 1013 return (-1);
michael@0 1014
michael@0 1015 if (npriorities == base->nactivequeues)
michael@0 1016 return (0);
michael@0 1017
michael@0 1018 if (base->nactivequeues) {
michael@0 1019 mm_free(base->activequeues);
michael@0 1020 base->nactivequeues = 0;
michael@0 1021 }
michael@0 1022
michael@0 1023 /* Allocate our priority queues */
michael@0 1024 base->activequeues = (struct event_list *)
michael@0 1025 mm_calloc(npriorities, sizeof(struct event_list));
michael@0 1026 if (base->activequeues == NULL) {
michael@0 1027 event_warn("%s: calloc", __func__);
michael@0 1028 return (-1);
michael@0 1029 }
michael@0 1030 base->nactivequeues = npriorities;
michael@0 1031
michael@0 1032 for (i = 0; i < base->nactivequeues; ++i) {
michael@0 1033 TAILQ_INIT(&base->activequeues[i]);
michael@0 1034 }
michael@0 1035
michael@0 1036 return (0);
michael@0 1037 }
michael@0 1038
michael@0 1039 /* Returns true iff we're currently watching any events. */
michael@0 1040 static int
michael@0 1041 event_haveevents(struct event_base *base)
michael@0 1042 {
michael@0 1043 /* Caller must hold th_base_lock */
michael@0 1044 return (base->virtual_event_count > 0 || base->event_count > 0);
michael@0 1045 }
michael@0 1046
michael@0 1047 /* "closure" function called when processing active signal events */
michael@0 1048 static inline void
michael@0 1049 event_signal_closure(struct event_base *base, struct event *ev)
michael@0 1050 {
michael@0 1051 short ncalls;
michael@0 1052 int should_break;
michael@0 1053
michael@0 1054 /* Allows deletes to work */
michael@0 1055 ncalls = ev->ev_ncalls;
michael@0 1056 if (ncalls != 0)
michael@0 1057 ev->ev_pncalls = &ncalls;
michael@0 1058 EVBASE_RELEASE_LOCK(base, th_base_lock);
michael@0 1059 while (ncalls) {
michael@0 1060 ncalls--;
michael@0 1061 ev->ev_ncalls = ncalls;
michael@0 1062 if (ncalls == 0)
michael@0 1063 ev->ev_pncalls = NULL;
michael@0 1064 (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
michael@0 1065
michael@0 1066 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
michael@0 1067 should_break = base->event_break;
michael@0 1068 EVBASE_RELEASE_LOCK(base, th_base_lock);
michael@0 1069
michael@0 1070 if (should_break) {
michael@0 1071 if (ncalls != 0)
michael@0 1072 ev->ev_pncalls = NULL;
michael@0 1073 return;
michael@0 1074 }
michael@0 1075 }
michael@0 1076 }
michael@0 1077
michael@0 1078 /* Common timeouts are special timeouts that are handled as queues rather than
michael@0 1079 * in the minheap. This is more efficient than the minheap if we happen to
michael@0 1080 * know that we're going to get several thousand timeout events all with
michael@0 1081 * the same timeout value.
michael@0 1082 *
michael@0 1083 * Since all our timeout handling code assumes timevals can be copied,
michael@0 1084 * assigned, etc, we can't use "magic pointer" to encode these common
michael@0 1085 * timeouts. Searching through a list to see if every timeout is common could
michael@0 1086 * also get inefficient. Instead, we take advantage of the fact that tv_usec
michael@0 1087 * is 32 bits long, but only uses 20 of those bits (since it can never be over
michael@0 1088 * 999999.) We use the top bits to encode 4 bits of magic number, and 8 bits
michael@0 1089 * of index into the event_base's array of common timeouts.
michael@0 1090 */
michael@0 1091
michael@0 1092 #define MICROSECONDS_MASK COMMON_TIMEOUT_MICROSECONDS_MASK
michael@0 1093 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
michael@0 1094 #define COMMON_TIMEOUT_IDX_SHIFT 20
michael@0 1095 #define COMMON_TIMEOUT_MASK 0xf0000000
michael@0 1096 #define COMMON_TIMEOUT_MAGIC 0x50000000
michael@0 1097
michael@0 1098 #define COMMON_TIMEOUT_IDX(tv) \
michael@0 1099 (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
michael@0 1100
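/*
 * For illustration, how one encoded tv_usec value decomposes under the
 * masks above (the index and microsecond values are made-up examples,
 * assuming MICROSECONDS_MASK covers the low 20 bits):
 *
 *    tv_usec = 0x5020c350
 *      & COMMON_TIMEOUT_MASK     -> 0x50000000  (== COMMON_TIMEOUT_MAGIC)
 *      & COMMON_TIMEOUT_IDX_MASK -> 0x00200000  (>> 20 gives queue index 2)
 *      & MICROSECONDS_MASK       -> 0x0000c350  (50000 real microseconds)
 */
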
michael@0 1101 /** Return true iff 'tv' is a common timeout in 'base' */
michael@0 1102 static inline int
michael@0 1103 is_common_timeout(const struct timeval *tv,
michael@0 1104 const struct event_base *base)
michael@0 1105 {
michael@0 1106 int idx;
michael@0 1107 if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
michael@0 1108 return 0;
michael@0 1109 idx = COMMON_TIMEOUT_IDX(tv);
michael@0 1110 return idx < base->n_common_timeouts;
michael@0 1111 }
michael@0 1112
michael@0 1113 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
michael@0 1114 * one is a common timeout. */
michael@0 1115 static inline int
michael@0 1116 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
michael@0 1117 {
michael@0 1118 return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
michael@0 1119 (tv2->tv_usec & ~MICROSECONDS_MASK);
michael@0 1120 }
michael@0 1121
michael@0 1122 /** Requires that 'tv' is a common timeout. Return the corresponding
michael@0 1123 * common_timeout_list. */
michael@0 1124 static inline struct common_timeout_list *
michael@0 1125 get_common_timeout_list(struct event_base *base, const struct timeval *tv)
michael@0 1126 {
michael@0 1127 return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
michael@0 1128 }
michael@0 1129
michael@0 1130 #if 0
michael@0 1131 static inline int
michael@0 1132 common_timeout_ok(const struct timeval *tv,
michael@0 1133 struct event_base *base)
michael@0 1134 {
michael@0 1135 const struct timeval *expect =
michael@0 1136 &get_common_timeout_list(base, tv)->duration;
michael@0 1137 return tv->tv_sec == expect->tv_sec &&
michael@0 1138 tv->tv_usec == expect->tv_usec;
michael@0 1139 }
michael@0 1140 #endif
michael@0 1141
michael@0 1142 /* Add the timeout for the first event in the given common timeout list to the
michael@0 1143 * event_base's minheap. */
michael@0 1144 static void
michael@0 1145 common_timeout_schedule(struct common_timeout_list *ctl,
michael@0 1146 const struct timeval *now, struct event *head)
michael@0 1147 {
michael@0 1148 struct timeval timeout = head->ev_timeout;
michael@0 1149 timeout.tv_usec &= MICROSECONDS_MASK;
michael@0 1150 event_add_internal(&ctl->timeout_event, &timeout, 1);
michael@0 1151 }
michael@0 1152
michael@0 1153 /* Callback: invoked when the timeout for a common timeout queue triggers.
michael@0 1154 * This means that (at least) the first event in that queue should be run,
michael@0 1155 * and the timeout should be rescheduled if there are more events. */
michael@0 1156 static void
michael@0 1157 common_timeout_callback(evutil_socket_t fd, short what, void *arg)
michael@0 1158 {
michael@0 1159 struct timeval now;
michael@0 1160 struct common_timeout_list *ctl = arg;
michael@0 1161 struct event_base *base = ctl->base;
michael@0 1162 struct event *ev = NULL;
michael@0 1163 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
michael@0 1164 gettime(base, &now);
michael@0 1165 while (1) {
michael@0 1166 ev = TAILQ_FIRST(&ctl->events);
michael@0 1167 if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
michael@0 1168 (ev->ev_timeout.tv_sec == now.tv_sec &&
michael@0 1169 (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
michael@0 1170 break;
michael@0 1171 event_del_internal(ev);
michael@0 1172 event_active_nolock(ev, EV_TIMEOUT, 1);
michael@0 1173 }
michael@0 1174 if (ev)
michael@0 1175 common_timeout_schedule(ctl, &now, ev);
michael@0 1176 EVBASE_RELEASE_LOCK(base, th_base_lock);
michael@0 1177 }
michael@0 1178
michael@0 1179 #define MAX_COMMON_TIMEOUTS 256
michael@0 1180
michael@0 1181 const struct timeval *
michael@0 1182 event_base_init_common_timeout(struct event_base *base,
michael@0 1183 const struct timeval *duration)
michael@0 1184 {
michael@0 1185 int i;
michael@0 1186 struct timeval tv;
michael@0 1187 const struct timeval *result=NULL;
michael@0 1188 struct common_timeout_list *new_ctl;
michael@0 1189
michael@0 1190 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
michael@0 1191 if (duration->tv_usec > 1000000) {
michael@0 1192 memcpy(&tv, duration, sizeof(struct timeval));
michael@0 1193 if (is_common_timeout(duration, base))
michael@0 1194 tv.tv_usec &= MICROSECONDS_MASK;
michael@0 1195 tv.tv_sec += tv.tv_usec / 1000000;
michael@0 1196 tv.tv_usec %= 1000000;
michael@0 1197 duration = &tv;
michael@0 1198 }
michael@0 1199 for (i = 0; i < base->n_common_timeouts; ++i) {
michael@0 1200 const struct common_timeout_list *ctl =
michael@0 1201 base->common_timeout_queues[i];
michael@0 1202 if (duration->tv_sec == ctl->duration.tv_sec &&
michael@0 1203 duration->tv_usec ==
michael@0 1204 (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
michael@0 1205 EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
michael@0 1206 result = &ctl->duration;
michael@0 1207 goto done;
michael@0 1208 }
michael@0 1209 }
michael@0 1210 if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
michael@0 1211 event_warnx("%s: Too many common timeouts already in use; "
michael@0 1212 "we only support %d per event_base", __func__,
michael@0 1213 MAX_COMMON_TIMEOUTS);
michael@0 1214 goto done;
michael@0 1215 }
michael@0 1216 if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
michael@0 1217 int n = base->n_common_timeouts < 16 ? 16 :
michael@0 1218 base->n_common_timeouts*2;
michael@0 1219 struct common_timeout_list **newqueues =
michael@0 1220 mm_realloc(base->common_timeout_queues,
michael@0 1221 n*sizeof(struct common_timeout_queue *));
michael@0 1222 if (!newqueues) {
michael@0 1223 event_warn("%s: realloc",__func__);
michael@0 1224 goto done;
michael@0 1225 }
michael@0 1226 base->n_common_timeouts_allocated = n;
michael@0 1227 base->common_timeout_queues = newqueues;
michael@0 1228 }
michael@0 1229 new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
michael@0 1230 if (!new_ctl) {
michael@0 1231 event_warn("%s: calloc",__func__);
michael@0 1232 goto done;
michael@0 1233 }
michael@0 1234 TAILQ_INIT(&new_ctl->events);
michael@0 1235 new_ctl->duration.tv_sec = duration->tv_sec;
michael@0 1236 new_ctl->duration.tv_usec =
michael@0 1237 duration->tv_usec | COMMON_TIMEOUT_MAGIC |
michael@0 1238 (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
michael@0 1239 evtimer_assign(&new_ctl->timeout_event, base,
michael@0 1240 common_timeout_callback, new_ctl);
michael@0 1241 new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
michael@0 1242 event_priority_set(&new_ctl->timeout_event, 0);
michael@0 1243 new_ctl->base = base;
michael@0 1244 base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
michael@0 1245 result = &new_ctl->duration;
michael@0 1246
michael@0 1247 done:
michael@0 1248 if (result)
michael@0 1249 EVUTIL_ASSERT(is_common_timeout(result, base));
michael@0 1250
michael@0 1251 EVBASE_RELEASE_LOCK(base, th_base_lock);
michael@0 1252 return result;
michael@0 1253 }
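
/*
 * For illustration, a minimal sketch (kept under "#if 0", not compiled
 * here) of using a common timeout: register the shared duration once,
 * then pass the returned timeval to event_add() for every event that
 * should share it.  The ten-second duration is just an example.
 */
#if 0
#include <event2/event.h>

static void
add_with_common_timeout(struct event_base *base, struct event *ev)
{
	struct timeval ten_sec = { 10, 0 };
	const struct timeval *common;

	/* Returns a "magic" timeval naming the shared ten-second queue. */
	common = event_base_init_common_timeout(base, &ten_sec);
	if (common == NULL)
		common = &ten_sec;	/* fall back to an ordinary timeout */
	event_add(ev, common);
}
#endif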
michael@0 1254
michael@0 1255 /* Closure function invoked when we're activating a persistent event. */
michael@0 1256 static inline void
michael@0 1257 event_persist_closure(struct event_base *base, struct event *ev)
michael@0 1258 {
michael@0 1259 /* reschedule the persistent event if we have a timeout. */
michael@0 1260 if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
michael@0 1261 /* If there was a timeout, we want it to run at an interval of
michael@0 1262 * ev_io_timeout after the last time it was _scheduled_ for,
michael@0 1263 * not ev_io_timeout after _now_. If it fired for another
michael@0 1264 * reason, though, the timeout ought to start ticking _now_. */
michael@0 1265 struct timeval run_at, relative_to, delay, now;
michael@0 1266 ev_uint32_t usec_mask = 0;
michael@0 1267 EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
michael@0 1268 &ev->ev_io_timeout));
michael@0 1269 gettime(base, &now);
michael@0 1270 if (is_common_timeout(&ev->ev_timeout, base)) {
michael@0 1271 delay = ev->ev_io_timeout;
michael@0 1272 usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
michael@0 1273 delay.tv_usec &= MICROSECONDS_MASK;
michael@0 1274 if (ev->ev_res & EV_TIMEOUT) {
michael@0 1275 relative_to = ev->ev_timeout;
michael@0 1276 relative_to.tv_usec &= MICROSECONDS_MASK;
michael@0 1277 } else {
michael@0 1278 relative_to = now;
michael@0 1279 }
michael@0 1280 } else {
michael@0 1281 delay = ev->ev_io_timeout;
michael@0 1282 if (ev->ev_res & EV_TIMEOUT) {
michael@0 1283 relative_to = ev->ev_timeout;
michael@0 1284 } else {
michael@0 1285 relative_to = now;
michael@0 1286 }
michael@0 1287 }
michael@0 1288 evutil_timeradd(&relative_to, &delay, &run_at);
michael@0 1289 if (evutil_timercmp(&run_at, &now, <)) {
michael@0 1290 /* Looks like we missed at least one invocation due to
michael@0 1291 * a clock jump, not running the event loop for a
michael@0 1292 * while, really slow callbacks, or
michael@0 1293 * something. Reschedule relative to now.
michael@0 1294 */
michael@0 1295 evutil_timeradd(&now, &delay, &run_at);
michael@0 1296 }
michael@0 1297 run_at.tv_usec |= usec_mask;
michael@0 1298 event_add_internal(ev, &run_at, 1);
michael@0 1299 }
michael@0 1300 EVBASE_RELEASE_LOCK(base, th_base_lock);
michael@0 1301 (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
michael@0 1302 }
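
/*
 * For illustration, a minimal sketch (kept under "#if 0", not compiled
 * here) of the kind of event this closure serves: a persistent EV_READ
 * event with a timeout stays added after every callback, and its timeout
 * is re-armed as described above.  The five-second timeout and the
 * callback body are just examples.
 */
#if 0
#include <event2/event.h>

static void
read_or_timeout_cb(evutil_socket_t fd, short what, void *arg)
{
	if (what & EV_TIMEOUT) {
		/* no data arrived within five seconds */
	} else if (what & EV_READ) {
		/* fd is readable; the event stays added thanks to EV_PERSIST */
	}
	(void)fd; (void)arg;
}

static struct event *
watch_fd(struct event_base *base, evutil_socket_t fd)
{
	struct timeval five_sec = { 5, 0 };
	struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST,
	    read_or_timeout_cb, NULL);

	if (ev)
		event_add(ev, &five_sec);
	return ev;
}
#endif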
michael@0 1303
michael@0 1304 /*
michael@0 1305 Helper for event_process_active to process all the events in a single queue,
michael@0 1306 releasing the lock as we go. This function requires that the lock be held
michael@0 1307 when it's invoked. Returns -1 if we get a signal or an event_break that
michael@0 1308 means we should stop processing any active events now. Otherwise returns
michael@0 1309 the number of non-internal events that we processed.
michael@0 1310 */
michael@0 1311 static int
michael@0 1312 event_process_active_single_queue(struct event_base *base,
michael@0 1313 struct event_list *activeq)
michael@0 1314 {
michael@0 1315 struct event *ev;
michael@0 1316 int count = 0;
michael@0 1317
michael@0 1318 EVUTIL_ASSERT(activeq != NULL);
michael@0 1319
michael@0 1320 for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
michael@0 1321 if (ev->ev_events & EV_PERSIST)
michael@0 1322 event_queue_remove(base, ev, EVLIST_ACTIVE);
michael@0 1323 else
michael@0 1324 event_del_internal(ev);
michael@0 1325 if (!(ev->ev_flags & EVLIST_INTERNAL))
michael@0 1326 ++count;
michael@0 1327
michael@0 1328 event_debug((
michael@0 1329 "event_process_active: event: %p, %s%scall %p",
michael@0 1330 ev,
michael@0 1331 ev->ev_res & EV_READ ? "EV_READ " : " ",
michael@0 1332 ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
michael@0 1333 ev->ev_callback));
michael@0 1334
michael@0 1335 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
michael@0 1336 base->current_event = ev;
michael@0 1337 base->current_event_waiters = 0;
michael@0 1338 #endif
michael@0 1339
michael@0 1340 switch (ev->ev_closure) {
michael@0 1341 case EV_CLOSURE_SIGNAL:
michael@0 1342 event_signal_closure(base, ev);
michael@0 1343 break;
michael@0 1344 case EV_CLOSURE_PERSIST:
michael@0 1345 event_persist_closure(base, ev);
michael@0 1346 break;
michael@0 1347 default:
michael@0 1348 case EV_CLOSURE_NONE:
michael@0 1349 EVBASE_RELEASE_LOCK(base, th_base_lock);
michael@0 1350 (*ev->ev_callback)(
michael@0 1351 ev->ev_fd, ev->ev_res, ev->ev_arg);
michael@0 1352 break;
michael@0 1353 }
michael@0 1354
michael@0 1355 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
michael@0 1356 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
michael@0 1357 base->current_event = NULL;
michael@0 1358 if (base->current_event_waiters) {
michael@0 1359 base->current_event_waiters = 0;
michael@0 1360 EVTHREAD_COND_BROADCAST(base->current_event_cond);
michael@0 1361 }
michael@0 1362 #endif
michael@0 1363
michael@0 1364 if (base->event_break)
michael@0 1365 return -1;
michael@0 1366 if (base->event_continue)
michael@0 1367 break;
michael@0 1368 }
michael@0 1369 return count;
michael@0 1370 }
michael@0 1371
michael@0 1372 /*
michael@0 1373 Process up to MAX_DEFERRED of the deferred_cb entries in 'queue'. If
michael@0 1374 *breakptr becomes set to 1, stop. Requires that we start out holding
michael@0 1375 the lock on 'queue'; releases the lock around 'queue' for each deferred_cb
michael@0 1376 we process.
michael@0 1377 */
michael@0 1378 static int
michael@0 1379 event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr)
michael@0 1380 {
michael@0 1381 int count = 0;
michael@0 1382 struct deferred_cb *cb;
michael@0 1383
michael@0 1384 #define MAX_DEFERRED 16
michael@0 1385 while ((cb = TAILQ_FIRST(&queue->deferred_cb_list))) {
michael@0 1386 cb->queued = 0;
michael@0 1387 TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
michael@0 1388 --queue->active_count;
michael@0 1389 UNLOCK_DEFERRED_QUEUE(queue);
michael@0 1390
michael@0 1391 cb->cb(cb, cb->arg);
michael@0 1392
michael@0 1393 LOCK_DEFERRED_QUEUE(queue);
michael@0 1394 if (*breakptr)
michael@0 1395 return -1;
michael@0 1396 if (++count == MAX_DEFERRED)
michael@0 1397 break;
michael@0 1398 }
michael@0 1399 #undef MAX_DEFERRED
michael@0 1400 return count;
michael@0 1401 }
michael@0 1402
michael@0 1403 /*
michael@0 1404 * Active events are stored in priority queues. Lower priorities are always
michael@0 1405 * processed before higher priorities. Low priority events can starve high
michael@0 1406 * priority ones.
michael@0 1407 */
michael@0 1408
michael@0 1409 static int
michael@0 1410 event_process_active(struct event_base *base)
michael@0 1411 {
michael@0 1412 /* Caller must hold th_base_lock */
michael@0 1413 struct event_list *activeq = NULL;
michael@0 1414 int i, c = 0;
michael@0 1415
michael@0 1416 for (i = 0; i < base->nactivequeues; ++i) {
michael@0 1417 if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
michael@0 1418 base->event_running_priority = i;
michael@0 1419 activeq = &base->activequeues[i];
michael@0 1420 c = event_process_active_single_queue(base, activeq);
michael@0 1421 if (c < 0) {
michael@0 1422 base->event_running_priority = -1;
michael@0 1423 return -1;
michael@0 1424 } else if (c > 0)
michael@0 1425 break; /* Processed a real event; do not
michael@0 1426 * consider lower-priority events */
michael@0 1427 /* If we get here, all of the events we processed
michael@0 1428 * were internal. Continue. */
michael@0 1429 }
michael@0 1430 }
michael@0 1431
michael@0 1432 event_process_deferred_callbacks(&base->defer_queue,&base->event_break);
michael@0 1433 base->event_running_priority = -1;
michael@0 1434 return c;
michael@0 1435 }
michael@0 1436
michael@0 1437 /*
michael@0 1438 * Wait continuously for events. We exit only if no events are left.
michael@0 1439 */
michael@0 1440
michael@0 1441 int
michael@0 1442 event_dispatch(void)
michael@0 1443 {
michael@0 1444 return (event_loop(0));
michael@0 1445 }
michael@0 1446
michael@0 1447 int
michael@0 1448 event_base_dispatch(struct event_base *event_base)
michael@0 1449 {
michael@0 1450 return (event_base_loop(event_base, 0));
michael@0 1451 }
michael@0 1452
michael@0 1453 const char *
michael@0 1454 event_base_get_method(const struct event_base *base)
michael@0 1455 {
michael@0 1456 EVUTIL_ASSERT(base);
michael@0 1457 return (base->evsel->name);
michael@0 1458 }
michael@0 1459
michael@0 1460 /** Callback: used to implement event_base_loopexit by telling the event_base
michael@0 1461 * that it's time to exit its loop. */
michael@0 1462 static void
michael@0 1463 event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
michael@0 1464 {
michael@0 1465 struct event_base *base = arg;
michael@0 1466 base->event_gotterm = 1;
michael@0 1467 }
michael@0 1468
michael@0 1469 int
michael@0 1470 event_loopexit(const struct timeval *tv)
michael@0 1471 {
michael@0 1472 return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
michael@0 1473 current_base, tv));
michael@0 1474 }
michael@0 1475
michael@0 1476 int
michael@0 1477 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
michael@0 1478 {
michael@0 1479 return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
michael@0 1480 event_base, tv));
michael@0 1481 }
michael@0 1482
michael@0 1483 int
michael@0 1484 event_loopbreak(void)
michael@0 1485 {
michael@0 1486 return (event_base_loopbreak(current_base));
michael@0 1487 }
michael@0 1488
michael@0 1489 int
michael@0 1490 event_base_loopbreak(struct event_base *event_base)
michael@0 1491 {
michael@0 1492 int r = 0;
michael@0 1493 if (event_base == NULL)
michael@0 1494 return (-1);
michael@0 1495
michael@0 1496 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
michael@0 1497 event_base->event_break = 1;
michael@0 1498
michael@0 1499 if (EVBASE_NEED_NOTIFY(event_base)) {
michael@0 1500 r = evthread_notify_base(event_base);
michael@0 1501 } else {
michael@0 1502 r = (0);
michael@0 1503 }
michael@0 1504 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
michael@0 1505 return r;
michael@0 1506 }
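/*
 * Editor's illustrative sketch (not part of the upstream file): how a caller
 * might stop a running loop.  event_base_loopexit() lets the current
 * iteration finish (optionally after a delay), while event_base_loopbreak()
 * makes the loop return as soon as the currently running callback completes.
 * The names "stop_cb" and "shut_down_in_two_seconds" are hypothetical.
 */
#if 0
static void
stop_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	(void)fd; (void)what;
	/* Abort the loop right after this callback returns. */
	event_base_loopbreak(base);
}

static void
shut_down_in_two_seconds(struct event_base *base)
{
	struct timeval delay = { 2, 0 };
	/* Ask the loop to exit after roughly two seconds instead. */
	event_base_loopexit(base, &delay);
}
#endif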
michael@0 1507
michael@0 1508 int
michael@0 1509 event_base_got_break(struct event_base *event_base)
michael@0 1510 {
michael@0 1511 int res;
michael@0 1512 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
michael@0 1513 res = event_base->event_break;
michael@0 1514 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
michael@0 1515 return res;
michael@0 1516 }
michael@0 1517
michael@0 1518 int
michael@0 1519 event_base_got_exit(struct event_base *event_base)
michael@0 1520 {
michael@0 1521 int res;
michael@0 1522 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
michael@0 1523 res = event_base->event_gotterm;
michael@0 1524 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
michael@0 1525 return res;
michael@0 1526 }
michael@0 1527
michael@0 1528 /* not thread safe */
michael@0 1529
michael@0 1530 int
michael@0 1531 event_loop(int flags)
michael@0 1532 {
michael@0 1533 return event_base_loop(current_base, flags);
michael@0 1534 }
michael@0 1535
michael@0 1536 int
michael@0 1537 event_base_loop(struct event_base *base, int flags)
michael@0 1538 {
michael@0 1539 const struct eventop *evsel = base->evsel;
michael@0 1540 struct timeval tv;
michael@0 1541 struct timeval *tv_p;
michael@0 1542 int res, done, retval = 0;
michael@0 1543
michael@0 1544 /* Grab the lock. We will release it inside evsel.dispatch, and again
michael@0 1545 * as we invoke user callbacks. */
michael@0 1546 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
michael@0 1547
michael@0 1548 if (base->running_loop) {
michael@0 1549 event_warnx("%s: reentrant invocation. Only one event_base_loop"
michael@0 1550 " can run on each event_base at once.", __func__);
michael@0 1551 EVBASE_RELEASE_LOCK(base, th_base_lock);
michael@0 1552 return -1;
michael@0 1553 }
michael@0 1554
michael@0 1555 base->running_loop = 1;
michael@0 1556
michael@0 1557 clear_time_cache(base);
michael@0 1558
michael@0 1559 if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
michael@0 1560 evsig_set_base(base);
michael@0 1561
michael@0 1562 done = 0;
michael@0 1563
michael@0 1564 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
michael@0 1565 base->th_owner_id = EVTHREAD_GET_ID();
michael@0 1566 #endif
michael@0 1567
michael@0 1568 base->event_gotterm = base->event_break = 0;
michael@0 1569
michael@0 1570 while (!done) {
michael@0 1571 base->event_continue = 0;
michael@0 1572
michael@0 1573 /* Terminate the loop if we have been asked to */
michael@0 1574 if (base->event_gotterm) {
michael@0 1575 break;
michael@0 1576 }
michael@0 1577
michael@0 1578 if (base->event_break) {
michael@0 1579 break;
michael@0 1580 }
michael@0 1581
michael@0 1582 timeout_correct(base, &tv);
michael@0 1583
michael@0 1584 tv_p = &tv;
michael@0 1585 if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
michael@0 1586 timeout_next(base, &tv_p);
michael@0 1587 } else {
michael@0 1588 /*
michael@0 1589 * if we have active events, we just poll new events
michael@0 1590 * without waiting.
michael@0 1591 */
michael@0 1592 evutil_timerclear(&tv);
michael@0 1593 }
michael@0 1594
michael@0 1595 /* If we have no events, we just exit */
michael@0 1596 if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
michael@0 1597 event_debug(("%s: no events registered.", __func__));
michael@0 1598 retval = 1;
michael@0 1599 goto done;
michael@0 1600 }
michael@0 1601
michael@0 1602 /* update last old time */
michael@0 1603 gettime(base, &base->event_tv);
michael@0 1604
michael@0 1605 clear_time_cache(base);
michael@0 1606
michael@0 1607 res = evsel->dispatch(base, tv_p);
michael@0 1608
michael@0 1609 if (res == -1) {
michael@0 1610 event_debug(("%s: dispatch returned unsuccessfully.",
michael@0 1611 __func__));
michael@0 1612 retval = -1;
michael@0 1613 goto done;
michael@0 1614 }
michael@0 1615
michael@0 1616 update_time_cache(base);
michael@0 1617
michael@0 1618 timeout_process(base);
michael@0 1619
michael@0 1620 if (N_ACTIVE_CALLBACKS(base)) {
michael@0 1621 int n = event_process_active(base);
michael@0 1622 if ((flags & EVLOOP_ONCE)
michael@0 1623 && N_ACTIVE_CALLBACKS(base) == 0
michael@0 1624 && n != 0)
michael@0 1625 done = 1;
michael@0 1626 } else if (flags & EVLOOP_NONBLOCK)
michael@0 1627 done = 1;
michael@0 1628 }
michael@0 1629 event_debug(("%s: asked to terminate loop.", __func__));
michael@0 1630
michael@0 1631 done:
michael@0 1632 clear_time_cache(base);
michael@0 1633 base->running_loop = 0;
michael@0 1634
michael@0 1635 EVBASE_RELEASE_LOCK(base, th_base_lock);
michael@0 1636
michael@0 1637 return (retval);
michael@0 1638 }
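/*
 * Editor's illustrative sketch (not part of the upstream file): the effect of
 * the 'flags' argument to event_base_loop().  "run_until_empty" and
 * "poll_once" are hypothetical helper names.
 */
#if 0
static int
run_until_empty(struct event_base *base)
{
	/* Equivalent to event_base_dispatch(): block and keep running
	 * callbacks until no events remain or loopexit/loopbreak is hit. */
	return event_base_loop(base, 0);
}

static int
poll_once(struct event_base *base)
{
	/* Check for ready events without blocking, run their callbacks,
	 * then return immediately. */
	return event_base_loop(base, EVLOOP_NONBLOCK);
}
#endif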
michael@0 1639
michael@0 1640 /* Sets up an event for processing once */
michael@0 1641 struct event_once {
michael@0 1642 struct event ev;
michael@0 1643
michael@0 1644 void (*cb)(evutil_socket_t, short, void *);
michael@0 1645 void *arg;
michael@0 1646 };
michael@0 1647
michael@0 1648 /* One-time callback to implement event_base_once: invokes the user callback,
michael@0 1649 * then deletes the allocated storage */
michael@0 1650 static void
michael@0 1651 event_once_cb(evutil_socket_t fd, short events, void *arg)
michael@0 1652 {
michael@0 1653 struct event_once *eonce = arg;
michael@0 1654
michael@0 1655 (*eonce->cb)(fd, events, eonce->arg);
michael@0 1656 event_debug_unassign(&eonce->ev);
michael@0 1657 mm_free(eonce);
michael@0 1658 }
michael@0 1659
michael@0 1660 /* Not threadsafe.  Schedules a one-shot event on the global current_base. */
michael@0 1661 int
michael@0 1662 event_once(evutil_socket_t fd, short events,
michael@0 1663 void (*callback)(evutil_socket_t, short, void *),
michael@0 1664 void *arg, const struct timeval *tv)
michael@0 1665 {
michael@0 1666 return event_base_once(current_base, fd, events, callback, arg, tv);
michael@0 1667 }
michael@0 1668
michael@0 1669 /* Schedules an event once */
michael@0 1670 int
michael@0 1671 event_base_once(struct event_base *base, evutil_socket_t fd, short events,
michael@0 1672 void (*callback)(evutil_socket_t, short, void *),
michael@0 1673 void *arg, const struct timeval *tv)
michael@0 1674 {
michael@0 1675 struct event_once *eonce;
michael@0 1676 struct timeval etv;
michael@0 1677 int res = 0;
michael@0 1678
michael@0 1679 	/* Signal events and persistent events cannot be scheduled as
michael@0 1680 	 * one-shot events. */
michael@0 1681 if (events & (EV_SIGNAL|EV_PERSIST))
michael@0 1682 return (-1);
michael@0 1683
michael@0 1684 if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
michael@0 1685 return (-1);
michael@0 1686
michael@0 1687 eonce->cb = callback;
michael@0 1688 eonce->arg = arg;
michael@0 1689
michael@0 1690 if (events == EV_TIMEOUT) {
michael@0 1691 if (tv == NULL) {
michael@0 1692 evutil_timerclear(&etv);
michael@0 1693 tv = &etv;
michael@0 1694 }
michael@0 1695
michael@0 1696 evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
michael@0 1697 } else if (events & (EV_READ|EV_WRITE)) {
michael@0 1698 events &= EV_READ|EV_WRITE;
michael@0 1699
michael@0 1700 event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
michael@0 1701 } else {
michael@0 1702 /* Bad event combination */
michael@0 1703 mm_free(eonce);
michael@0 1704 return (-1);
michael@0 1705 }
michael@0 1706
michael@0 1707 if (res == 0)
michael@0 1708 res = event_add(&eonce->ev, tv);
michael@0 1709 if (res != 0) {
michael@0 1710 mm_free(eonce);
michael@0 1711 return (res);
michael@0 1712 }
michael@0 1713
michael@0 1714 return (0);
michael@0 1715 }
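/*
 * Editor's illustrative sketch (not part of the upstream file): scheduling a
 * one-shot pure timeout with event_base_once().  The event storage is
 * allocated and freed internally, so the caller never sees a struct event.
 * "once_cb" and "schedule_once" are hypothetical names.
 */
#if 0
static void
once_cb(evutil_socket_t fd, short what, void *arg)
{
	(void)fd; (void)what;
	fprintf(stderr, "one-shot timeout fired: %s\n", (const char *)arg);
}

static int
schedule_once(struct event_base *base)
{
	struct timeval half_second = { 0, 500000 };
	/* fd is -1 because this is a pure timeout; EV_PERSIST and
	 * EV_SIGNAL are rejected by event_base_once(). */
	return event_base_once(base, -1, EV_TIMEOUT, once_cb,
	    (void *)"half a second elapsed", &half_second);
}
#endif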
michael@0 1716
michael@0 1717 int
michael@0 1718 event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
michael@0 1719 {
michael@0 1720 if (!base)
michael@0 1721 base = current_base;
michael@0 1722
michael@0 1723 _event_debug_assert_not_added(ev);
michael@0 1724
michael@0 1725 ev->ev_base = base;
michael@0 1726
michael@0 1727 ev->ev_callback = callback;
michael@0 1728 ev->ev_arg = arg;
michael@0 1729 ev->ev_fd = fd;
michael@0 1730 ev->ev_events = events;
michael@0 1731 ev->ev_res = 0;
michael@0 1732 ev->ev_flags = EVLIST_INIT;
michael@0 1733 ev->ev_ncalls = 0;
michael@0 1734 ev->ev_pncalls = NULL;
michael@0 1735
michael@0 1736 if (events & EV_SIGNAL) {
michael@0 1737 if ((events & (EV_READ|EV_WRITE)) != 0) {
michael@0 1738 event_warnx("%s: EV_SIGNAL is not compatible with "
michael@0 1739 "EV_READ or EV_WRITE", __func__);
michael@0 1740 return -1;
michael@0 1741 }
michael@0 1742 ev->ev_closure = EV_CLOSURE_SIGNAL;
michael@0 1743 } else {
michael@0 1744 if (events & EV_PERSIST) {
michael@0 1745 evutil_timerclear(&ev->ev_io_timeout);
michael@0 1746 ev->ev_closure = EV_CLOSURE_PERSIST;
michael@0 1747 } else {
michael@0 1748 ev->ev_closure = EV_CLOSURE_NONE;
michael@0 1749 }
michael@0 1750 }
michael@0 1751
michael@0 1752 min_heap_elem_init(ev);
michael@0 1753
michael@0 1754 if (base != NULL) {
michael@0 1755 /* by default, we put new events into the middle priority */
michael@0 1756 ev->ev_pri = base->nactivequeues / 2;
michael@0 1757 }
michael@0 1758
michael@0 1759 _event_debug_note_setup(ev);
michael@0 1760
michael@0 1761 return 0;
michael@0 1762 }
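/*
 * Editor's illustrative sketch (not part of the upstream file): event_assign()
 * initializes caller-owned storage, which is useful when the struct event is
 * embedded in a larger object.  "struct connection" and "conn_read_cb" are
 * hypothetical.
 */
#if 0
struct connection {
	struct event read_ev;
	evutil_socket_t fd;
};

static void conn_read_cb(evutil_socket_t fd, short what, void *arg);

static int
connection_start(struct event_base *base, struct connection *conn)
{
	/* EV_PERSIST keeps the event added after each callback runs. */
	if (event_assign(&conn->read_ev, base, conn->fd,
	    EV_READ|EV_PERSIST, conn_read_cb, conn) < 0)
		return -1;
	return event_add(&conn->read_ev, NULL);
}
#endif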
michael@0 1763
michael@0 1764 int
michael@0 1765 event_base_set(struct event_base *base, struct event *ev)
michael@0 1766 {
michael@0 1767 /* Only innocent events may be assigned to a different base */
michael@0 1768 if (ev->ev_flags != EVLIST_INIT)
michael@0 1769 return (-1);
michael@0 1770
michael@0 1771 _event_debug_assert_is_setup(ev);
michael@0 1772
michael@0 1773 ev->ev_base = base;
michael@0 1774 ev->ev_pri = base->nactivequeues/2;
michael@0 1775
michael@0 1776 return (0);
michael@0 1777 }
michael@0 1778
michael@0 1779 void
michael@0 1780 event_set(struct event *ev, evutil_socket_t fd, short events,
michael@0 1781 void (*callback)(evutil_socket_t, short, void *), void *arg)
michael@0 1782 {
michael@0 1783 int r;
michael@0 1784 r = event_assign(ev, current_base, fd, events, callback, arg);
michael@0 1785 EVUTIL_ASSERT(r == 0);
michael@0 1786 }
michael@0 1787
michael@0 1788 struct event *
michael@0 1789 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
michael@0 1790 {
michael@0 1791 struct event *ev;
michael@0 1792 ev = mm_malloc(sizeof(struct event));
michael@0 1793 if (ev == NULL)
michael@0 1794 return (NULL);
michael@0 1795 if (event_assign(ev, base, fd, events, cb, arg) < 0) {
michael@0 1796 mm_free(ev);
michael@0 1797 return (NULL);
michael@0 1798 }
michael@0 1799
michael@0 1800 return (ev);
michael@0 1801 }
michael@0 1802
michael@0 1803 void
michael@0 1804 event_free(struct event *ev)
michael@0 1805 {
michael@0 1806 _event_debug_assert_is_setup(ev);
michael@0 1807
michael@0 1808 /* make sure that this event won't be coming back to haunt us. */
michael@0 1809 event_del(ev);
michael@0 1810 _event_debug_note_teardown(ev);
michael@0 1811 mm_free(ev);
michael@0 1812
michael@0 1813 }
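/*
 * Editor's illustrative sketch (not part of the upstream file): the
 * heap-allocated counterpart to event_assign(): event_new() allocates and
 * initializes, event_free() deletes the event and releases the storage.
 * "watch_fd" and "read_ready_cb" are hypothetical names.
 */
#if 0
static void read_ready_cb(evutil_socket_t fd, short what, void *arg);

static struct event *
watch_fd(struct event_base *base, evutil_socket_t fd)
{
	struct event *ev = event_new(base, fd, EV_READ|EV_PERSIST,
	    read_ready_cb, NULL);
	if (ev == NULL || event_add(ev, NULL) < 0) {
		if (ev)
			event_free(ev);	/* also removes the event */
		return NULL;
	}
	return ev;
}
#endif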
michael@0 1814
michael@0 1815 void
michael@0 1816 event_debug_unassign(struct event *ev)
michael@0 1817 {
michael@0 1818 _event_debug_assert_not_added(ev);
michael@0 1819 _event_debug_note_teardown(ev);
michael@0 1820
michael@0 1821 ev->ev_flags &= ~EVLIST_INIT;
michael@0 1822 }
michael@0 1823
michael@0 1824 /*
michael@0 1825  * Sets the priority of an event - if the event is already active,
michael@0 1826  * changing the priority will fail.
michael@0 1827 */
michael@0 1828
michael@0 1829 int
michael@0 1830 event_priority_set(struct event *ev, int pri)
michael@0 1831 {
michael@0 1832 _event_debug_assert_is_setup(ev);
michael@0 1833
michael@0 1834 if (ev->ev_flags & EVLIST_ACTIVE)
michael@0 1835 return (-1);
michael@0 1836 if (pri < 0 || pri >= ev->ev_base->nactivequeues)
michael@0 1837 return (-1);
michael@0 1838
michael@0 1839 ev->ev_pri = pri;
michael@0 1840
michael@0 1841 return (0);
michael@0 1842 }
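/*
 * Editor's illustrative sketch (not part of the upstream file): using
 * priorities.  The number of queues is fixed with event_base_priority_init(),
 * and an event can then be moved out of the default (middle) priority before
 * it is added.  "make_urgent" and "urgent_ev" are hypothetical names.
 */
#if 0
static int
make_urgent(struct event_base *base, struct event *urgent_ev)
{
	/* Three queues: 0 runs first, 2 runs last. */
	if (event_base_priority_init(base, 3) < 0)
		return -1;
	/* Fails if the event is currently active. */
	return event_priority_set(urgent_ev, 0);
}
#endif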
michael@0 1843
michael@0 1844 /*
michael@0 1845 * Checks if a specific event is pending or scheduled.
michael@0 1846 */
michael@0 1847
michael@0 1848 int
michael@0 1849 event_pending(const struct event *ev, short event, struct timeval *tv)
michael@0 1850 {
michael@0 1851 int flags = 0;
michael@0 1852
michael@0 1853 if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
michael@0 1854 event_warnx("%s: event has no event_base set.", __func__);
michael@0 1855 return 0;
michael@0 1856 }
michael@0 1857
michael@0 1858 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
michael@0 1859 _event_debug_assert_is_setup(ev);
michael@0 1860
michael@0 1861 if (ev->ev_flags & EVLIST_INSERTED)
michael@0 1862 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
michael@0 1863 if (ev->ev_flags & EVLIST_ACTIVE)
michael@0 1864 flags |= ev->ev_res;
michael@0 1865 if (ev->ev_flags & EVLIST_TIMEOUT)
michael@0 1866 flags |= EV_TIMEOUT;
michael@0 1867
michael@0 1868 event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);
michael@0 1869
michael@0 1870 /* See if there is a timeout that we should report */
michael@0 1871 if (tv != NULL && (flags & event & EV_TIMEOUT)) {
michael@0 1872 struct timeval tmp = ev->ev_timeout;
michael@0 1873 tmp.tv_usec &= MICROSECONDS_MASK;
michael@0 1874 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
michael@0 1875 /* correctly remap to real time */
michael@0 1876 evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
michael@0 1877 #else
michael@0 1878 *tv = tmp;
michael@0 1879 #endif
michael@0 1880 }
michael@0 1881
michael@0 1882 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
michael@0 1883
michael@0 1884 return (flags & event);
michael@0 1885 }
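/*
 * Editor's illustrative sketch (not part of the upstream file): querying an
 * event with event_pending().  The mask selects which states the caller cares
 * about; if EV_TIMEOUT is requested and a timeout is pending, the expiry time
 * is copied into 'tv'.  "report_pending" is a hypothetical name and 'ev' is
 * assumed to be an initialized event.
 */
#if 0
static void
report_pending(const struct event *ev)
{
	struct timeval expiry;
	int what = event_pending(ev, EV_READ|EV_WRITE|EV_TIMEOUT, &expiry);
	if (what & (EV_READ|EV_WRITE))
		fprintf(stderr, "I/O event is added\n");
	if (what & EV_TIMEOUT)
		fprintf(stderr, "timeout pending at %ld.%06ld\n",
		    (long)expiry.tv_sec, (long)expiry.tv_usec);
}
#endif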
michael@0 1886
michael@0 1887 int
michael@0 1888 event_initialized(const struct event *ev)
michael@0 1889 {
michael@0 1890 if (!(ev->ev_flags & EVLIST_INIT))
michael@0 1891 return 0;
michael@0 1892
michael@0 1893 return 1;
michael@0 1894 }
michael@0 1895
michael@0 1896 void
michael@0 1897 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
michael@0 1898 {
michael@0 1899 _event_debug_assert_is_setup(event);
michael@0 1900
michael@0 1901 if (base_out)
michael@0 1902 *base_out = event->ev_base;
michael@0 1903 if (fd_out)
michael@0 1904 *fd_out = event->ev_fd;
michael@0 1905 if (events_out)
michael@0 1906 *events_out = event->ev_events;
michael@0 1907 if (callback_out)
michael@0 1908 *callback_out = event->ev_callback;
michael@0 1909 if (arg_out)
michael@0 1910 *arg_out = event->ev_arg;
michael@0 1911 }
michael@0 1912
michael@0 1913 size_t
michael@0 1914 event_get_struct_event_size(void)
michael@0 1915 {
michael@0 1916 return sizeof(struct event);
michael@0 1917 }
michael@0 1918
michael@0 1919 evutil_socket_t
michael@0 1920 event_get_fd(const struct event *ev)
michael@0 1921 {
michael@0 1922 _event_debug_assert_is_setup(ev);
michael@0 1923 return ev->ev_fd;
michael@0 1924 }
michael@0 1925
michael@0 1926 struct event_base *
michael@0 1927 event_get_base(const struct event *ev)
michael@0 1928 {
michael@0 1929 _event_debug_assert_is_setup(ev);
michael@0 1930 return ev->ev_base;
michael@0 1931 }
michael@0 1932
michael@0 1933 short
michael@0 1934 event_get_events(const struct event *ev)
michael@0 1935 {
michael@0 1936 _event_debug_assert_is_setup(ev);
michael@0 1937 return ev->ev_events;
michael@0 1938 }
michael@0 1939
michael@0 1940 event_callback_fn
michael@0 1941 event_get_callback(const struct event *ev)
michael@0 1942 {
michael@0 1943 _event_debug_assert_is_setup(ev);
michael@0 1944 return ev->ev_callback;
michael@0 1945 }
michael@0 1946
michael@0 1947 void *
michael@0 1948 event_get_callback_arg(const struct event *ev)
michael@0 1949 {
michael@0 1950 _event_debug_assert_is_setup(ev);
michael@0 1951 return ev->ev_arg;
michael@0 1952 }
michael@0 1953
michael@0 1954 int
michael@0 1955 event_add(struct event *ev, const struct timeval *tv)
michael@0 1956 {
michael@0 1957 int res;
michael@0 1958
michael@0 1959 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
michael@0 1960 event_warnx("%s: event has no event_base set.", __func__);
michael@0 1961 return -1;
michael@0 1962 }
michael@0 1963
michael@0 1964 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
michael@0 1965
michael@0 1966 res = event_add_internal(ev, tv, 0);
michael@0 1967
michael@0 1968 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
michael@0 1969
michael@0 1970 return (res);
michael@0 1971 }
michael@0 1972
michael@0 1973 /* Helper callback: wake an event_base from another thread. This version
michael@0 1974 * works by writing a byte to one end of a socketpair, so that the event_base
michael@0 1975 * listening on the other end will wake up as the corresponding event
michael@0 1976 * triggers */
michael@0 1977 static int
michael@0 1978 evthread_notify_base_default(struct event_base *base)
michael@0 1979 {
michael@0 1980 char buf[1];
michael@0 1981 int r;
michael@0 1982 buf[0] = (char) 0;
michael@0 1983 #ifdef WIN32
michael@0 1984 r = send(base->th_notify_fd[1], buf, 1, 0);
michael@0 1985 #else
michael@0 1986 r = write(base->th_notify_fd[1], buf, 1);
michael@0 1987 #endif
michael@0 1988 return (r < 0 && errno != EAGAIN) ? -1 : 0;
michael@0 1989 }
michael@0 1990
michael@0 1991 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
michael@0 1992 /* Helper callback: wake an event_base from another thread. This version
michael@0 1993 * assumes that you have a working eventfd() implementation. */
michael@0 1994 static int
michael@0 1995 evthread_notify_base_eventfd(struct event_base *base)
michael@0 1996 {
michael@0 1997 ev_uint64_t msg = 1;
michael@0 1998 int r;
michael@0 1999 do {
michael@0 2000 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
michael@0 2001 } while (r < 0 && errno == EAGAIN);
michael@0 2002
michael@0 2003 return (r < 0) ? -1 : 0;
michael@0 2004 }
michael@0 2005 #endif
michael@0 2006
michael@0 2007 /** Tell the thread currently running the event_loop for base (if any) that it
michael@0 2008 * needs to stop waiting in its dispatch function (if it is) and process all
michael@0 2009 * active events and deferred callbacks (if there are any). */
michael@0 2010 static int
michael@0 2011 evthread_notify_base(struct event_base *base)
michael@0 2012 {
michael@0 2013 EVENT_BASE_ASSERT_LOCKED(base);
michael@0 2014 if (!base->th_notify_fn)
michael@0 2015 return -1;
michael@0 2016 if (base->is_notify_pending)
michael@0 2017 return 0;
michael@0 2018 base->is_notify_pending = 1;
michael@0 2019 return base->th_notify_fn(base);
michael@0 2020 }
michael@0 2021
michael@0 2022 /* Implementation function to add an event. Works just like event_add,
michael@0 2023 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
michael@0 2024 * we treat tv as an absolute time, not as an interval to add to the current
michael@0 2025 * time */
michael@0 2026 static inline int
michael@0 2027 event_add_internal(struct event *ev, const struct timeval *tv,
michael@0 2028 int tv_is_absolute)
michael@0 2029 {
michael@0 2030 struct event_base *base = ev->ev_base;
michael@0 2031 int res = 0;
michael@0 2032 int notify = 0;
michael@0 2033
michael@0 2034 EVENT_BASE_ASSERT_LOCKED(base);
michael@0 2035 _event_debug_assert_is_setup(ev);
michael@0 2036
michael@0 2037 event_debug((
michael@0 2038 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%scall %p",
michael@0 2039 ev,
michael@0 2040 EV_SOCK_ARG(ev->ev_fd),
michael@0 2041 ev->ev_events & EV_READ ? "EV_READ " : " ",
michael@0 2042 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
michael@0 2043 tv ? "EV_TIMEOUT " : " ",
michael@0 2044 ev->ev_callback));
michael@0 2045
michael@0 2046 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
michael@0 2047
michael@0 2048 /*
michael@0 2049 	 * prepare for timeout insertion further below; if we get a
michael@0 2050 * failure on any step, we should not change any state.
michael@0 2051 */
michael@0 2052 if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
michael@0 2053 if (min_heap_reserve(&base->timeheap,
michael@0 2054 1 + min_heap_size(&base->timeheap)) == -1)
michael@0 2055 return (-1); /* ENOMEM == errno */
michael@0 2056 }
michael@0 2057
michael@0 2058 /* If the main thread is currently executing a signal event's
michael@0 2059 * callback, and we are not the main thread, then we want to wait
michael@0 2060 * until the callback is done before we mess with the event, or else
michael@0 2061 * we can race on ev_ncalls and ev_pncalls below. */
michael@0 2062 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
michael@0 2063 if (base->current_event == ev && (ev->ev_events & EV_SIGNAL)
michael@0 2064 && !EVBASE_IN_THREAD(base)) {
michael@0 2065 ++base->current_event_waiters;
michael@0 2066 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
michael@0 2067 }
michael@0 2068 #endif
michael@0 2069
michael@0 2070 if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
michael@0 2071 !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
michael@0 2072 if (ev->ev_events & (EV_READ|EV_WRITE))
michael@0 2073 res = evmap_io_add(base, ev->ev_fd, ev);
michael@0 2074 else if (ev->ev_events & EV_SIGNAL)
michael@0 2075 res = evmap_signal_add(base, (int)ev->ev_fd, ev);
michael@0 2076 if (res != -1)
michael@0 2077 event_queue_insert(base, ev, EVLIST_INSERTED);
michael@0 2078 if (res == 1) {
michael@0 2079 /* evmap says we need to notify the main thread. */
michael@0 2080 notify = 1;
michael@0 2081 res = 0;
michael@0 2082 }
michael@0 2083 }
michael@0 2084
michael@0 2085 /*
michael@0 2086 * we should change the timeout state only if the previous event
michael@0 2087 * addition succeeded.
michael@0 2088 */
michael@0 2089 if (res != -1 && tv != NULL) {
michael@0 2090 struct timeval now;
michael@0 2091 int common_timeout;
michael@0 2092
michael@0 2093 /*
michael@0 2094 * for persistent timeout events, we remember the
michael@0 2095 * timeout value and re-add the event.
michael@0 2096 *
michael@0 2097 * If tv_is_absolute, this was already set.
michael@0 2098 */
michael@0 2099 if (ev->ev_closure == EV_CLOSURE_PERSIST && !tv_is_absolute)
michael@0 2100 ev->ev_io_timeout = *tv;
michael@0 2101
michael@0 2102 /*
michael@0 2103 * we already reserved memory above for the case where we
michael@0 2104 * are not replacing an existing timeout.
michael@0 2105 */
michael@0 2106 if (ev->ev_flags & EVLIST_TIMEOUT) {
michael@0 2107 /* XXX I believe this is needless. */
michael@0 2108 if (min_heap_elt_is_top(ev))
michael@0 2109 notify = 1;
michael@0 2110 event_queue_remove(base, ev, EVLIST_TIMEOUT);
michael@0 2111 }
michael@0 2112
michael@0 2113 /* Check if it is active due to a timeout. Rescheduling
michael@0 2114 * this timeout before the callback can be executed
michael@0 2115 * removes it from the active list. */
michael@0 2116 if ((ev->ev_flags & EVLIST_ACTIVE) &&
michael@0 2117 (ev->ev_res & EV_TIMEOUT)) {
michael@0 2118 if (ev->ev_events & EV_SIGNAL) {
michael@0 2119 			/* See if the event is only active because
michael@0 2120 			 * its signal callback is being run in a loop
michael@0 2121 */
michael@0 2122 if (ev->ev_ncalls && ev->ev_pncalls) {
michael@0 2123 /* Abort loop */
michael@0 2124 *ev->ev_pncalls = 0;
michael@0 2125 }
michael@0 2126 }
michael@0 2127
michael@0 2128 event_queue_remove(base, ev, EVLIST_ACTIVE);
michael@0 2129 }
michael@0 2130
michael@0 2131 gettime(base, &now);
michael@0 2132
michael@0 2133 common_timeout = is_common_timeout(tv, base);
michael@0 2134 if (tv_is_absolute) {
michael@0 2135 ev->ev_timeout = *tv;
michael@0 2136 } else if (common_timeout) {
michael@0 2137 struct timeval tmp = *tv;
michael@0 2138 tmp.tv_usec &= MICROSECONDS_MASK;
michael@0 2139 evutil_timeradd(&now, &tmp, &ev->ev_timeout);
michael@0 2140 ev->ev_timeout.tv_usec |=
michael@0 2141 (tv->tv_usec & ~MICROSECONDS_MASK);
michael@0 2142 } else {
michael@0 2143 evutil_timeradd(&now, tv, &ev->ev_timeout);
michael@0 2144 }
michael@0 2145
michael@0 2146 event_debug((
michael@0 2147 "event_add: timeout in %d seconds, call %p",
michael@0 2148 (int)tv->tv_sec, ev->ev_callback));
michael@0 2149
michael@0 2150 event_queue_insert(base, ev, EVLIST_TIMEOUT);
michael@0 2151 if (common_timeout) {
michael@0 2152 struct common_timeout_list *ctl =
michael@0 2153 get_common_timeout_list(base, &ev->ev_timeout);
michael@0 2154 if (ev == TAILQ_FIRST(&ctl->events)) {
michael@0 2155 common_timeout_schedule(ctl, &now, ev);
michael@0 2156 }
michael@0 2157 } else {
michael@0 2158 /* See if the earliest timeout is now earlier than it
michael@0 2159 * was before: if so, we will need to tell the main
michael@0 2160 * thread to wake up earlier than it would
michael@0 2161 * otherwise. */
michael@0 2162 if (min_heap_elt_is_top(ev))
michael@0 2163 notify = 1;
michael@0 2164 }
michael@0 2165 }
michael@0 2166
michael@0 2167 /* if we are not in the right thread, we need to wake up the loop */
michael@0 2168 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
michael@0 2169 evthread_notify_base(base);
michael@0 2170
michael@0 2171 _event_debug_note_add(ev);
michael@0 2172
michael@0 2173 return (res);
michael@0 2174 }
michael@0 2175
michael@0 2176 int
michael@0 2177 event_del(struct event *ev)
michael@0 2178 {
michael@0 2179 int res;
michael@0 2180
michael@0 2181 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
michael@0 2182 event_warnx("%s: event has no event_base set.", __func__);
michael@0 2183 return -1;
michael@0 2184 }
michael@0 2185
michael@0 2186 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
michael@0 2187
michael@0 2188 res = event_del_internal(ev);
michael@0 2189
michael@0 2190 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
michael@0 2191
michael@0 2192 return (res);
michael@0 2193 }
michael@0 2194
michael@0 2195 /* Helper for event_del: always called with th_base_lock held. */
michael@0 2196 static inline int
michael@0 2197 event_del_internal(struct event *ev)
michael@0 2198 {
michael@0 2199 struct event_base *base;
michael@0 2200 int res = 0, notify = 0;
michael@0 2201
michael@0 2202 event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
michael@0 2203 ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
michael@0 2204
michael@0 2205 /* An event without a base has not been added */
michael@0 2206 if (ev->ev_base == NULL)
michael@0 2207 return (-1);
michael@0 2208
michael@0 2209 EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
michael@0 2210
michael@0 2211 /* If the main thread is currently executing this event's callback,
michael@0 2212 * and we are not the main thread, then we want to wait until the
michael@0 2213 * callback is done before we start removing the event. That way,
michael@0 2214 * when this function returns, it will be safe to free the
michael@0 2215 * user-supplied argument. */
michael@0 2216 base = ev->ev_base;
michael@0 2217 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
michael@0 2218 if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
michael@0 2219 ++base->current_event_waiters;
michael@0 2220 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
michael@0 2221 }
michael@0 2222 #endif
michael@0 2223
michael@0 2224 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
michael@0 2225
michael@0 2226 	/* See if the event is only active because its callback is being run in a loop */
michael@0 2227 if (ev->ev_events & EV_SIGNAL) {
michael@0 2228 if (ev->ev_ncalls && ev->ev_pncalls) {
michael@0 2229 /* Abort loop */
michael@0 2230 *ev->ev_pncalls = 0;
michael@0 2231 }
michael@0 2232 }
michael@0 2233
michael@0 2234 if (ev->ev_flags & EVLIST_TIMEOUT) {
michael@0 2235 /* NOTE: We never need to notify the main thread because of a
michael@0 2236 * deleted timeout event: all that could happen if we don't is
michael@0 2237 * that the dispatch loop might wake up too early. But the
michael@0 2238 * point of notifying the main thread _is_ to wake up the
michael@0 2239 * dispatch loop early anyway, so we wouldn't gain anything by
michael@0 2240 * doing it.
michael@0 2241 */
michael@0 2242 event_queue_remove(base, ev, EVLIST_TIMEOUT);
michael@0 2243 }
michael@0 2244
michael@0 2245 if (ev->ev_flags & EVLIST_ACTIVE)
michael@0 2246 event_queue_remove(base, ev, EVLIST_ACTIVE);
michael@0 2247
michael@0 2248 if (ev->ev_flags & EVLIST_INSERTED) {
michael@0 2249 event_queue_remove(base, ev, EVLIST_INSERTED);
michael@0 2250 if (ev->ev_events & (EV_READ|EV_WRITE))
michael@0 2251 res = evmap_io_del(base, ev->ev_fd, ev);
michael@0 2252 else
michael@0 2253 res = evmap_signal_del(base, (int)ev->ev_fd, ev);
michael@0 2254 if (res == 1) {
michael@0 2255 /* evmap says we need to notify the main thread. */
michael@0 2256 notify = 1;
michael@0 2257 res = 0;
michael@0 2258 }
michael@0 2259 }
michael@0 2260
michael@0 2261 /* if we are not in the right thread, we need to wake up the loop */
michael@0 2262 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
michael@0 2263 evthread_notify_base(base);
michael@0 2264
michael@0 2265 _event_debug_note_del(ev);
michael@0 2266
michael@0 2267 return (res);
michael@0 2268 }
michael@0 2269
michael@0 2270 void
michael@0 2271 event_active(struct event *ev, int res, short ncalls)
michael@0 2272 {
michael@0 2273 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
michael@0 2274 event_warnx("%s: event has no event_base set.", __func__);
michael@0 2275 return;
michael@0 2276 }
michael@0 2277
michael@0 2278 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
michael@0 2279
michael@0 2280 _event_debug_assert_is_setup(ev);
michael@0 2281
michael@0 2282 event_active_nolock(ev, res, ncalls);
michael@0 2283
michael@0 2284 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
michael@0 2285 }
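/*
 * Editor's illustrative sketch (not part of the upstream file): manually
 * activating an event from outside the loop.  The callback runs on the next
 * loop iteration with the 'res' flags given here, whether or not the
 * underlying fd is actually ready; the notify machinery above wakes a loop
 * that is blocked in dispatch.  "kick_event" is hypothetical and 'ev' is
 * assumed to be an assigned event.
 */
#if 0
static void
kick_event(struct event *ev)
{
	/* ncalls is only meaningful for signal events; 1 is conventional. */
	event_active(ev, EV_READ, 1);
}
#endif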
michael@0 2286
michael@0 2287
michael@0 2288 void
michael@0 2289 event_active_nolock(struct event *ev, int res, short ncalls)
michael@0 2290 {
michael@0 2291 struct event_base *base;
michael@0 2292
michael@0 2293 event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
michael@0 2294 ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
michael@0 2295
michael@0 2296
michael@0 2297 /* We get different kinds of events, add them together */
michael@0 2298 if (ev->ev_flags & EVLIST_ACTIVE) {
michael@0 2299 ev->ev_res |= res;
michael@0 2300 return;
michael@0 2301 }
michael@0 2302
michael@0 2303 base = ev->ev_base;
michael@0 2304
michael@0 2305 EVENT_BASE_ASSERT_LOCKED(base);
michael@0 2306
michael@0 2307 ev->ev_res = res;
michael@0 2308
michael@0 2309 if (ev->ev_pri < base->event_running_priority)
michael@0 2310 base->event_continue = 1;
michael@0 2311
michael@0 2312 if (ev->ev_events & EV_SIGNAL) {
michael@0 2313 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
michael@0 2314 if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
michael@0 2315 ++base->current_event_waiters;
michael@0 2316 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
michael@0 2317 }
michael@0 2318 #endif
michael@0 2319 ev->ev_ncalls = ncalls;
michael@0 2320 ev->ev_pncalls = NULL;
michael@0 2321 }
michael@0 2322
michael@0 2323 event_queue_insert(base, ev, EVLIST_ACTIVE);
michael@0 2324
michael@0 2325 if (EVBASE_NEED_NOTIFY(base))
michael@0 2326 evthread_notify_base(base);
michael@0 2327 }
michael@0 2328
michael@0 2329 void
michael@0 2330 event_deferred_cb_init(struct deferred_cb *cb, deferred_cb_fn fn, void *arg)
michael@0 2331 {
michael@0 2332 memset(cb, 0, sizeof(struct deferred_cb));
michael@0 2333 cb->cb = fn;
michael@0 2334 cb->arg = arg;
michael@0 2335 }
michael@0 2336
michael@0 2337 void
michael@0 2338 event_deferred_cb_cancel(struct deferred_cb_queue *queue,
michael@0 2339 struct deferred_cb *cb)
michael@0 2340 {
michael@0 2341 if (!queue) {
michael@0 2342 if (current_base)
michael@0 2343 queue = &current_base->defer_queue;
michael@0 2344 else
michael@0 2345 return;
michael@0 2346 }
michael@0 2347
michael@0 2348 LOCK_DEFERRED_QUEUE(queue);
michael@0 2349 if (cb->queued) {
michael@0 2350 TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
michael@0 2351 --queue->active_count;
michael@0 2352 cb->queued = 0;
michael@0 2353 }
michael@0 2354 UNLOCK_DEFERRED_QUEUE(queue);
michael@0 2355 }
michael@0 2356
michael@0 2357 void
michael@0 2358 event_deferred_cb_schedule(struct deferred_cb_queue *queue,
michael@0 2359 struct deferred_cb *cb)
michael@0 2360 {
michael@0 2361 if (!queue) {
michael@0 2362 if (current_base)
michael@0 2363 queue = &current_base->defer_queue;
michael@0 2364 else
michael@0 2365 return;
michael@0 2366 }
michael@0 2367
michael@0 2368 LOCK_DEFERRED_QUEUE(queue);
michael@0 2369 if (!cb->queued) {
michael@0 2370 cb->queued = 1;
michael@0 2371 TAILQ_INSERT_TAIL(&queue->deferred_cb_list, cb, cb_next);
michael@0 2372 ++queue->active_count;
michael@0 2373 if (queue->notify_fn)
michael@0 2374 queue->notify_fn(queue, queue->notify_arg);
michael@0 2375 }
michael@0 2376 UNLOCK_DEFERRED_QUEUE(queue);
michael@0 2377 }
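/*
 * Editor's illustrative sketch (not part of the upstream file): the deferred
 * callback helpers above are an internal API (defer-internal.h), used for
 * example by bufferevents to postpone work until the loop returns to
 * event_process_active().  Passing a NULL queue falls back to the current
 * base's defer_queue, as the functions above show.  "deferred_work_cb" and
 * "queue_deferred_work" are hypothetical names; 'storage' must stay alive
 * until the callback has run or been cancelled.
 */
#if 0
static void
deferred_work_cb(struct deferred_cb *cb, void *arg)
{
	(void)cb;
	fprintf(stderr, "deferred: %s\n", (const char *)arg);
}

static void
queue_deferred_work(struct deferred_cb *storage)
{
	event_deferred_cb_init(storage, deferred_work_cb, (void *)"hello");
	/* NULL queue == use current_base->defer_queue (see above). */
	event_deferred_cb_schedule(NULL, storage);
}
#endif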
michael@0 2378
michael@0 2379 static int
michael@0 2380 timeout_next(struct event_base *base, struct timeval **tv_p)
michael@0 2381 {
michael@0 2382 /* Caller must hold th_base_lock */
michael@0 2383 struct timeval now;
michael@0 2384 struct event *ev;
michael@0 2385 struct timeval *tv = *tv_p;
michael@0 2386 int res = 0;
michael@0 2387
michael@0 2388 ev = min_heap_top(&base->timeheap);
michael@0 2389
michael@0 2390 if (ev == NULL) {
michael@0 2391 		/* if no time-based events are active, wait for I/O */
michael@0 2392 *tv_p = NULL;
michael@0 2393 goto out;
michael@0 2394 }
michael@0 2395
michael@0 2396 if (gettime(base, &now) == -1) {
michael@0 2397 res = -1;
michael@0 2398 goto out;
michael@0 2399 }
michael@0 2400
michael@0 2401 if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
michael@0 2402 evutil_timerclear(tv);
michael@0 2403 goto out;
michael@0 2404 }
michael@0 2405
michael@0 2406 evutil_timersub(&ev->ev_timeout, &now, tv);
michael@0 2407
michael@0 2408 EVUTIL_ASSERT(tv->tv_sec >= 0);
michael@0 2409 EVUTIL_ASSERT(tv->tv_usec >= 0);
michael@0 2410 event_debug(("timeout_next: in %d seconds", (int)tv->tv_sec));
michael@0 2411
michael@0 2412 out:
michael@0 2413 return (res);
michael@0 2414 }
michael@0 2415
michael@0 2416 /*
michael@0 2417 * Determines if the time is running backwards by comparing the current time
michael@0 2418  * against the last time we checked.  Not needed when using a monotonic clock.
michael@0 2419 * If time is running backwards, we adjust the firing time of every event by
michael@0 2420 * the amount that time seems to have jumped.
michael@0 2421 */
michael@0 2422 static void
michael@0 2423 timeout_correct(struct event_base *base, struct timeval *tv)
michael@0 2424 {
michael@0 2425 /* Caller must hold th_base_lock. */
michael@0 2426 struct event **pev;
michael@0 2427 unsigned int size;
michael@0 2428 struct timeval off;
michael@0 2429 int i;
michael@0 2430
michael@0 2431 if (use_monotonic)
michael@0 2432 return;
michael@0 2433
michael@0 2434 /* Check if time is running backwards */
michael@0 2435 gettime(base, tv);
michael@0 2436
michael@0 2437 if (evutil_timercmp(tv, &base->event_tv, >=)) {
michael@0 2438 base->event_tv = *tv;
michael@0 2439 return;
michael@0 2440 }
michael@0 2441
michael@0 2442 event_debug(("%s: time is running backwards, corrected",
michael@0 2443 __func__));
michael@0 2444 evutil_timersub(&base->event_tv, tv, &off);
michael@0 2445
michael@0 2446 /*
michael@0 2447 * We can modify the key element of the node without destroying
michael@0 2448 * the minheap property, because we change every element.
michael@0 2449 */
michael@0 2450 pev = base->timeheap.p;
michael@0 2451 size = base->timeheap.n;
michael@0 2452 for (; size-- > 0; ++pev) {
michael@0 2453 struct timeval *ev_tv = &(**pev).ev_timeout;
michael@0 2454 evutil_timersub(ev_tv, &off, ev_tv);
michael@0 2455 }
michael@0 2456 for (i=0; i<base->n_common_timeouts; ++i) {
michael@0 2457 struct event *ev;
michael@0 2458 struct common_timeout_list *ctl =
michael@0 2459 base->common_timeout_queues[i];
michael@0 2460 TAILQ_FOREACH(ev, &ctl->events,
michael@0 2461 ev_timeout_pos.ev_next_with_common_timeout) {
michael@0 2462 struct timeval *ev_tv = &ev->ev_timeout;
michael@0 2463 ev_tv->tv_usec &= MICROSECONDS_MASK;
michael@0 2464 evutil_timersub(ev_tv, &off, ev_tv);
michael@0 2465 ev_tv->tv_usec |= COMMON_TIMEOUT_MAGIC |
michael@0 2466 (i<<COMMON_TIMEOUT_IDX_SHIFT);
michael@0 2467 }
michael@0 2468 }
michael@0 2469
michael@0 2470 /* Now remember what the new time turned out to be. */
michael@0 2471 base->event_tv = *tv;
michael@0 2472 }
michael@0 2473
michael@0 2474 /* Activate every event whose timeout has elapsed. */
michael@0 2475 static void
michael@0 2476 timeout_process(struct event_base *base)
michael@0 2477 {
michael@0 2478 /* Caller must hold lock. */
michael@0 2479 struct timeval now;
michael@0 2480 struct event *ev;
michael@0 2481
michael@0 2482 if (min_heap_empty(&base->timeheap)) {
michael@0 2483 return;
michael@0 2484 }
michael@0 2485
michael@0 2486 gettime(base, &now);
michael@0 2487
michael@0 2488 while ((ev = min_heap_top(&base->timeheap))) {
michael@0 2489 if (evutil_timercmp(&ev->ev_timeout, &now, >))
michael@0 2490 break;
michael@0 2491
michael@0 2492 /* delete this event from the I/O queues */
michael@0 2493 event_del_internal(ev);
michael@0 2494
michael@0 2495 event_debug(("timeout_process: call %p",
michael@0 2496 ev->ev_callback));
michael@0 2497 event_active_nolock(ev, EV_TIMEOUT, 1);
michael@0 2498 }
michael@0 2499 }
michael@0 2500
michael@0 2501 /* Remove 'ev' from 'queue' (EVLIST_...) in base. */
michael@0 2502 static void
michael@0 2503 event_queue_remove(struct event_base *base, struct event *ev, int queue)
michael@0 2504 {
michael@0 2505 EVENT_BASE_ASSERT_LOCKED(base);
michael@0 2506
michael@0 2507 if (!(ev->ev_flags & queue)) {
michael@0 2508 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
michael@0 2509 ev, EV_SOCK_ARG(ev->ev_fd), queue);
michael@0 2510 return;
michael@0 2511 }
michael@0 2512
michael@0 2513 if (~ev->ev_flags & EVLIST_INTERNAL)
michael@0 2514 base->event_count--;
michael@0 2515
michael@0 2516 ev->ev_flags &= ~queue;
michael@0 2517 switch (queue) {
michael@0 2518 case EVLIST_INSERTED:
michael@0 2519 TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
michael@0 2520 break;
michael@0 2521 case EVLIST_ACTIVE:
michael@0 2522 base->event_count_active--;
michael@0 2523 TAILQ_REMOVE(&base->activequeues[ev->ev_pri],
michael@0 2524 ev, ev_active_next);
michael@0 2525 break;
michael@0 2526 case EVLIST_TIMEOUT:
michael@0 2527 if (is_common_timeout(&ev->ev_timeout, base)) {
michael@0 2528 struct common_timeout_list *ctl =
michael@0 2529 get_common_timeout_list(base, &ev->ev_timeout);
michael@0 2530 TAILQ_REMOVE(&ctl->events, ev,
michael@0 2531 ev_timeout_pos.ev_next_with_common_timeout);
michael@0 2532 } else {
michael@0 2533 min_heap_erase(&base->timeheap, ev);
michael@0 2534 }
michael@0 2535 break;
michael@0 2536 default:
michael@0 2537 event_errx(1, "%s: unknown queue %x", __func__, queue);
michael@0 2538 }
michael@0 2539 }
michael@0 2540
michael@0 2541 /* Add 'ev' to the common timeout list 'ctl'. */
michael@0 2542 static void
michael@0 2543 insert_common_timeout_inorder(struct common_timeout_list *ctl,
michael@0 2544 struct event *ev)
michael@0 2545 {
michael@0 2546 struct event *e;
michael@0 2547 /* By all logic, we should just be able to append 'ev' to the end of
michael@0 2548 * ctl->events, since the timeout on each 'ev' is set to {the common
michael@0 2549 * timeout} + {the time when we add the event}, and so the events
michael@0 2550  * should arrive in order of their timeouts. But just in case
michael@0 2551 * there's some wacky threading issue going on, we do a search from
michael@0 2552  * the end of 'ctl->events' to find the right insertion point.
michael@0 2553 */
michael@0 2554 TAILQ_FOREACH_REVERSE(e, &ctl->events,
michael@0 2555 event_list, ev_timeout_pos.ev_next_with_common_timeout) {
michael@0 2556 /* This timercmp is a little sneaky, since both ev and e have
michael@0 2557 * magic values in tv_usec. Fortunately, they ought to have
michael@0 2558 * the _same_ magic values in tv_usec. Let's assert for that.
michael@0 2559 */
michael@0 2560 EVUTIL_ASSERT(
michael@0 2561 is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
michael@0 2562 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
michael@0 2563 TAILQ_INSERT_AFTER(&ctl->events, e, ev,
michael@0 2564 ev_timeout_pos.ev_next_with_common_timeout);
michael@0 2565 return;
michael@0 2566 }
michael@0 2567 }
michael@0 2568 TAILQ_INSERT_HEAD(&ctl->events, ev,
michael@0 2569 ev_timeout_pos.ev_next_with_common_timeout);
michael@0 2570 }
michael@0 2571
michael@0 2572 static void
michael@0 2573 event_queue_insert(struct event_base *base, struct event *ev, int queue)
michael@0 2574 {
michael@0 2575 EVENT_BASE_ASSERT_LOCKED(base);
michael@0 2576
michael@0 2577 if (ev->ev_flags & queue) {
michael@0 2578 /* Double insertion is possible for active events */
michael@0 2579 if (queue & EVLIST_ACTIVE)
michael@0 2580 return;
michael@0 2581
michael@0 2582 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on queue %x", __func__,
michael@0 2583 ev, EV_SOCK_ARG(ev->ev_fd), queue);
michael@0 2584 return;
michael@0 2585 }
michael@0 2586
michael@0 2587 if (~ev->ev_flags & EVLIST_INTERNAL)
michael@0 2588 base->event_count++;
michael@0 2589
michael@0 2590 ev->ev_flags |= queue;
michael@0 2591 switch (queue) {
michael@0 2592 case EVLIST_INSERTED:
michael@0 2593 TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
michael@0 2594 break;
michael@0 2595 case EVLIST_ACTIVE:
michael@0 2596 base->event_count_active++;
michael@0 2597 TAILQ_INSERT_TAIL(&base->activequeues[ev->ev_pri],
michael@0 2598 ev,ev_active_next);
michael@0 2599 break;
michael@0 2600 case EVLIST_TIMEOUT: {
michael@0 2601 if (is_common_timeout(&ev->ev_timeout, base)) {
michael@0 2602 struct common_timeout_list *ctl =
michael@0 2603 get_common_timeout_list(base, &ev->ev_timeout);
michael@0 2604 insert_common_timeout_inorder(ctl, ev);
michael@0 2605 } else
michael@0 2606 min_heap_push(&base->timeheap, ev);
michael@0 2607 break;
michael@0 2608 }
michael@0 2609 default:
michael@0 2610 event_errx(1, "%s: unknown queue %x", __func__, queue);
michael@0 2611 }
michael@0 2612 }
michael@0 2613
michael@0 2614 /* Functions for debugging */
michael@0 2615
michael@0 2616 const char *
michael@0 2617 event_get_version(void)
michael@0 2618 {
michael@0 2619 return (_EVENT_VERSION);
michael@0 2620 }
michael@0 2621
michael@0 2622 ev_uint32_t
michael@0 2623 event_get_version_number(void)
michael@0 2624 {
michael@0 2625 return (_EVENT_NUMERIC_VERSION);
michael@0 2626 }
michael@0 2627
michael@0 2628 /*
michael@0 2629 * No thread-safe interface needed - the information should be the same
michael@0 2630 * for all threads.
michael@0 2631 */
michael@0 2632
michael@0 2633 const char *
michael@0 2634 event_get_method(void)
michael@0 2635 {
michael@0 2636 return (current_base->evsel->name);
michael@0 2637 }
michael@0 2638
michael@0 2639 #ifndef _EVENT_DISABLE_MM_REPLACEMENT
michael@0 2640 static void *(*_mm_malloc_fn)(size_t sz) = NULL;
michael@0 2641 static void *(*_mm_realloc_fn)(void *p, size_t sz) = NULL;
michael@0 2642 static void (*_mm_free_fn)(void *p) = NULL;
michael@0 2643
michael@0 2644 void *
michael@0 2645 event_mm_malloc_(size_t sz)
michael@0 2646 {
michael@0 2647 if (_mm_malloc_fn)
michael@0 2648 return _mm_malloc_fn(sz);
michael@0 2649 else
michael@0 2650 return malloc(sz);
michael@0 2651 }
michael@0 2652
michael@0 2653 void *
michael@0 2654 event_mm_calloc_(size_t count, size_t size)
michael@0 2655 {
michael@0 2656 if (_mm_malloc_fn) {
michael@0 2657 size_t sz = count * size;
michael@0 2658 void *p = _mm_malloc_fn(sz);
michael@0 2659 if (p)
michael@0 2660 memset(p, 0, sz);
michael@0 2661 return p;
michael@0 2662 } else
michael@0 2663 return calloc(count, size);
michael@0 2664 }
michael@0 2665
michael@0 2666 char *
michael@0 2667 event_mm_strdup_(const char *str)
michael@0 2668 {
michael@0 2669 if (_mm_malloc_fn) {
michael@0 2670 size_t ln = strlen(str);
michael@0 2671 void *p = _mm_malloc_fn(ln+1);
michael@0 2672 if (p)
michael@0 2673 memcpy(p, str, ln+1);
michael@0 2674 return p;
michael@0 2675 } else
michael@0 2676 #ifdef WIN32
michael@0 2677 return _strdup(str);
michael@0 2678 #else
michael@0 2679 return strdup(str);
michael@0 2680 #endif
michael@0 2681 }
michael@0 2682
michael@0 2683 void *
michael@0 2684 event_mm_realloc_(void *ptr, size_t sz)
michael@0 2685 {
michael@0 2686 if (_mm_realloc_fn)
michael@0 2687 return _mm_realloc_fn(ptr, sz);
michael@0 2688 else
michael@0 2689 return realloc(ptr, sz);
michael@0 2690 }
michael@0 2691
michael@0 2692 void
michael@0 2693 event_mm_free_(void *ptr)
michael@0 2694 {
michael@0 2695 if (_mm_free_fn)
michael@0 2696 _mm_free_fn(ptr);
michael@0 2697 else
michael@0 2698 free(ptr);
michael@0 2699 }
michael@0 2700
michael@0 2701 void
michael@0 2702 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
michael@0 2703 void *(*realloc_fn)(void *ptr, size_t sz),
michael@0 2704 void (*free_fn)(void *ptr))
michael@0 2705 {
michael@0 2706 _mm_malloc_fn = malloc_fn;
michael@0 2707 _mm_realloc_fn = realloc_fn;
michael@0 2708 _mm_free_fn = free_fn;
michael@0 2709 }
michael@0 2710 #endif
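/*
 * Editor's illustrative sketch (not part of the upstream file): installing
 * custom allocators with event_set_mem_functions().  This must happen before
 * libevent allocates anything, and is only available when
 * _EVENT_DISABLE_MM_REPLACEMENT is not defined.  The thin wrappers below are
 * hypothetical; a real replacement might add accounting or pooling.
 */
#if 0
static void *custom_malloc(size_t sz) { return malloc(sz); }
static void *custom_realloc(void *p, size_t sz) { return realloc(p, sz); }
static void custom_free(void *p) { free(p); }

static void
use_custom_allocators(void)
{
	event_set_mem_functions(custom_malloc, custom_realloc, custom_free);
}
#endif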
michael@0 2711
michael@0 2712 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
michael@0 2713 static void
michael@0 2714 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
michael@0 2715 {
michael@0 2716 ev_uint64_t msg;
michael@0 2717 ev_ssize_t r;
michael@0 2718 struct event_base *base = arg;
michael@0 2719
michael@0 2720 r = read(fd, (void*) &msg, sizeof(msg));
michael@0 2721 if (r<0 && errno != EAGAIN) {
michael@0 2722 event_sock_warn(fd, "Error reading from eventfd");
michael@0 2723 }
michael@0 2724 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
michael@0 2725 base->is_notify_pending = 0;
michael@0 2726 EVBASE_RELEASE_LOCK(base, th_base_lock);
michael@0 2727 }
michael@0 2728 #endif
michael@0 2729
michael@0 2730 static void
michael@0 2731 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
michael@0 2732 {
michael@0 2733 unsigned char buf[1024];
michael@0 2734 struct event_base *base = arg;
michael@0 2735 #ifdef WIN32
michael@0 2736 while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
michael@0 2737 ;
michael@0 2738 #else
michael@0 2739 while (read(fd, (char*)buf, sizeof(buf)) > 0)
michael@0 2740 ;
michael@0 2741 #endif
michael@0 2742
michael@0 2743 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
michael@0 2744 base->is_notify_pending = 0;
michael@0 2745 EVBASE_RELEASE_LOCK(base, th_base_lock);
michael@0 2746 }
michael@0 2747
michael@0 2748 int
michael@0 2749 evthread_make_base_notifiable(struct event_base *base)
michael@0 2750 {
michael@0 2751 void (*cb)(evutil_socket_t, short, void *) = evthread_notify_drain_default;
michael@0 2752 int (*notify)(struct event_base *) = evthread_notify_base_default;
michael@0 2753
michael@0 2754 /* XXXX grab the lock here? */
michael@0 2755 if (!base)
michael@0 2756 return -1;
michael@0 2757
michael@0 2758 if (base->th_notify_fd[0] >= 0)
michael@0 2759 return 0;
michael@0 2760
michael@0 2761 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
michael@0 2762 #ifndef EFD_CLOEXEC
michael@0 2763 #define EFD_CLOEXEC 0
michael@0 2764 #endif
michael@0 2765 base->th_notify_fd[0] = eventfd(0, EFD_CLOEXEC);
michael@0 2766 if (base->th_notify_fd[0] >= 0) {
michael@0 2767 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
michael@0 2768 notify = evthread_notify_base_eventfd;
michael@0 2769 cb = evthread_notify_drain_eventfd;
michael@0 2770 }
michael@0 2771 #endif
michael@0 2772 #if defined(_EVENT_HAVE_PIPE)
michael@0 2773 if (base->th_notify_fd[0] < 0) {
michael@0 2774 if ((base->evsel->features & EV_FEATURE_FDS)) {
michael@0 2775 if (pipe(base->th_notify_fd) < 0) {
michael@0 2776 event_warn("%s: pipe", __func__);
michael@0 2777 } else {
michael@0 2778 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
michael@0 2779 evutil_make_socket_closeonexec(base->th_notify_fd[1]);
michael@0 2780 }
michael@0 2781 }
michael@0 2782 }
michael@0 2783 #endif
michael@0 2784
michael@0 2785 #ifdef WIN32
michael@0 2786 #define LOCAL_SOCKETPAIR_AF AF_INET
michael@0 2787 #else
michael@0 2788 #define LOCAL_SOCKETPAIR_AF AF_UNIX
michael@0 2789 #endif
michael@0 2790 if (base->th_notify_fd[0] < 0) {
michael@0 2791 if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0,
michael@0 2792 base->th_notify_fd) == -1) {
michael@0 2793 event_sock_warn(-1, "%s: socketpair", __func__);
michael@0 2794 return (-1);
michael@0 2795 } else {
michael@0 2796 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
michael@0 2797 evutil_make_socket_closeonexec(base->th_notify_fd[1]);
michael@0 2798 }
michael@0 2799 }
michael@0 2800
michael@0 2801 evutil_make_socket_nonblocking(base->th_notify_fd[0]);
michael@0 2802
michael@0 2803 base->th_notify_fn = notify;
michael@0 2804
michael@0 2805 /*
michael@0 2806 Making the second socket nonblocking is a bit subtle, given that we
michael@0 2807 	ignore any EAGAIN returns when writing to it, and you don't usually
michael@0 2808 do that for a nonblocking socket. But if the kernel gives us EAGAIN,
michael@0 2809 then there's no need to add any more data to the buffer, since
michael@0 2810 the main thread is already either about to wake up and drain it,
michael@0 2811 or woken up and in the process of draining it.
michael@0 2812 */
michael@0 2813 if (base->th_notify_fd[1] > 0)
michael@0 2814 evutil_make_socket_nonblocking(base->th_notify_fd[1]);
michael@0 2815
michael@0 2816 /* prepare an event that we can use for wakeup */
michael@0 2817 event_assign(&base->th_notify, base, base->th_notify_fd[0],
michael@0 2818 EV_READ|EV_PERSIST, cb, base);
michael@0 2819
michael@0 2820 	/* we need to mark this as an internal event */
michael@0 2821 base->th_notify.ev_flags |= EVLIST_INTERNAL;
michael@0 2822 event_priority_set(&base->th_notify, 0);
michael@0 2823
michael@0 2824 return event_add(&base->th_notify, NULL);
michael@0 2825 }
michael@0 2826
michael@0 2827 void
michael@0 2828 event_base_dump_events(struct event_base *base, FILE *output)
michael@0 2829 {
michael@0 2830 struct event *e;
michael@0 2831 int i;
michael@0 2832 fprintf(output, "Inserted events:\n");
michael@0 2833 TAILQ_FOREACH(e, &base->eventqueue, ev_next) {
michael@0 2834 fprintf(output, " %p [fd "EV_SOCK_FMT"]%s%s%s%s%s\n",
michael@0 2835 (void*)e, EV_SOCK_ARG(e->ev_fd),
michael@0 2836 (e->ev_events&EV_READ)?" Read":"",
michael@0 2837 (e->ev_events&EV_WRITE)?" Write":"",
michael@0 2838 (e->ev_events&EV_SIGNAL)?" Signal":"",
michael@0 2839 (e->ev_events&EV_TIMEOUT)?" Timeout":"",
michael@0 2840 (e->ev_events&EV_PERSIST)?" Persist":"");
michael@0 2841
michael@0 2842 }
michael@0 2843 for (i = 0; i < base->nactivequeues; ++i) {
michael@0 2844 if (TAILQ_EMPTY(&base->activequeues[i]))
michael@0 2845 continue;
michael@0 2846 fprintf(output, "Active events [priority %d]:\n", i);
michael@0 2847 		TAILQ_FOREACH(e, &base->activequeues[i], ev_active_next) {
michael@0 2848 fprintf(output, " %p [fd "EV_SOCK_FMT"]%s%s%s%s\n",
michael@0 2849 (void*)e, EV_SOCK_ARG(e->ev_fd),
michael@0 2850 (e->ev_res&EV_READ)?" Read active":"",
michael@0 2851 (e->ev_res&EV_WRITE)?" Write active":"",
michael@0 2852 (e->ev_res&EV_SIGNAL)?" Signal active":"",
michael@0 2853 (e->ev_res&EV_TIMEOUT)?" Timeout active":"");
michael@0 2854 }
michael@0 2855 }
michael@0 2856 }
michael@0 2857
michael@0 2858 void
michael@0 2859 event_base_add_virtual(struct event_base *base)
michael@0 2860 {
michael@0 2861 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
michael@0 2862 base->virtual_event_count++;
michael@0 2863 EVBASE_RELEASE_LOCK(base, th_base_lock);
michael@0 2864 }
michael@0 2865
michael@0 2866 void
michael@0 2867 event_base_del_virtual(struct event_base *base)
michael@0 2868 {
michael@0 2869 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
michael@0 2870 EVUTIL_ASSERT(base->virtual_event_count > 0);
michael@0 2871 base->virtual_event_count--;
michael@0 2872 if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
michael@0 2873 evthread_notify_base(base);
michael@0 2874 EVBASE_RELEASE_LOCK(base, th_base_lock);
michael@0 2875 }
michael@0 2876
michael@0 2877 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
michael@0 2878 int
michael@0 2879 event_global_setup_locks_(const int enable_locks)
michael@0 2880 {
michael@0 2881 #ifndef _EVENT_DISABLE_DEBUG_MODE
michael@0 2882 EVTHREAD_SETUP_GLOBAL_LOCK(_event_debug_map_lock, 0);
michael@0 2883 #endif
michael@0 2884 if (evsig_global_setup_locks_(enable_locks) < 0)
michael@0 2885 return -1;
michael@0 2886 if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
michael@0 2887 return -1;
michael@0 2888 return 0;
michael@0 2889 }
michael@0 2890 #endif
michael@0 2891
michael@0 2892 void
michael@0 2893 event_base_assert_ok(struct event_base *base)
michael@0 2894 {
michael@0 2895 int i;
michael@0 2896 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
michael@0 2897 evmap_check_integrity(base);
michael@0 2898
michael@0 2899 /* Check the heap property */
michael@0 2900 for (i = 1; i < (int)base->timeheap.n; ++i) {
michael@0 2901 int parent = (i - 1) / 2;
michael@0 2902 struct event *ev, *p_ev;
michael@0 2903 ev = base->timeheap.p[i];
michael@0 2904 p_ev = base->timeheap.p[parent];
michael@0 2905 EVUTIL_ASSERT(ev->ev_flags & EV_TIMEOUT);
michael@0 2906 EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
michael@0 2907 EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
michael@0 2908 }
michael@0 2909
michael@0 2910 /* Check that the common timeouts are fine */
michael@0 2911 for (i = 0; i < base->n_common_timeouts; ++i) {
michael@0 2912 struct common_timeout_list *ctl = base->common_timeout_queues[i];
michael@0 2913 struct event *last=NULL, *ev;
michael@0 2914 TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
michael@0 2915 if (last)
michael@0 2916 EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
michael@0 2917 EVUTIL_ASSERT(ev->ev_flags & EV_TIMEOUT);
michael@0 2918 EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
michael@0 2919 EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
michael@0 2920 last = ev;
michael@0 2921 }
michael@0 2922 }
michael@0 2923
michael@0 2924 EVBASE_RELEASE_LOCK(base, th_base_lock);
michael@0 2925 }
