ipc/chromium/src/third_party/libevent/event.c

changeset 0:6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/ipc/chromium/src/third_party/libevent/event.c	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,2925 @@
     1.4 +/*
     1.5 + * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
     1.6 + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
     1.7 + *
     1.8 + * Redistribution and use in source and binary forms, with or without
     1.9 + * modification, are permitted provided that the following conditions
    1.10 + * are met:
    1.11 + * 1. Redistributions of source code must retain the above copyright
    1.12 + *    notice, this list of conditions and the following disclaimer.
    1.13 + * 2. Redistributions in binary form must reproduce the above copyright
    1.14 + *    notice, this list of conditions and the following disclaimer in the
    1.15 + *    documentation and/or other materials provided with the distribution.
    1.16 + * 3. The name of the author may not be used to endorse or promote products
    1.17 + *    derived from this software without specific prior written permission.
    1.18 + *
    1.19 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
    1.20 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
    1.21 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
    1.22 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
    1.23 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
    1.24 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    1.25 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    1.26 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    1.27 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
    1.28 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    1.29 + */
    1.30 +#include "event2/event-config.h"
    1.31 +
    1.32 +#ifdef WIN32
    1.33 +#include <winsock2.h>
    1.34 +#define WIN32_LEAN_AND_MEAN
    1.35 +#include <windows.h>
    1.36 +#undef WIN32_LEAN_AND_MEAN
    1.37 +#endif
    1.38 +#include <sys/types.h>
    1.39 +#if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
    1.40 +#include <sys/time.h>
    1.41 +#endif
    1.42 +#include <sys/queue.h>
    1.43 +#ifdef _EVENT_HAVE_SYS_SOCKET_H
    1.44 +#include <sys/socket.h>
    1.45 +#endif
    1.46 +#include <stdio.h>
    1.47 +#include <stdlib.h>
    1.48 +#ifdef _EVENT_HAVE_UNISTD_H
    1.49 +#include <unistd.h>
    1.50 +#endif
    1.51 +#ifdef _EVENT_HAVE_SYS_EVENTFD_H
    1.52 +#include <sys/eventfd.h>
    1.53 +#endif
    1.54 +#include <ctype.h>
    1.55 +#include <errno.h>
    1.56 +#include <signal.h>
    1.57 +#include <string.h>
    1.58 +#include <time.h>
    1.59 +
    1.60 +#include "event2/event.h"
    1.61 +#include "event2/event_struct.h"
    1.62 +#include "event2/event_compat.h"
    1.63 +#include "event-internal.h"
    1.64 +#include "defer-internal.h"
    1.65 +#include "evthread-internal.h"
    1.66 +#include "event2/thread.h"
    1.67 +#include "event2/util.h"
    1.68 +#include "log-internal.h"
    1.69 +#include "evmap-internal.h"
    1.70 +#include "iocp-internal.h"
    1.71 +#include "changelist-internal.h"
    1.72 +#include "ht-internal.h"
    1.73 +#include "util-internal.h"
    1.74 +
    1.75 +#ifdef _EVENT_HAVE_EVENT_PORTS
    1.76 +extern const struct eventop evportops;
    1.77 +#endif
    1.78 +#ifdef _EVENT_HAVE_SELECT
    1.79 +extern const struct eventop selectops;
    1.80 +#endif
    1.81 +#ifdef _EVENT_HAVE_POLL
    1.82 +extern const struct eventop pollops;
    1.83 +#endif
    1.84 +#ifdef _EVENT_HAVE_EPOLL
    1.85 +extern const struct eventop epollops;
    1.86 +#endif
    1.87 +#ifdef _EVENT_HAVE_WORKING_KQUEUE
    1.88 +extern const struct eventop kqops;
    1.89 +#endif
    1.90 +#ifdef _EVENT_HAVE_DEVPOLL
    1.91 +extern const struct eventop devpollops;
    1.92 +#endif
    1.93 +#ifdef WIN32
    1.94 +extern const struct eventop win32ops;
    1.95 +#endif
    1.96 +
    1.97 +/* Array of backends in order of preference. */
    1.98 +static const struct eventop *eventops[] = {
    1.99 +#ifdef _EVENT_HAVE_EVENT_PORTS
   1.100 +	&evportops,
   1.101 +#endif
   1.102 +#ifdef _EVENT_HAVE_WORKING_KQUEUE
   1.103 +	&kqops,
   1.104 +#endif
   1.105 +#ifdef _EVENT_HAVE_EPOLL
   1.106 +	&epollops,
   1.107 +#endif
   1.108 +#ifdef _EVENT_HAVE_DEVPOLL
   1.109 +	&devpollops,
   1.110 +#endif
   1.111 +#ifdef _EVENT_HAVE_POLL
   1.112 +	&pollops,
   1.113 +#endif
   1.114 +#ifdef _EVENT_HAVE_SELECT
   1.115 +	&selectops,
   1.116 +#endif
   1.117 +#ifdef WIN32
   1.118 +	&win32ops,
   1.119 +#endif
   1.120 +	NULL
   1.121 +};
   1.122 +
   1.123 +/* Global state; deprecated */
   1.124 +struct event_base *event_global_current_base_ = NULL;
   1.125 +#define current_base event_global_current_base_
   1.126 +
   1.127 +/* Global state */
   1.128 +
   1.129 +static int use_monotonic;
   1.130 +
   1.131 +/* Prototypes */
   1.132 +static inline int event_add_internal(struct event *ev,
   1.133 +    const struct timeval *tv, int tv_is_absolute);
   1.134 +static inline int event_del_internal(struct event *ev);
   1.135 +
   1.136 +static void	event_queue_insert(struct event_base *, struct event *, int);
   1.137 +static void	event_queue_remove(struct event_base *, struct event *, int);
   1.138 +static int	event_haveevents(struct event_base *);
   1.139 +
   1.140 +static int	event_process_active(struct event_base *);
   1.141 +
   1.142 +static int	timeout_next(struct event_base *, struct timeval **);
   1.143 +static void	timeout_process(struct event_base *);
   1.144 +static void	timeout_correct(struct event_base *, struct timeval *);
   1.145 +
   1.146 +static inline void	event_signal_closure(struct event_base *, struct event *ev);
   1.147 +static inline void	event_persist_closure(struct event_base *, struct event *ev);
   1.148 +
   1.149 +static int	evthread_notify_base(struct event_base *base);
   1.150 +
   1.151 +#ifndef _EVENT_DISABLE_DEBUG_MODE
   1.152 +/* These functions implement a hashtable of which 'struct event *' structures
   1.153 + * have been set up or added.  We don't want to trust the content of the struct
   1.154 + * event itself, since we're trying to work through cases where an event gets
   1.155 + * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
   1.156 + */
   1.157 +
   1.158 +struct event_debug_entry {
   1.159 +	HT_ENTRY(event_debug_entry) node;
   1.160 +	const struct event *ptr;
   1.161 +	unsigned added : 1;
   1.162 +};
   1.163 +
   1.164 +static inline unsigned
   1.165 +hash_debug_entry(const struct event_debug_entry *e)
   1.166 +{
   1.167 +	/* We need to do this silliness to convince compilers that we
   1.168 +	 * honestly mean to cast e->ptr to an integer, and discard any
   1.169 +	 * part of it that doesn't fit in an unsigned.
   1.170 +	 */
   1.171 +	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
   1.172 +	/* Our hashtable implementation is pretty sensitive to low bits,
   1.173 +	 * and every struct event is over 64 bytes in size, so we can
   1.174 +	 * just say >>6. */
   1.175 +	return (u >> 6);
   1.176 +}
   1.177 +
   1.178 +static inline int
   1.179 +eq_debug_entry(const struct event_debug_entry *a,
   1.180 +    const struct event_debug_entry *b)
   1.181 +{
   1.182 +	return a->ptr == b->ptr;
   1.183 +}
   1.184 +
   1.185 +int _event_debug_mode_on = 0;
   1.186 +/* Set if it's too late to enable event_debug_mode. */
   1.187 +static int event_debug_mode_too_late = 0;
   1.188 +#ifndef _EVENT_DISABLE_THREAD_SUPPORT
   1.189 +static void *_event_debug_map_lock = NULL;
   1.190 +#endif
   1.191 +static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
   1.192 +	HT_INITIALIZER();
   1.193 +
   1.194 +HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
   1.195 +    eq_debug_entry)
   1.196 +HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
   1.197 +    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
   1.198 +
   1.199 +/* Macro: record that ev is now setup (that is, ready for an add) */
   1.200 +#define _event_debug_note_setup(ev) do {				\
   1.201 +	if (_event_debug_mode_on) {					\
   1.202 +		struct event_debug_entry *dent,find;			\
   1.203 +		find.ptr = (ev);					\
   1.204 +		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
   1.205 +		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
   1.206 +		if (dent) {						\
   1.207 +			dent->added = 0;				\
   1.208 +		} else {						\
   1.209 +			dent = mm_malloc(sizeof(*dent));		\
   1.210 +			if (!dent)					\
   1.211 +				event_err(1,				\
   1.212 +				    "Out of memory in debugging code");	\
   1.213 +			dent->ptr = (ev);				\
   1.214 +			dent->added = 0;				\
   1.215 +			HT_INSERT(event_debug_map, &global_debug_map, dent); \
   1.216 +		}							\
   1.217 +		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
   1.218 +	}								\
   1.219 +	event_debug_mode_too_late = 1;					\
   1.220 +	} while (0)
   1.221 +/* Macro: record that ev is no longer setup */
   1.222 +#define _event_debug_note_teardown(ev) do {				\
   1.223 +	if (_event_debug_mode_on) {					\
   1.224 +		struct event_debug_entry *dent,find;			\
   1.225 +		find.ptr = (ev);					\
   1.226 +		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
   1.227 +		dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
   1.228 +		if (dent)						\
   1.229 +			mm_free(dent);					\
   1.230 +		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
   1.231 +	}								\
   1.232 +	event_debug_mode_too_late = 1;					\
   1.233 +	} while (0)
   1.234 +/* Macro: record that ev is now added */
   1.235 +#define _event_debug_note_add(ev)	do {				\
   1.236 +	if (_event_debug_mode_on) {					\
   1.237 +		struct event_debug_entry *dent,find;			\
   1.238 +		find.ptr = (ev);					\
   1.239 +		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
   1.240 +		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
   1.241 +		if (dent) {						\
   1.242 +			dent->added = 1;				\
   1.243 +		} else {						\
   1.244 +			event_errx(_EVENT_ERR_ABORT,			\
   1.245 +			    "%s: noting an add on a non-setup event %p" \
   1.246 +			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
   1.247 +			    ", flags: 0x%x)",				\
   1.248 +			    __func__, (ev), (ev)->ev_events,		\
   1.249 +			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
   1.250 +		}							\
   1.251 +		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
   1.252 +	}								\
   1.253 +	event_debug_mode_too_late = 1;					\
   1.254 +	} while (0)
   1.255 +/* Macro: record that ev is no longer added */
   1.256 +#define _event_debug_note_del(ev) do {					\
   1.257 +	if (_event_debug_mode_on) {					\
   1.258 +		struct event_debug_entry *dent,find;			\
   1.259 +		find.ptr = (ev);					\
   1.260 +		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
   1.261 +		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
   1.262 +		if (dent) {						\
   1.263 +			dent->added = 0;				\
   1.264 +		} else {						\
   1.265 +			event_errx(_EVENT_ERR_ABORT,			\
   1.266 +			    "%s: noting a del on a non-setup event %p"	\
   1.267 +			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
   1.268 +			    ", flags: 0x%x)",				\
   1.269 +			    __func__, (ev), (ev)->ev_events,		\
   1.270 +			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
   1.271 +		}							\
   1.272 +		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
   1.273 +	}								\
   1.274 +	event_debug_mode_too_late = 1;					\
   1.275 +	} while (0)
   1.276 +/* Macro: assert that ev is setup (i.e., okay to add or inspect) */
   1.277 +#define _event_debug_assert_is_setup(ev) do {				\
   1.278 +	if (_event_debug_mode_on) {					\
   1.279 +		struct event_debug_entry *dent,find;			\
   1.280 +		find.ptr = (ev);					\
   1.281 +		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
   1.282 +		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
   1.283 +		if (!dent) {						\
   1.284 +			event_errx(_EVENT_ERR_ABORT,			\
   1.285 +			    "%s called on a non-initialized event %p"	\
   1.286 +			    " (events: 0x%x, fd: "EV_SOCK_FMT\
   1.287 +			    ", flags: 0x%x)",				\
   1.288 +			    __func__, (ev), (ev)->ev_events,		\
   1.289 +			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
   1.290 +		}							\
   1.291 +		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
   1.292 +	}								\
   1.293 +	} while (0)
   1.294 +/* Macro: assert that ev is not added (i.e., okay to tear down or set
   1.295 + * up again) */
   1.296 +#define _event_debug_assert_not_added(ev) do {				\
   1.297 +	if (_event_debug_mode_on) {					\
   1.298 +		struct event_debug_entry *dent,find;			\
   1.299 +		find.ptr = (ev);					\
   1.300 +		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
   1.301 +		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
   1.302 +		if (dent && dent->added) {				\
   1.303 +			event_errx(_EVENT_ERR_ABORT,			\
   1.304 +			    "%s called on an already added event %p"	\
   1.305 +			    " (events: 0x%x, fd: "EV_SOCK_FMT", "	\
   1.306 +			    "flags: 0x%x)",				\
   1.307 +			    __func__, (ev), (ev)->ev_events,		\
   1.308 +			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
   1.309 +		}							\
   1.310 +		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
   1.311 +	}								\
   1.312 +	} while (0)
   1.313 +#else
   1.314 +#define _event_debug_note_setup(ev) \
   1.315 +	((void)0)
   1.316 +#define _event_debug_note_teardown(ev) \
   1.317 +	((void)0)
   1.318 +#define _event_debug_note_add(ev) \
   1.319 +	((void)0)
   1.320 +#define _event_debug_note_del(ev) \
   1.321 +	((void)0)
   1.322 +#define _event_debug_assert_is_setup(ev) \
   1.323 +	((void)0)
   1.324 +#define _event_debug_assert_not_added(ev) \
   1.325 +	((void)0)
   1.326 +#endif
   1.327 +
   1.328 +#define EVENT_BASE_ASSERT_LOCKED(base)		\
   1.329 +	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
   1.330 +
   1.331 +/* The first time this function is called, it sets use_monotonic to 1
   1.332 + * if we have a clock function that supports monotonic time */
   1.333 +static void
   1.334 +detect_monotonic(void)
   1.335 +{
   1.336 +#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
   1.337 +	struct timespec	ts;
   1.338 +	static int use_monotonic_initialized = 0;
   1.339 +
   1.340 +	if (use_monotonic_initialized)
   1.341 +		return;
   1.342 +
   1.343 +	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
   1.344 +		use_monotonic = 1;
   1.345 +
   1.346 +	use_monotonic_initialized = 1;
   1.347 +#endif
   1.348 +}
   1.349 +
   1.350 +/* How often (in seconds) do we check for changes in wall clock time relative
   1.351 + * to monotonic time?  Set this to -1 for 'never.' */
   1.352 +#define CLOCK_SYNC_INTERVAL -1
   1.353 +
   1.354 +/** Set 'tp' to the current time according to 'base'.  We must hold the lock
   1.355 + * on 'base'.  If there is a cached time, return it.  Otherwise, use
   1.356 + * clock_gettime or gettimeofday as appropriate to find out the right time.
   1.357 + * Return 0 on success, -1 on failure.
   1.358 + */
   1.359 +static int
   1.360 +gettime(struct event_base *base, struct timeval *tp)
   1.361 +{
   1.362 +	EVENT_BASE_ASSERT_LOCKED(base);
   1.363 +
   1.364 +	if (base->tv_cache.tv_sec) {
   1.365 +		*tp = base->tv_cache;
   1.366 +		return (0);
   1.367 +	}
   1.368 +
   1.369 +#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
   1.370 +	if (use_monotonic) {
   1.371 +		struct timespec	ts;
   1.372 +
   1.373 +		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
   1.374 +			return (-1);
   1.375 +
   1.376 +		tp->tv_sec = ts.tv_sec;
   1.377 +		tp->tv_usec = ts.tv_nsec / 1000;
   1.378 +		if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
   1.379 +		    < ts.tv_sec) {
   1.380 +			struct timeval tv;
   1.381 +			evutil_gettimeofday(&tv,NULL);
   1.382 +			evutil_timersub(&tv, tp, &base->tv_clock_diff);
   1.383 +			base->last_updated_clock_diff = ts.tv_sec;
   1.384 +		}
   1.385 +
   1.386 +		return (0);
   1.387 +	}
   1.388 +#endif
   1.389 +
   1.390 +	return (evutil_gettimeofday(tp, NULL));
   1.391 +}
   1.392 +
   1.393 +int
   1.394 +event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
   1.395 +{
   1.396 +	int r;
   1.397 +	if (!base) {
   1.398 +		base = current_base;
   1.399 +		if (!current_base)
   1.400 +			return evutil_gettimeofday(tv, NULL);
   1.401 +	}
   1.402 +
   1.403 +	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   1.404 +	if (base->tv_cache.tv_sec == 0) {
   1.405 +		r = evutil_gettimeofday(tv, NULL);
   1.406 +	} else {
   1.407 +#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
   1.408 +		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
   1.409 +#else
   1.410 +		*tv = base->tv_cache;
   1.411 +#endif
   1.412 +		r = 0;
   1.413 +	}
   1.414 +	EVBASE_RELEASE_LOCK(base, th_base_lock);
   1.415 +	return r;
   1.416 +}
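
The function above is the cheap way for user code to ask "what time is it" from inside a callback: it hands back the loop's cached timestamp (corrected toward wall-clock time when a monotonic clock is in use) and only falls back to a real gettimeofday() when no cached value is available. A minimal sketch of that usage; the callback name and arguments are illustrative, not part of this file:

#include <event2/event.h>
#include <stdio.h>

/* Illustrative callback: timestamp its own invocation using the base's
 * cached time instead of issuing a syscall on every call. */
static void
stamp_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	struct timeval now;

	if (event_base_gettimeofday_cached(base, &now) == 0)
		printf("callback ran at %ld.%06ld\n",
		    (long)now.tv_sec, (long)now.tv_usec);
}
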
   1.417 +
   1.418 +/** Make 'base' have no current cached time. */
   1.419 +static inline void
   1.420 +clear_time_cache(struct event_base *base)
   1.421 +{
   1.422 +	base->tv_cache.tv_sec = 0;
   1.423 +}
   1.424 +
   1.425 +/** Replace the cached time in 'base' with the current time. */
   1.426 +static inline void
   1.427 +update_time_cache(struct event_base *base)
   1.428 +{
   1.429 +	base->tv_cache.tv_sec = 0;
   1.430 +	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
   1.431 +	    gettime(base, &base->tv_cache);
   1.432 +}
   1.433 +
   1.434 +struct event_base *
   1.435 +event_init(void)
   1.436 +{
   1.437 +	struct event_base *base = event_base_new_with_config(NULL);
   1.438 +
   1.439 +	if (base == NULL) {
   1.440 +		event_errx(1, "%s: Unable to construct event_base", __func__);
   1.441 +		return NULL;
   1.442 +	}
   1.443 +
   1.444 +	current_base = base;
   1.445 +
   1.446 +	return (base);
   1.447 +}
   1.448 +
   1.449 +struct event_base *
   1.450 +event_base_new(void)
   1.451 +{
   1.452 +	struct event_base *base = NULL;
   1.453 +	struct event_config *cfg = event_config_new();
   1.454 +	if (cfg) {
   1.455 +		base = event_base_new_with_config(cfg);
   1.456 +		event_config_free(cfg);
   1.457 +	}
   1.458 +	return base;
   1.459 +}
   1.460 +
   1.461 +/** Return true iff 'method' is the name of a method that 'cfg' tells us to
   1.462 + * avoid. */
   1.463 +static int
   1.464 +event_config_is_avoided_method(const struct event_config *cfg,
   1.465 +    const char *method)
   1.466 +{
   1.467 +	struct event_config_entry *entry;
   1.468 +
   1.469 +	TAILQ_FOREACH(entry, &cfg->entries, next) {
   1.470 +		if (entry->avoid_method != NULL &&
   1.471 +		    strcmp(entry->avoid_method, method) == 0)
   1.472 +			return (1);
   1.473 +	}
   1.474 +
   1.475 +	return (0);
   1.476 +}
   1.477 +
   1.478 +/** Return true iff 'method' is disabled according to the environment. */
   1.479 +static int
   1.480 +event_is_method_disabled(const char *name)
   1.481 +{
   1.482 +	char environment[64];
   1.483 +	int i;
   1.484 +
   1.485 +	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
   1.486 +	for (i = 8; environment[i] != '\0'; ++i)
   1.487 +		environment[i] = EVUTIL_TOUPPER(environment[i]);
   1.488 +	/* Note that evutil_getenv() ignores the environment entirely if
   1.489 +	 * we're setuid */
   1.490 +	return (evutil_getenv(environment) != NULL);
   1.491 +}
   1.492 +
   1.493 +int
   1.494 +event_base_get_features(const struct event_base *base)
   1.495 +{
   1.496 +	return base->evsel->features;
   1.497 +}
   1.498 +
   1.499 +void
   1.500 +event_deferred_cb_queue_init(struct deferred_cb_queue *cb)
   1.501 +{
   1.502 +	memset(cb, 0, sizeof(struct deferred_cb_queue));
   1.503 +	TAILQ_INIT(&cb->deferred_cb_list);
   1.504 +}
   1.505 +
   1.506 +/** Helper for the deferred_cb queue: wake up the event base. */
   1.507 +static void
   1.508 +notify_base_cbq_callback(struct deferred_cb_queue *cb, void *baseptr)
   1.509 +{
   1.510 +	struct event_base *base = baseptr;
   1.511 +	if (EVBASE_NEED_NOTIFY(base))
   1.512 +		evthread_notify_base(base);
   1.513 +}
   1.514 +
   1.515 +struct deferred_cb_queue *
   1.516 +event_base_get_deferred_cb_queue(struct event_base *base)
   1.517 +{
   1.518 +	return base ? &base->defer_queue : NULL;
   1.519 +}
   1.520 +
   1.521 +void
   1.522 +event_enable_debug_mode(void)
   1.523 +{
   1.524 +#ifndef _EVENT_DISABLE_DEBUG_MODE
   1.525 +	if (_event_debug_mode_on)
   1.526 +		event_errx(1, "%s was called twice!", __func__);
   1.527 +	if (event_debug_mode_too_late)
   1.528 +		event_errx(1, "%s must be called *before* creating any events "
   1.529 +		    "or event_bases",__func__);
   1.530 +
   1.531 +	_event_debug_mode_on = 1;
   1.532 +
   1.533 +	HT_INIT(event_debug_map, &global_debug_map);
   1.534 +#endif
   1.535 +}
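
Because event_debug_mode_too_late is set as soon as any event or base is created, debug mode is only usable when it is switched on first thing in the program. A sketch of the intended call order (assuming a trivial main):

#include <event2/event.h>

int
main(void)
{
	struct event_base *base;

	/* Must run before any event or event_base is created, or the
	 * checks above abort with "must be called *before* ...". */
	event_enable_debug_mode();

	base = event_base_new();
	/* ... create events, run the loop ... */
	event_base_free(base);
	return 0;
}
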
   1.536 +
   1.537 +#if 0
   1.538 +void
   1.539 +event_disable_debug_mode(void)
   1.540 +{
   1.541 +	struct event_debug_entry **ent, *victim;
   1.542 +
   1.543 +	EVLOCK_LOCK(_event_debug_map_lock, 0);
   1.544 +	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
   1.545 +		victim = *ent;
   1.546 +		ent = HT_NEXT_RMV(event_debug_map,&global_debug_map, ent);
   1.547 +		mm_free(victim);
   1.548 +	}
   1.549 +	HT_CLEAR(event_debug_map, &global_debug_map);
   1.550 +	EVLOCK_UNLOCK(_event_debug_map_lock , 0);
   1.551 +}
   1.552 +#endif
   1.553 +
   1.554 +struct event_base *
   1.555 +event_base_new_with_config(const struct event_config *cfg)
   1.556 +{
   1.557 +	int i;
   1.558 +	struct event_base *base;
   1.559 +	int should_check_environment;
   1.560 +
   1.561 +#ifndef _EVENT_DISABLE_DEBUG_MODE
   1.562 +	event_debug_mode_too_late = 1;
   1.563 +#endif
   1.564 +
   1.565 +	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
   1.566 +		event_warn("%s: calloc", __func__);
   1.567 +		return NULL;
   1.568 +	}
   1.569 +	detect_monotonic();
   1.570 +	gettime(base, &base->event_tv);
   1.571 +
   1.572 +	min_heap_ctor(&base->timeheap);
   1.573 +	TAILQ_INIT(&base->eventqueue);
   1.574 +	base->sig.ev_signal_pair[0] = -1;
   1.575 +	base->sig.ev_signal_pair[1] = -1;
   1.576 +	base->th_notify_fd[0] = -1;
   1.577 +	base->th_notify_fd[1] = -1;
   1.578 +
   1.579 +	event_deferred_cb_queue_init(&base->defer_queue);
   1.580 +	base->defer_queue.notify_fn = notify_base_cbq_callback;
   1.581 +	base->defer_queue.notify_arg = base;
   1.582 +	if (cfg)
   1.583 +		base->flags = cfg->flags;
   1.584 +
   1.585 +	evmap_io_initmap(&base->io);
   1.586 +	evmap_signal_initmap(&base->sigmap);
   1.587 +	event_changelist_init(&base->changelist);
   1.588 +
   1.589 +	base->evbase = NULL;
   1.590 +
   1.591 +	should_check_environment =
   1.592 +	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
   1.593 +
   1.594 +	for (i = 0; eventops[i] && !base->evbase; i++) {
   1.595 +		if (cfg != NULL) {
   1.596 +			/* determine if this backend should be avoided */
   1.597 +			if (event_config_is_avoided_method(cfg,
   1.598 +				eventops[i]->name))
   1.599 +				continue;
   1.600 +			if ((eventops[i]->features & cfg->require_features)
   1.601 +			    != cfg->require_features)
   1.602 +				continue;
   1.603 +		}
   1.604 +
   1.605 +		/* also obey the environment variables */
   1.606 +		if (should_check_environment &&
   1.607 +		    event_is_method_disabled(eventops[i]->name))
   1.608 +			continue;
   1.609 +
   1.610 +		base->evsel = eventops[i];
   1.611 +
   1.612 +		base->evbase = base->evsel->init(base);
   1.613 +	}
   1.614 +
   1.615 +	if (base->evbase == NULL) {
   1.616 +		event_warnx("%s: no event mechanism available",
   1.617 +		    __func__);
   1.618 +		base->evsel = NULL;
   1.619 +		event_base_free(base);
   1.620 +		return NULL;
   1.621 +	}
   1.622 +
   1.623 +	if (evutil_getenv("EVENT_SHOW_METHOD"))
   1.624 +		event_msgx("libevent using: %s", base->evsel->name);
   1.625 +
   1.626 +	/* allocate a single active event queue */
   1.627 +	if (event_base_priority_init(base, 1) < 0) {
   1.628 +		event_base_free(base);
   1.629 +		return NULL;
   1.630 +	}
   1.631 +
   1.632 +	/* prepare for threading */
   1.633 +
   1.634 +#ifndef _EVENT_DISABLE_THREAD_SUPPORT
   1.635 +	if (EVTHREAD_LOCKING_ENABLED() &&
   1.636 +	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
   1.637 +		int r;
   1.638 +		EVTHREAD_ALLOC_LOCK(base->th_base_lock,
   1.639 +		    EVTHREAD_LOCKTYPE_RECURSIVE);
   1.640 +		base->defer_queue.lock = base->th_base_lock;
   1.641 +		EVTHREAD_ALLOC_COND(base->current_event_cond);
   1.642 +		r = evthread_make_base_notifiable(base);
   1.643 +		if (r<0) {
   1.644 +			event_warnx("%s: Unable to make base notifiable.", __func__);
   1.645 +			event_base_free(base);
   1.646 +			return NULL;
   1.647 +		}
   1.648 +	}
   1.649 +#endif
   1.650 +
   1.651 +#ifdef WIN32
   1.652 +	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
   1.653 +		event_base_start_iocp(base, cfg->n_cpus_hint);
   1.654 +#endif
   1.655 +
   1.656 +	return (base);
   1.657 +}
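
The selection loop above walks eventops[] in preference order, skipping backends that the config avoids, that lack the required features, or that an EVENT_NO* environment variable disables. A sketch of driving it from application code; the particular feature and flag choices here are only examples:

#include <event2/event.h>
#include <stdio.h>

static struct event_base *
make_base(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();

	if (!cfg)
		return NULL;
	event_config_avoid_method(cfg, "select");	    /* never pick select */
	event_config_require_features(cfg, EV_FEATURE_O1); /* O(1) dispatch */
	event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV);

	base = event_base_new_with_config(cfg);
	event_config_free(cfg);
	if (base)
		printf("libevent backend: %s\n", event_base_get_method(base));
	return base;
}
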
   1.658 +
   1.659 +int
   1.660 +event_base_start_iocp(struct event_base *base, int n_cpus)
   1.661 +{
   1.662 +#ifdef WIN32
   1.663 +	if (base->iocp)
   1.664 +		return 0;
   1.665 +	base->iocp = event_iocp_port_launch(n_cpus);
   1.666 +	if (!base->iocp) {
   1.667 +		event_warnx("%s: Couldn't launch IOCP", __func__);
   1.668 +		return -1;
   1.669 +	}
   1.670 +	return 0;
   1.671 +#else
   1.672 +	return -1;
   1.673 +#endif
   1.674 +}
   1.675 +
   1.676 +void
   1.677 +event_base_stop_iocp(struct event_base *base)
   1.678 +{
   1.679 +#ifdef WIN32
   1.680 +	int rv;
   1.681 +
   1.682 +	if (!base->iocp)
   1.683 +		return;
   1.684 +	rv = event_iocp_shutdown(base->iocp, -1);
   1.685 +	EVUTIL_ASSERT(rv >= 0);
   1.686 +	base->iocp = NULL;
   1.687 +#endif
   1.688 +}
   1.689 +
   1.690 +void
   1.691 +event_base_free(struct event_base *base)
   1.692 +{
   1.693 +	int i, n_deleted=0;
   1.694 +	struct event *ev;
   1.695 +	/* XXXX grab the lock? If there is contention when one thread frees
   1.696 +	 * the base, then the contending thread will be very sad soon. */
   1.697 +
   1.698 +	/* event_base_free(NULL) is how to free the current_base if we
   1.699 +	 * made it with event_init and forgot to hold a reference to it. */
   1.700 +	if (base == NULL && current_base)
   1.701 +		base = current_base;
   1.702 +	/* If we're freeing current_base, there won't be a current_base. */
   1.703 +	if (base == current_base)
   1.704 +		current_base = NULL;
   1.705 +	/* Don't actually free NULL. */
   1.706 +	if (base == NULL) {
   1.707 +		event_warnx("%s: no base to free", __func__);
   1.708 +		return;
   1.709 +	}
   1.710 +	/* XXX(niels) - check for internal events first */
   1.711 +
   1.712 +#ifdef WIN32
   1.713 +	event_base_stop_iocp(base);
   1.714 +#endif
   1.715 +
   1.716 +	/* threading fds if we have them */
   1.717 +	if (base->th_notify_fd[0] != -1) {
   1.718 +		event_del(&base->th_notify);
   1.719 +		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
   1.720 +		if (base->th_notify_fd[1] != -1)
   1.721 +			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
   1.722 +		base->th_notify_fd[0] = -1;
   1.723 +		base->th_notify_fd[1] = -1;
   1.724 +		event_debug_unassign(&base->th_notify);
   1.725 +	}
   1.726 +
   1.727 +	/* Delete all non-internal events. */
   1.728 +	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
   1.729 +		struct event *next = TAILQ_NEXT(ev, ev_next);
   1.730 +		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
   1.731 +			event_del(ev);
   1.732 +			++n_deleted;
   1.733 +		}
   1.734 +		ev = next;
   1.735 +	}
   1.736 +	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
   1.737 +		event_del(ev);
   1.738 +		++n_deleted;
   1.739 +	}
   1.740 +	for (i = 0; i < base->n_common_timeouts; ++i) {
   1.741 +		struct common_timeout_list *ctl =
   1.742 +		    base->common_timeout_queues[i];
   1.743 +		event_del(&ctl->timeout_event); /* Internal; doesn't count */
   1.744 +		event_debug_unassign(&ctl->timeout_event);
   1.745 +		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
   1.746 +			struct event *next = TAILQ_NEXT(ev,
   1.747 +			    ev_timeout_pos.ev_next_with_common_timeout);
   1.748 +			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
   1.749 +				event_del(ev);
   1.750 +				++n_deleted;
   1.751 +			}
   1.752 +			ev = next;
   1.753 +		}
   1.754 +		mm_free(ctl);
   1.755 +	}
   1.756 +	if (base->common_timeout_queues)
   1.757 +		mm_free(base->common_timeout_queues);
   1.758 +
   1.759 +	for (i = 0; i < base->nactivequeues; ++i) {
   1.760 +		for (ev = TAILQ_FIRST(&base->activequeues[i]); ev; ) {
   1.761 +			struct event *next = TAILQ_NEXT(ev, ev_active_next);
   1.762 +			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
   1.763 +				event_del(ev);
   1.764 +				++n_deleted;
   1.765 +			}
   1.766 +			ev = next;
   1.767 +		}
   1.768 +	}
   1.769 +
   1.770 +	if (n_deleted)
   1.771 +		event_debug(("%s: %d events were still set in base",
   1.772 +			__func__, n_deleted));
   1.773 +
   1.774 +	if (base->evsel != NULL && base->evsel->dealloc != NULL)
   1.775 +		base->evsel->dealloc(base);
   1.776 +
   1.777 +	for (i = 0; i < base->nactivequeues; ++i)
   1.778 +		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
   1.779 +
   1.780 +	EVUTIL_ASSERT(min_heap_empty(&base->timeheap));
   1.781 +	min_heap_dtor(&base->timeheap);
   1.782 +
   1.783 +	mm_free(base->activequeues);
   1.784 +
   1.785 +	EVUTIL_ASSERT(TAILQ_EMPTY(&base->eventqueue));
   1.786 +
   1.787 +	evmap_io_clear(&base->io);
   1.788 +	evmap_signal_clear(&base->sigmap);
   1.789 +	event_changelist_freemem(&base->changelist);
   1.790 +
   1.791 +	EVTHREAD_FREE_LOCK(base->th_base_lock, EVTHREAD_LOCKTYPE_RECURSIVE);
   1.792 +	EVTHREAD_FREE_COND(base->current_event_cond);
   1.793 +
   1.794 +	mm_free(base);
   1.795 +}
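
Note that event_base_free() deletes whatever events are still pending but does not release storage the caller allocated for them, so the usual teardown frees the events first. A small sketch with placeholder names:

#include <event2/event.h>

/* 'base' and 'ev' stand in for objects created elsewhere with
 * event_base_new() and event_new(). */
static void
teardown(struct event_base *base, struct event *ev)
{
	event_del(ev);		/* stop monitoring it */
	event_free(ev);		/* release the caller-owned event */
	event_base_free(base);	/* then release the base itself */
}
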
   1.796 +
   1.797 +/* reinitialize the event base after a fork */
   1.798 +int
   1.799 +event_reinit(struct event_base *base)
   1.800 +{
   1.801 +	const struct eventop *evsel;
   1.802 +	int res = 0;
   1.803 +	struct event *ev;
   1.804 +	int was_notifiable = 0;
   1.805 +
   1.806 +	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   1.807 +
   1.808 +	evsel = base->evsel;
   1.809 +
   1.810 +#if 0
   1.811 +	/* Right now, reinit always takes effect, since even if the
   1.812 +	   backend doesn't require it, the signal socketpair code does.
   1.813 +
   1.814 +	   XXX
   1.815 +	 */
   1.816 +	/* check if this event mechanism requires reinit */
   1.817 +	if (!evsel->need_reinit)
   1.818 +		goto done;
   1.819 +#endif
   1.820 +
   1.821 +	/* prevent internal delete */
   1.822 +	if (base->sig.ev_signal_added) {
   1.823 +		/* we cannot call event_del here because the base has
   1.824 +		 * not been reinitialized yet. */
   1.825 +		event_queue_remove(base, &base->sig.ev_signal,
   1.826 +		    EVLIST_INSERTED);
   1.827 +		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
   1.828 +			event_queue_remove(base, &base->sig.ev_signal,
   1.829 +			    EVLIST_ACTIVE);
   1.830 +		if (base->sig.ev_signal_pair[0] != -1)
   1.831 +			EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
   1.832 +		if (base->sig.ev_signal_pair[1] != -1)
   1.833 +			EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
   1.834 +		base->sig.ev_signal_added = 0;
   1.835 +	}
   1.836 +	if (base->th_notify_fd[0] != -1) {
   1.837 +		/* we cannot call event_del here because the base has
   1.838 +		 * not been reinitialized yet. */
   1.839 +		was_notifiable = 1;
   1.840 +		event_queue_remove(base, &base->th_notify,
   1.841 +		    EVLIST_INSERTED);
   1.842 +		if (base->th_notify.ev_flags & EVLIST_ACTIVE)
   1.843 +			event_queue_remove(base, &base->th_notify,
   1.844 +			    EVLIST_ACTIVE);
   1.845 +		base->sig.ev_signal_added = 0;
   1.846 +		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
   1.847 +		if (base->th_notify_fd[1] != -1)
   1.848 +			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
   1.849 +		base->th_notify_fd[0] = -1;
   1.850 +		base->th_notify_fd[1] = -1;
   1.851 +		event_debug_unassign(&base->th_notify);
   1.852 +	}
   1.853 +
   1.854 +	if (base->evsel->dealloc != NULL)
   1.855 +		base->evsel->dealloc(base);
   1.856 +	base->evbase = evsel->init(base);
   1.857 +	if (base->evbase == NULL) {
   1.858 +		event_errx(1, "%s: could not reinitialize event mechanism",
   1.859 +		    __func__);
   1.860 +		res = -1;
   1.861 +		goto done;
   1.862 +	}
   1.863 +
   1.864 +	event_changelist_freemem(&base->changelist); /* XXX */
   1.865 +	evmap_io_clear(&base->io);
   1.866 +	evmap_signal_clear(&base->sigmap);
   1.867 +
   1.868 +	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
   1.869 +		if (ev->ev_events & (EV_READ|EV_WRITE)) {
   1.870 +			if (ev == &base->sig.ev_signal) {
   1.871 +				/* If we run into the ev_signal event, it's only
   1.872 +				 * in eventqueue because some signal event was
   1.873 +				 * added, which made evsig_add re-add ev_signal.
   1.874 +				 * So don't double-add it. */
   1.875 +				continue;
   1.876 +			}
   1.877 +			if (evmap_io_add(base, ev->ev_fd, ev) == -1)
   1.878 +				res = -1;
   1.879 +		} else if (ev->ev_events & EV_SIGNAL) {
   1.880 +			if (evmap_signal_add(base, (int)ev->ev_fd, ev) == -1)
   1.881 +				res = -1;
   1.882 +		}
   1.883 +	}
   1.884 +
   1.885 +	if (was_notifiable && res == 0)
   1.886 +		res = evthread_make_base_notifiable(base);
   1.887 +
   1.888 +done:
   1.889 +	EVBASE_RELEASE_LOCK(base, th_base_lock);
   1.890 +	return (res);
   1.891 +}
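
This is the hook a forking server needs: backend state such as an epoll fd or the signal socketpair must not be shared with the parent, so the child reinitializes its copy of the base before using it. A sketch, assuming POSIX fork():

#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <event2/event.h>

static void
fork_worker(struct event_base *base)
{
	pid_t pid = fork();

	if (pid == 0) {
		if (event_reinit(base) == -1)
			exit(1);
		/* child: continue adding events and dispatching on 'base' */
	}
	/* parent: its instance of 'base' is unaffected */
}
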
   1.892 +
   1.893 +const char **
   1.894 +event_get_supported_methods(void)
   1.895 +{
   1.896 +	static const char **methods = NULL;
   1.897 +	const struct eventop **method;
   1.898 +	const char **tmp;
   1.899 +	int i = 0, k;
   1.900 +
   1.901 +	/* count all methods */
   1.902 +	for (method = &eventops[0]; *method != NULL; ++method) {
   1.903 +		++i;
   1.904 +	}
   1.905 +
   1.906 +	/* allocate one more than we need for the NULL pointer */
   1.907 +	tmp = mm_calloc((i + 1), sizeof(char *));
   1.908 +	if (tmp == NULL)
   1.909 +		return (NULL);
   1.910 +
   1.911 +	/* populate the array with the supported methods */
   1.912 +	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
   1.913 +		tmp[i++] = eventops[k]->name;
   1.914 +	}
   1.915 +	tmp[i] = NULL;
   1.916 +
   1.917 +	if (methods != NULL)
   1.918 +		mm_free((char**)methods);
   1.919 +
   1.920 +	methods = tmp;
   1.921 +
   1.922 +	return (methods);
   1.923 +}
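
A short sketch of listing the compiled-in backends; the array comes back in the same preference order as eventops[] and is NULL-terminated:

#include <event2/event.h>
#include <stdio.h>

static void
print_supported_methods(void)
{
	int i;
	const char **methods = event_get_supported_methods();

	if (!methods)
		return;
	for (i = 0; methods[i] != NULL; ++i)
		printf("  %s\n", methods[i]);
}
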
   1.924 +
   1.925 +struct event_config *
   1.926 +event_config_new(void)
   1.927 +{
   1.928 +	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
   1.929 +
   1.930 +	if (cfg == NULL)
   1.931 +		return (NULL);
   1.932 +
   1.933 +	TAILQ_INIT(&cfg->entries);
   1.934 +
   1.935 +	return (cfg);
   1.936 +}
   1.937 +
   1.938 +static void
   1.939 +event_config_entry_free(struct event_config_entry *entry)
   1.940 +{
   1.941 +	if (entry->avoid_method != NULL)
   1.942 +		mm_free((char *)entry->avoid_method);
   1.943 +	mm_free(entry);
   1.944 +}
   1.945 +
   1.946 +void
   1.947 +event_config_free(struct event_config *cfg)
   1.948 +{
   1.949 +	struct event_config_entry *entry;
   1.950 +
   1.951 +	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
   1.952 +		TAILQ_REMOVE(&cfg->entries, entry, next);
   1.953 +		event_config_entry_free(entry);
   1.954 +	}
   1.955 +	mm_free(cfg);
   1.956 +}
   1.957 +
   1.958 +int
   1.959 +event_config_set_flag(struct event_config *cfg, int flag)
   1.960 +{
   1.961 +	if (!cfg)
   1.962 +		return -1;
   1.963 +	cfg->flags |= flag;
   1.964 +	return 0;
   1.965 +}
   1.966 +
   1.967 +int
   1.968 +event_config_avoid_method(struct event_config *cfg, const char *method)
   1.969 +{
   1.970 +	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
   1.971 +	if (entry == NULL)
   1.972 +		return (-1);
   1.973 +
   1.974 +	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
   1.975 +		mm_free(entry);
   1.976 +		return (-1);
   1.977 +	}
   1.978 +
   1.979 +	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
   1.980 +
   1.981 +	return (0);
   1.982 +}
   1.983 +
   1.984 +int
   1.985 +event_config_require_features(struct event_config *cfg,
   1.986 +    int features)
   1.987 +{
   1.988 +	if (!cfg)
   1.989 +		return (-1);
   1.990 +	cfg->require_features = features;
   1.991 +	return (0);
   1.992 +}
   1.993 +
   1.994 +int
   1.995 +event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
   1.996 +{
   1.997 +	if (!cfg)
   1.998 +		return (-1);
   1.999 +	cfg->n_cpus_hint = cpus;
  1.1000 +	return (0);
  1.1001 +}
  1.1002 +
  1.1003 +int
  1.1004 +event_priority_init(int npriorities)
  1.1005 +{
  1.1006 +	return event_base_priority_init(current_base, npriorities);
  1.1007 +}
  1.1008 +
  1.1009 +int
  1.1010 +event_base_priority_init(struct event_base *base, int npriorities)
  1.1011 +{
  1.1012 +	int i;
  1.1013 +
  1.1014 +	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
  1.1015 +	    || npriorities >= EVENT_MAX_PRIORITIES)
  1.1016 +		return (-1);
  1.1017 +
  1.1018 +	if (npriorities == base->nactivequeues)
  1.1019 +		return (0);
  1.1020 +
  1.1021 +	if (base->nactivequeues) {
  1.1022 +		mm_free(base->activequeues);
  1.1023 +		base->nactivequeues = 0;
  1.1024 +	}
  1.1025 +
  1.1026 +	/* Allocate our priority queues */
  1.1027 +	base->activequeues = (struct event_list *)
  1.1028 +	  mm_calloc(npriorities, sizeof(struct event_list));
  1.1029 +	if (base->activequeues == NULL) {
  1.1030 +		event_warn("%s: calloc", __func__);
  1.1031 +		return (-1);
  1.1032 +	}
  1.1033 +	base->nactivequeues = npriorities;
  1.1034 +
  1.1035 +	for (i = 0; i < base->nactivequeues; ++i) {
  1.1036 +		TAILQ_INIT(&base->activequeues[i]);
  1.1037 +	}
  1.1038 +
  1.1039 +	return (0);
  1.1040 +}
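
Priorities have to be configured while nothing is active, and each event is then bound to a queue with event_priority_set(); lower numbers run first. A sketch with hypothetical events:

#include <event2/event.h>

/* 'urgent' and 'bulk' are hypothetical events created elsewhere. */
static void
setup_two_priorities(struct event_base *base, struct event *urgent,
    struct event *bulk)
{
	event_base_priority_init(base, 2);	/* before any event is active */
	event_priority_set(urgent, 0);		/* lower number runs first */
	event_priority_set(bulk, 1);		/* can be starved by level 0 */
}
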
  1.1041 +
  1.1042 +/* Returns true iff we're currently watching any events. */
  1.1043 +static int
  1.1044 +event_haveevents(struct event_base *base)
  1.1045 +{
  1.1046 +	/* Caller must hold th_base_lock */
  1.1047 +	return (base->virtual_event_count > 0 || base->event_count > 0);
  1.1048 +}
  1.1049 +
  1.1050 +/* "closure" function called when processing active signal events */
  1.1051 +static inline void
  1.1052 +event_signal_closure(struct event_base *base, struct event *ev)
  1.1053 +{
  1.1054 +	short ncalls;
  1.1055 +	int should_break;
  1.1056 +
  1.1057 +	/* Allows deletes to work */
  1.1058 +	ncalls = ev->ev_ncalls;
  1.1059 +	if (ncalls != 0)
  1.1060 +		ev->ev_pncalls = &ncalls;
  1.1061 +	EVBASE_RELEASE_LOCK(base, th_base_lock);
  1.1062 +	while (ncalls) {
  1.1063 +		ncalls--;
  1.1064 +		ev->ev_ncalls = ncalls;
  1.1065 +		if (ncalls == 0)
  1.1066 +			ev->ev_pncalls = NULL;
  1.1067 +		(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
  1.1068 +
  1.1069 +		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1.1070 +		should_break = base->event_break;
  1.1071 +		EVBASE_RELEASE_LOCK(base, th_base_lock);
  1.1072 +
  1.1073 +		if (should_break) {
  1.1074 +			if (ncalls != 0)
  1.1075 +				ev->ev_pncalls = NULL;
  1.1076 +			return;
  1.1077 +		}
  1.1078 +	}
  1.1079 +}
  1.1080 +
  1.1081 +/* Common timeouts are special timeouts that are handled as queues rather than
  1.1082 + * in the minheap.  This is more efficient than the minheap if we happen to
  1.1083 + * know that we're going to get several thousand timeout events all with
  1.1084 + * the same timeout value.
  1.1085 + *
  1.1086 + * Since all our timeout handling code assumes timevals can be copied,
  1.1087 + * assigned, etc, we can't use "magic pointer" to encode these common
  1.1088 + * timeouts.  Searching through a list to see if every timeout is common could
  1.1089 + * also get inefficient.  Instead, we take advantage of the fact that tv_usec
  1.1090 + * is 32 bits long, but only uses 20 of those bits (since it can never be over
  1.1091 + * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
  1.1092 + * of index into the event_base's array of common timeouts.
  1.1093 + */
  1.1094 +
  1.1095 +#define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
  1.1096 +#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
  1.1097 +#define COMMON_TIMEOUT_IDX_SHIFT 20
  1.1098 +#define COMMON_TIMEOUT_MASK     0xf0000000
  1.1099 +#define COMMON_TIMEOUT_MAGIC    0x50000000
  1.1100 +
  1.1101 +#define COMMON_TIMEOUT_IDX(tv) \
  1.1102 +	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
  1.1103 +
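
As a worked illustration of that layout (a sketch, not part of the original file; the masks are spelled out as literals): a 2.5-second duration stored in a hypothetical slot 3 carries the magic nibble, the index, and the real microseconds packed into tv_usec.

#include <sys/time.h>

/* Layout sketch: tv_usec = magic | (index << 20) | microseconds. */
static void
common_timeout_layout_example(void)
{
	struct timeval tv;
	int idx;
	long usec;

	tv.tv_sec = 2;
	tv.tv_usec = 0x50000000		/* COMMON_TIMEOUT_MAGIC      */
	    | (3 << 20)			/* slot index, bits 20..27   */
	    | 500000;			/* real microseconds, < 2^20 */

	idx = (tv.tv_usec & 0x0ff00000) >> 20;	/* == 3 */
	usec = tv.tv_usec & 0x000fffff;		/* == 500000 */
	(void)idx;
	(void)usec;
}
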
  1.1104 +/** Return true iff 'tv' is a common timeout in 'base' */
  1.1105 +static inline int
  1.1106 +is_common_timeout(const struct timeval *tv,
  1.1107 +    const struct event_base *base)
  1.1108 +{
  1.1109 +	int idx;
  1.1110 +	if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
  1.1111 +		return 0;
  1.1112 +	idx = COMMON_TIMEOUT_IDX(tv);
  1.1113 +	return idx < base->n_common_timeouts;
  1.1114 +}
  1.1115 +
  1.1116 +/* True iff tv1 and tv2 have the same common-timeout index, or if neither
  1.1117 + * one is a common timeout. */
  1.1118 +static inline int
  1.1119 +is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
  1.1120 +{
  1.1121 +	return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
  1.1122 +	    (tv2->tv_usec & ~MICROSECONDS_MASK);
  1.1123 +}
  1.1124 +
  1.1125 +/** Requires that 'tv' is a common timeout.  Return the corresponding
  1.1126 + * common_timeout_list. */
  1.1127 +static inline struct common_timeout_list *
  1.1128 +get_common_timeout_list(struct event_base *base, const struct timeval *tv)
  1.1129 +{
  1.1130 +	return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
  1.1131 +}
  1.1132 +
  1.1133 +#if 0
  1.1134 +static inline int
  1.1135 +common_timeout_ok(const struct timeval *tv,
  1.1136 +    struct event_base *base)
  1.1137 +{
  1.1138 +	const struct timeval *expect =
  1.1139 +	    &get_common_timeout_list(base, tv)->duration;
  1.1140 +	return tv->tv_sec == expect->tv_sec &&
  1.1141 +	    tv->tv_usec == expect->tv_usec;
  1.1142 +}
  1.1143 +#endif
  1.1144 +
  1.1145 +/* Add the timeout for the first event in the given common timeout list to the
  1.1146 + * event_base's minheap. */
  1.1147 +static void
  1.1148 +common_timeout_schedule(struct common_timeout_list *ctl,
  1.1149 +    const struct timeval *now, struct event *head)
  1.1150 +{
  1.1151 +	struct timeval timeout = head->ev_timeout;
  1.1152 +	timeout.tv_usec &= MICROSECONDS_MASK;
  1.1153 +	event_add_internal(&ctl->timeout_event, &timeout, 1);
  1.1154 +}
  1.1155 +
  1.1156 +/* Callback: invoked when the timeout for a common timeout queue triggers.
  1.1157 + * This means that (at least) the first event in that queue should be run,
  1.1158 + * and the timeout should be rescheduled if there are more events. */
  1.1159 +static void
  1.1160 +common_timeout_callback(evutil_socket_t fd, short what, void *arg)
  1.1161 +{
  1.1162 +	struct timeval now;
  1.1163 +	struct common_timeout_list *ctl = arg;
  1.1164 +	struct event_base *base = ctl->base;
  1.1165 +	struct event *ev = NULL;
  1.1166 +	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1.1167 +	gettime(base, &now);
  1.1168 +	while (1) {
  1.1169 +		ev = TAILQ_FIRST(&ctl->events);
  1.1170 +		if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
  1.1171 +		    (ev->ev_timeout.tv_sec == now.tv_sec &&
  1.1172 +			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
  1.1173 +			break;
  1.1174 +		event_del_internal(ev);
  1.1175 +		event_active_nolock(ev, EV_TIMEOUT, 1);
  1.1176 +	}
  1.1177 +	if (ev)
  1.1178 +		common_timeout_schedule(ctl, &now, ev);
  1.1179 +	EVBASE_RELEASE_LOCK(base, th_base_lock);
  1.1180 +}
  1.1181 +
  1.1182 +#define MAX_COMMON_TIMEOUTS 256
  1.1183 +
  1.1184 +const struct timeval *
  1.1185 +event_base_init_common_timeout(struct event_base *base,
  1.1186 +    const struct timeval *duration)
  1.1187 +{
  1.1188 +	int i;
  1.1189 +	struct timeval tv;
  1.1190 +	const struct timeval *result=NULL;
  1.1191 +	struct common_timeout_list *new_ctl;
  1.1192 +
  1.1193 +	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1.1194 +	if (duration->tv_usec > 1000000) {
  1.1195 +		memcpy(&tv, duration, sizeof(struct timeval));
  1.1196 +		if (is_common_timeout(duration, base))
  1.1197 +			tv.tv_usec &= MICROSECONDS_MASK;
  1.1198 +		tv.tv_sec += tv.tv_usec / 1000000;
  1.1199 +		tv.tv_usec %= 1000000;
  1.1200 +		duration = &tv;
  1.1201 +	}
  1.1202 +	for (i = 0; i < base->n_common_timeouts; ++i) {
  1.1203 +		const struct common_timeout_list *ctl =
  1.1204 +		    base->common_timeout_queues[i];
  1.1205 +		if (duration->tv_sec == ctl->duration.tv_sec &&
  1.1206 +		    duration->tv_usec ==
  1.1207 +		    (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
  1.1208 +			EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
  1.1209 +			result = &ctl->duration;
  1.1210 +			goto done;
  1.1211 +		}
  1.1212 +	}
  1.1213 +	if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
  1.1214 +		event_warnx("%s: Too many common timeouts already in use; "
  1.1215 +		    "we only support %d per event_base", __func__,
  1.1216 +		    MAX_COMMON_TIMEOUTS);
  1.1217 +		goto done;
  1.1218 +	}
  1.1219 +	if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
  1.1220 +		int n = base->n_common_timeouts < 16 ? 16 :
  1.1221 +		    base->n_common_timeouts*2;
  1.1222 +		struct common_timeout_list **newqueues =
  1.1223 +		    mm_realloc(base->common_timeout_queues,
  1.1224 +			n*sizeof(struct common_timeout_queue *));
  1.1225 +		if (!newqueues) {
  1.1226 +			event_warn("%s: realloc",__func__);
  1.1227 +			goto done;
  1.1228 +		}
  1.1229 +		base->n_common_timeouts_allocated = n;
  1.1230 +		base->common_timeout_queues = newqueues;
  1.1231 +	}
  1.1232 +	new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
  1.1233 +	if (!new_ctl) {
  1.1234 +		event_warn("%s: calloc",__func__);
  1.1235 +		goto done;
  1.1236 +	}
  1.1237 +	TAILQ_INIT(&new_ctl->events);
  1.1238 +	new_ctl->duration.tv_sec = duration->tv_sec;
  1.1239 +	new_ctl->duration.tv_usec =
  1.1240 +	    duration->tv_usec | COMMON_TIMEOUT_MAGIC |
  1.1241 +	    (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
  1.1242 +	evtimer_assign(&new_ctl->timeout_event, base,
  1.1243 +	    common_timeout_callback, new_ctl);
  1.1244 +	new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
  1.1245 +	event_priority_set(&new_ctl->timeout_event, 0);
  1.1246 +	new_ctl->base = base;
  1.1247 +	base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
  1.1248 +	result = &new_ctl->duration;
  1.1249 +
  1.1250 +done:
  1.1251 +	if (result)
  1.1252 +		EVUTIL_ASSERT(is_common_timeout(result, base));
  1.1253 +
  1.1254 +	EVBASE_RELEASE_LOCK(base, th_base_lock);
  1.1255 +	return result;
  1.1256 +}
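
From the caller's side the whole mechanism is opaque: ask the base for a shared duration once, then pass the returned timeval (magic bits and all) to event_add() for every timer that uses it. A sketch; 'evs' is assumed to be an array of already-created timer events:

#include <event2/event.h>

static void
add_shared_timeouts(struct event_base *base, struct event **evs, int n)
{
	int i;
	struct timeval ten_seconds = { 10, 0 };
	const struct timeval *common =
	    event_base_init_common_timeout(base, &ten_seconds);

	for (i = 0; i < n; ++i)
		event_add(evs[i], common ? common : &ten_seconds);
}
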
  1.1257 +
  1.1258 +/* Closure function invoked when we're activating a persistent event. */
  1.1259 +static inline void
  1.1260 +event_persist_closure(struct event_base *base, struct event *ev)
  1.1261 +{
  1.1262 +	/* reschedule the persistent event if we have a timeout. */
  1.1263 +	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
  1.1264 +		/* If there was a timeout, we want it to run at an interval of
  1.1265 +		 * ev_io_timeout after the last time it was _scheduled_ for,
  1.1266 +		 * not ev_io_timeout after _now_.  If it fired for another
  1.1267 +		 * reason, though, the timeout ought to start ticking _now_. */
  1.1268 +		struct timeval run_at, relative_to, delay, now;
  1.1269 +		ev_uint32_t usec_mask = 0;
  1.1270 +		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
  1.1271 +			&ev->ev_io_timeout));
  1.1272 +		gettime(base, &now);
  1.1273 +		if (is_common_timeout(&ev->ev_timeout, base)) {
  1.1274 +			delay = ev->ev_io_timeout;
  1.1275 +			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
  1.1276 +			delay.tv_usec &= MICROSECONDS_MASK;
  1.1277 +			if (ev->ev_res & EV_TIMEOUT) {
  1.1278 +				relative_to = ev->ev_timeout;
  1.1279 +				relative_to.tv_usec &= MICROSECONDS_MASK;
  1.1280 +			} else {
  1.1281 +				relative_to = now;
  1.1282 +			}
  1.1283 +		} else {
  1.1284 +			delay = ev->ev_io_timeout;
  1.1285 +			if (ev->ev_res & EV_TIMEOUT) {
  1.1286 +				relative_to = ev->ev_timeout;
  1.1287 +			} else {
  1.1288 +				relative_to = now;
  1.1289 +			}
  1.1290 +		}
  1.1291 +		evutil_timeradd(&relative_to, &delay, &run_at);
  1.1292 +		if (evutil_timercmp(&run_at, &now, <)) {
  1.1293 +			/* Looks like we missed at least one invocation due to
  1.1294 +			 * a clock jump, not running the event loop for a
  1.1295 +			 * while, really slow callbacks, or
  1.1296 +			 * something. Reschedule relative to now.
  1.1297 +			 */
  1.1298 +			evutil_timeradd(&now, &delay, &run_at);
  1.1299 +		}
  1.1300 +		run_at.tv_usec |= usec_mask;
  1.1301 +		event_add_internal(ev, &run_at, 1);
  1.1302 +	}
  1.1303 +	EVBASE_RELEASE_LOCK(base, th_base_lock);
  1.1304 +	(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
  1.1305 +}
  1.1306 +
  1.1307 +/*
  1.1308 +  Helper for event_process_active to process all the events in a single queue,
  1.1309 +  releasing the lock as we go.  This function requires that the lock be held
  1.1310 +  when it's invoked.  Returns -1 if we get a signal or an event_break that
  1.1311 +  means we should stop processing any active events now.  Otherwise returns
  1.1312 +  the number of non-internal events that we processed.
  1.1313 +*/
  1.1314 +static int
  1.1315 +event_process_active_single_queue(struct event_base *base,
  1.1316 +    struct event_list *activeq)
  1.1317 +{
  1.1318 +	struct event *ev;
  1.1319 +	int count = 0;
  1.1320 +
  1.1321 +	EVUTIL_ASSERT(activeq != NULL);
  1.1322 +
  1.1323 +	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
  1.1324 +		if (ev->ev_events & EV_PERSIST)
  1.1325 +			event_queue_remove(base, ev, EVLIST_ACTIVE);
  1.1326 +		else
  1.1327 +			event_del_internal(ev);
  1.1328 +		if (!(ev->ev_flags & EVLIST_INTERNAL))
  1.1329 +			++count;
  1.1330 +
  1.1331 +		event_debug((
  1.1332 +			 "event_process_active: event: %p, %s%scall %p",
  1.1333 +			ev,
  1.1334 +			ev->ev_res & EV_READ ? "EV_READ " : " ",
  1.1335 +			ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
  1.1336 +			ev->ev_callback));
  1.1337 +
  1.1338 +#ifndef _EVENT_DISABLE_THREAD_SUPPORT
  1.1339 +		base->current_event = ev;
  1.1340 +		base->current_event_waiters = 0;
  1.1341 +#endif
  1.1342 +
  1.1343 +		switch (ev->ev_closure) {
  1.1344 +		case EV_CLOSURE_SIGNAL:
  1.1345 +			event_signal_closure(base, ev);
  1.1346 +			break;
  1.1347 +		case EV_CLOSURE_PERSIST:
  1.1348 +			event_persist_closure(base, ev);
  1.1349 +			break;
  1.1350 +		default:
  1.1351 +		case EV_CLOSURE_NONE:
  1.1352 +			EVBASE_RELEASE_LOCK(base, th_base_lock);
  1.1353 +			(*ev->ev_callback)(
  1.1354 +				ev->ev_fd, ev->ev_res, ev->ev_arg);
  1.1355 +			break;
  1.1356 +		}
  1.1357 +
  1.1358 +		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1.1359 +#ifndef _EVENT_DISABLE_THREAD_SUPPORT
  1.1360 +		base->current_event = NULL;
  1.1361 +		if (base->current_event_waiters) {
  1.1362 +			base->current_event_waiters = 0;
  1.1363 +			EVTHREAD_COND_BROADCAST(base->current_event_cond);
  1.1364 +		}
  1.1365 +#endif
  1.1366 +
  1.1367 +		if (base->event_break)
  1.1368 +			return -1;
  1.1369 +		if (base->event_continue)
  1.1370 +			break;
  1.1371 +	}
  1.1372 +	return count;
  1.1373 +}
  1.1374 +
  1.1375 +/*
  1.1376 +   Process up to MAX_DEFERRED of the deferred_cb entries in 'queue'.  If
  1.1377 +   *breakptr becomes set to 1, stop.  Requires that we start out holding
  1.1378 +   the lock on 'queue'; releases the lock around 'queue' for each deferred_cb
  1.1379 +   we process.
  1.1380 + */
  1.1381 +static int
  1.1382 +event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr)
  1.1383 +{
  1.1384 +	int count = 0;
  1.1385 +	struct deferred_cb *cb;
  1.1386 +
  1.1387 +#define MAX_DEFERRED 16
  1.1388 +	while ((cb = TAILQ_FIRST(&queue->deferred_cb_list))) {
  1.1389 +		cb->queued = 0;
  1.1390 +		TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
  1.1391 +		--queue->active_count;
  1.1392 +		UNLOCK_DEFERRED_QUEUE(queue);
  1.1393 +
  1.1394 +		cb->cb(cb, cb->arg);
  1.1395 +
  1.1396 +		LOCK_DEFERRED_QUEUE(queue);
  1.1397 +		if (*breakptr)
  1.1398 +			return -1;
  1.1399 +		if (++count == MAX_DEFERRED)
  1.1400 +			break;
  1.1401 +	}
  1.1402 +#undef MAX_DEFERRED
  1.1403 +	return count;
  1.1404 +}
  1.1405 +
  1.1406 +/*
  1.1407 + * Active events are stored in priority queues.  Lower priorities are always
  1.1408 + * processed before higher priorities.  Low priority events can starve high
  1.1409 + * priority ones.
  1.1410 + */
  1.1411 +
  1.1412 +static int
  1.1413 +event_process_active(struct event_base *base)
  1.1414 +{
  1.1415 +	/* Caller must hold th_base_lock */
  1.1416 +	struct event_list *activeq = NULL;
  1.1417 +	int i, c = 0;
  1.1418 +
  1.1419 +	for (i = 0; i < base->nactivequeues; ++i) {
  1.1420 +		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
  1.1421 +			base->event_running_priority = i;
  1.1422 +			activeq = &base->activequeues[i];
  1.1423 +			c = event_process_active_single_queue(base, activeq);
  1.1424 +			if (c < 0) {
  1.1425 +				base->event_running_priority = -1;
  1.1426 +				return -1;
  1.1427 +			} else if (c > 0)
  1.1428 +				break; /* Processed a real event; do not
  1.1429 +					* consider lower-priority events */
  1.1430 +			/* If we get here, all of the events we processed
  1.1431 +			 * were internal.  Continue. */
  1.1432 +		}
  1.1433 +	}
  1.1434 +
  1.1435 +	event_process_deferred_callbacks(&base->defer_queue,&base->event_break);
  1.1436 +	base->event_running_priority = -1;
  1.1437 +	return c;
  1.1438 +}
  1.1439 +
  1.1440 +/*
  1.1441 + * Wait continuously for events.  We exit only if no events are left.
  1.1442 + */
  1.1443 +
  1.1444 +int
  1.1445 +event_dispatch(void)
  1.1446 +{
  1.1447 +	return (event_loop(0));
  1.1448 +}
  1.1449 +
  1.1450 +int
  1.1451 +event_base_dispatch(struct event_base *event_base)
  1.1452 +{
  1.1453 +	return (event_base_loop(event_base, 0));
  1.1454 +}
  1.1455 +
  1.1456 +const char *
  1.1457 +event_base_get_method(const struct event_base *base)
  1.1458 +{
  1.1459 +	EVUTIL_ASSERT(base);
  1.1460 +	return (base->evsel->name);
  1.1461 +}
  1.1462 +
  1.1463 +/** Callback: used to implement event_base_loopexit by telling the event_base
  1.1464 + * that it's time to exit its loop. */
  1.1465 +static void
  1.1466 +event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
  1.1467 +{
  1.1468 +	struct event_base *base = arg;
  1.1469 +	base->event_gotterm = 1;
  1.1470 +}
  1.1471 +
  1.1472 +int
  1.1473 +event_loopexit(const struct timeval *tv)
  1.1474 +{
  1.1475 +	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
  1.1476 +		    current_base, tv));
  1.1477 +}
  1.1478 +
  1.1479 +int
  1.1480 +event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
  1.1481 +{
  1.1482 +	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
  1.1483 +		    event_base, tv));
  1.1484 +}
  1.1485 +
  1.1486 +int
  1.1487 +event_loopbreak(void)
  1.1488 +{
  1.1489 +	return (event_base_loopbreak(current_base));
  1.1490 +}
  1.1491 +
  1.1492 +int
  1.1493 +event_base_loopbreak(struct event_base *event_base)
  1.1494 +{
  1.1495 +	int r = 0;
  1.1496 +	if (event_base == NULL)
  1.1497 +		return (-1);
  1.1498 +
  1.1499 +	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
  1.1500 +	event_base->event_break = 1;
  1.1501 +
  1.1502 +	if (EVBASE_NEED_NOTIFY(event_base)) {
  1.1503 +		r = evthread_notify_base(event_base);
  1.1504 +	} else {
  1.1505 +		r = (0);
  1.1506 +	}
  1.1507 +	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
  1.1508 +	return r;
  1.1509 +}
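
The difference between the two exits: loopexit schedules termination (optionally after a delay) and lets the current round of active callbacks finish, while loopbreak stops the loop as soon as the running callback returns. A sketch of each, called from callbacks that receive the base as their argument:

#include <event2/event.h>

static void
stop_in_two_seconds_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	struct timeval delay = { 2, 0 };

	event_base_loopexit(base, &delay);	/* graceful, delayed exit */
}

static void
stop_immediately_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;

	event_base_loopbreak(base);	/* abort after this callback */
}
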
  1.1510 +
  1.1511 +int
  1.1512 +event_base_got_break(struct event_base *event_base)
  1.1513 +{
  1.1514 +	int res;
  1.1515 +	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
  1.1516 +	res = event_base->event_break;
  1.1517 +	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
  1.1518 +	return res;
  1.1519 +}
  1.1520 +
  1.1521 +int
  1.1522 +event_base_got_exit(struct event_base *event_base)
  1.1523 +{
  1.1524 +	int res;
  1.1525 +	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
  1.1526 +	res = event_base->event_gotterm;
  1.1527 +	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
  1.1528 +	return res;
  1.1529 +}
  1.1530 +
  1.1531 +/* not thread safe */
  1.1532 +
  1.1533 +int
  1.1534 +event_loop(int flags)
  1.1535 +{
  1.1536 +	return event_base_loop(current_base, flags);
  1.1537 +}
  1.1538 +
  1.1539 +int
  1.1540 +event_base_loop(struct event_base *base, int flags)
  1.1541 +{
  1.1542 +	const struct eventop *evsel = base->evsel;
  1.1543 +	struct timeval tv;
  1.1544 +	struct timeval *tv_p;
  1.1545 +	int res, done, retval = 0;
  1.1546 +
  1.1547 +	/* Grab the lock.  We will release it inside evsel.dispatch, and again
  1.1548 +	 * as we invoke user callbacks. */
  1.1549 +	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1.1550 +
  1.1551 +	if (base->running_loop) {
  1.1552 +		event_warnx("%s: reentrant invocation.  Only one event_base_loop"
  1.1553 +		    " can run on each event_base at once.", __func__);
  1.1554 +		EVBASE_RELEASE_LOCK(base, th_base_lock);
  1.1555 +		return -1;
  1.1556 +	}
  1.1557 +
  1.1558 +	base->running_loop = 1;
  1.1559 +
  1.1560 +	clear_time_cache(base);
  1.1561 +
  1.1562 +	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
  1.1563 +		evsig_set_base(base);
  1.1564 +
  1.1565 +	done = 0;
  1.1566 +
  1.1567 +#ifndef _EVENT_DISABLE_THREAD_SUPPORT
  1.1568 +	base->th_owner_id = EVTHREAD_GET_ID();
  1.1569 +#endif
  1.1570 +
  1.1571 +	base->event_gotterm = base->event_break = 0;
  1.1572 +
  1.1573 +	while (!done) {
  1.1574 +		base->event_continue = 0;
  1.1575 +
  1.1576 +		/* Terminate the loop if we have been asked to */
  1.1577 +		if (base->event_gotterm) {
  1.1578 +			break;
  1.1579 +		}
  1.1580 +
  1.1581 +		if (base->event_break) {
  1.1582 +			break;
  1.1583 +		}
  1.1584 +
  1.1585 +		timeout_correct(base, &tv);
  1.1586 +
  1.1587 +		tv_p = &tv;
  1.1588 +		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
  1.1589 +			timeout_next(base, &tv_p);
  1.1590 +		} else {
  1.1591 +			/*
  1.1592 +			 * if we have active events, we just poll new events
  1.1593 +			 * without waiting.
  1.1594 +			 */
  1.1595 +			evutil_timerclear(&tv);
  1.1596 +		}
  1.1597 +
  1.1598 +		/* If we have no events, we just exit */
  1.1599 +		if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
  1.1600 +			event_debug(("%s: no events registered.", __func__));
  1.1601 +			retval = 1;
  1.1602 +			goto done;
  1.1603 +		}
  1.1604 +
  1.1605 +		/* update last old time */
  1.1606 +		gettime(base, &base->event_tv);
  1.1607 +
  1.1608 +		clear_time_cache(base);
  1.1609 +
  1.1610 +		res = evsel->dispatch(base, tv_p);
  1.1611 +
  1.1612 +		if (res == -1) {
  1.1613 +			event_debug(("%s: dispatch returned unsuccessfully.",
  1.1614 +				__func__));
  1.1615 +			retval = -1;
  1.1616 +			goto done;
  1.1617 +		}
  1.1618 +
  1.1619 +		update_time_cache(base);
  1.1620 +
  1.1621 +		timeout_process(base);
  1.1622 +
  1.1623 +		if (N_ACTIVE_CALLBACKS(base)) {
  1.1624 +			int n = event_process_active(base);
  1.1625 +			if ((flags & EVLOOP_ONCE)
  1.1626 +			    && N_ACTIVE_CALLBACKS(base) == 0
  1.1627 +			    && n != 0)
  1.1628 +				done = 1;
  1.1629 +		} else if (flags & EVLOOP_NONBLOCK)
  1.1630 +			done = 1;
  1.1631 +	}
  1.1632 +	event_debug(("%s: asked to terminate loop.", __func__));
  1.1633 +
  1.1634 +done:
  1.1635 +	clear_time_cache(base);
  1.1636 +	base->running_loop = 0;
  1.1637 +
  1.1638 +	EVBASE_RELEASE_LOCK(base, th_base_lock);
  1.1639 +
  1.1640 +	return (retval);
  1.1641 +}
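/* Editorial note: an illustrative sketch, not part of the original
 * changeset, showing the flags handled by event_base_loop() above.
 * EVLOOP_ONCE blocks until at least one event becomes active, runs the
 * active callbacks, and returns; EVLOOP_NONBLOCK polls once and runs only
 * the callbacks that are already ready, without waiting.
 *
 *	// Block until something fires, run its callbacks, then return:
 *	event_base_loop(base, EVLOOP_ONCE);
 *
 *	// Poll without sleeping:
 *	event_base_loop(base, EVLOOP_NONBLOCK);
 */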
  1.1642 +
  1.1643 +/* Sets up an event for processing once */
  1.1644 +struct event_once {
  1.1645 +	struct event ev;
  1.1646 +
  1.1647 +	void (*cb)(evutil_socket_t, short, void *);
  1.1648 +	void *arg;
  1.1649 +};
  1.1650 +
  1.1651 +/* One-time callback to implement event_base_once: invokes the user callback,
  1.1652 + * then deletes the allocated storage */
  1.1653 +static void
  1.1654 +event_once_cb(evutil_socket_t fd, short events, void *arg)
  1.1655 +{
  1.1656 +	struct event_once *eonce = arg;
  1.1657 +
  1.1658 +	(*eonce->cb)(fd, events, eonce->arg);
  1.1659 +	event_debug_unassign(&eonce->ev);
  1.1660 +	mm_free(eonce);
  1.1661 +}
  1.1662 +
  1.1663 +/* Schedules the event once via current_base; not threadsafe. */
  1.1664 +int
  1.1665 +event_once(evutil_socket_t fd, short events,
  1.1666 +    void (*callback)(evutil_socket_t, short, void *),
  1.1667 +    void *arg, const struct timeval *tv)
  1.1668 +{
  1.1669 +	return event_base_once(current_base, fd, events, callback, arg, tv);
  1.1670 +}
  1.1671 +
  1.1672 +/* Schedules an event once */
  1.1673 +int
  1.1674 +event_base_once(struct event_base *base, evutil_socket_t fd, short events,
  1.1675 +    void (*callback)(evutil_socket_t, short, void *),
  1.1676 +    void *arg, const struct timeval *tv)
  1.1677 +{
  1.1678 +	struct event_once *eonce;
  1.1679 +	struct timeval etv;
  1.1680 +	int res = 0;
  1.1681 +
  1.1682 +	/* Signal events and persistent events are not supported by
  1.1683 +	 * the once interface. */
  1.1684 +	if (events & (EV_SIGNAL|EV_PERSIST))
  1.1685 +		return (-1);
  1.1686 +
  1.1687 +	if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
  1.1688 +		return (-1);
  1.1689 +
  1.1690 +	eonce->cb = callback;
  1.1691 +	eonce->arg = arg;
  1.1692 +
  1.1693 +	if (events == EV_TIMEOUT) {
  1.1694 +		if (tv == NULL) {
  1.1695 +			evutil_timerclear(&etv);
  1.1696 +			tv = &etv;
  1.1697 +		}
  1.1698 +
  1.1699 +		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
  1.1700 +	} else if (events & (EV_READ|EV_WRITE)) {
  1.1701 +		events &= EV_READ|EV_WRITE;
  1.1702 +
  1.1703 +		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
  1.1704 +	} else {
  1.1705 +		/* Bad event combination */
  1.1706 +		mm_free(eonce);
  1.1707 +		return (-1);
  1.1708 +	}
  1.1709 +
  1.1710 +	if (res == 0)
  1.1711 +		res = event_add(&eonce->ev, tv);
  1.1712 +	if (res != 0) {
  1.1713 +		mm_free(eonce);
  1.1714 +		return (res);
  1.1715 +	}
  1.1716 +
  1.1717 +	return (0);
  1.1718 +}
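/* Editorial note: an illustrative sketch, not part of the original
 * changeset.  event_base_once() above allocates the struct event_once
 * wrapper itself, and event_once_cb() frees it after the user callback has
 * run, so the caller keeps no handle to cancel it.  The names on_tick and
 * base are placeholders.
 *
 *	static void on_tick(evutil_socket_t fd, short what, void *arg)
 *	{
 *		(void)fd; (void)what;
 *		puts((const char *)arg);
 *	}
 *
 *	struct timeval half_sec = { 0, 500000 };
 *	event_base_once(base, -1, EV_TIMEOUT, on_tick, (void *)"tick", &half_sec);
 */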
  1.1719 +
  1.1720 +int
  1.1721 +event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
  1.1722 +{
  1.1723 +	if (!base)
  1.1724 +		base = current_base;
  1.1725 +
  1.1726 +	_event_debug_assert_not_added(ev);
  1.1727 +
  1.1728 +	ev->ev_base = base;
  1.1729 +
  1.1730 +	ev->ev_callback = callback;
  1.1731 +	ev->ev_arg = arg;
  1.1732 +	ev->ev_fd = fd;
  1.1733 +	ev->ev_events = events;
  1.1734 +	ev->ev_res = 0;
  1.1735 +	ev->ev_flags = EVLIST_INIT;
  1.1736 +	ev->ev_ncalls = 0;
  1.1737 +	ev->ev_pncalls = NULL;
  1.1738 +
  1.1739 +	if (events & EV_SIGNAL) {
  1.1740 +		if ((events & (EV_READ|EV_WRITE)) != 0) {
  1.1741 +			event_warnx("%s: EV_SIGNAL is not compatible with "
  1.1742 +			    "EV_READ or EV_WRITE", __func__);
  1.1743 +			return -1;
  1.1744 +		}
  1.1745 +		ev->ev_closure = EV_CLOSURE_SIGNAL;
  1.1746 +	} else {
  1.1747 +		if (events & EV_PERSIST) {
  1.1748 +			evutil_timerclear(&ev->ev_io_timeout);
  1.1749 +			ev->ev_closure = EV_CLOSURE_PERSIST;
  1.1750 +		} else {
  1.1751 +			ev->ev_closure = EV_CLOSURE_NONE;
  1.1752 +		}
  1.1753 +	}
  1.1754 +
  1.1755 +	min_heap_elem_init(ev);
  1.1756 +
  1.1757 +	if (base != NULL) {
  1.1758 +		/* by default, we put new events into the middle priority */
  1.1759 +		ev->ev_pri = base->nactivequeues / 2;
  1.1760 +	}
  1.1761 +
  1.1762 +	_event_debug_note_setup(ev);
  1.1763 +
  1.1764 +	return 0;
  1.1765 +}
  1.1766 +
  1.1767 +int
  1.1768 +event_base_set(struct event_base *base, struct event *ev)
  1.1769 +{
  1.1770 +	/* Only innocent events may be assigned to a different base */
  1.1771 +	if (ev->ev_flags != EVLIST_INIT)
  1.1772 +		return (-1);
  1.1773 +
  1.1774 +	_event_debug_assert_is_setup(ev);
  1.1775 +
  1.1776 +	ev->ev_base = base;
  1.1777 +	ev->ev_pri = base->nactivequeues/2;
  1.1778 +
  1.1779 +	return (0);
  1.1780 +}
  1.1781 +
  1.1782 +void
  1.1783 +event_set(struct event *ev, evutil_socket_t fd, short events,
  1.1784 +	  void (*callback)(evutil_socket_t, short, void *), void *arg)
  1.1785 +{
  1.1786 +	int r;
  1.1787 +	r = event_assign(ev, current_base, fd, events, callback, arg);
  1.1788 +	EVUTIL_ASSERT(r == 0);
  1.1789 +}
  1.1790 +
  1.1791 +struct event *
  1.1792 +event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
  1.1793 +{
  1.1794 +	struct event *ev;
  1.1795 +	ev = mm_malloc(sizeof(struct event));
  1.1796 +	if (ev == NULL)
  1.1797 +		return (NULL);
  1.1798 +	if (event_assign(ev, base, fd, events, cb, arg) < 0) {
  1.1799 +		mm_free(ev);
  1.1800 +		return (NULL);
  1.1801 +	}
  1.1802 +
  1.1803 +	return (ev);
  1.1804 +}
  1.1805 +
  1.1806 +void
  1.1807 +event_free(struct event *ev)
  1.1808 +{
  1.1809 +	_event_debug_assert_is_setup(ev);
  1.1810 +
  1.1811 +	/* make sure that this event won't be coming back to haunt us. */
  1.1812 +	event_del(ev);
  1.1813 +	_event_debug_note_teardown(ev);
  1.1814 +	mm_free(ev);
  1.1815 +
  1.1816 +}
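/* Editorial note: an illustrative sketch, not part of the original
 * changeset, contrasting the two ways of obtaining an initialized event.
 * event_assign() fills in caller-owned storage, while event_new() above
 * allocates with mm_malloc() and must be paired with event_free().  The
 * names fd, on_read, on_write and base are placeholders.
 *
 *	// Caller-owned storage; must stay alive while the event is added:
 *	struct event ev;
 *	event_assign(&ev, base, fd, EV_READ, on_read, NULL);
 *	event_add(&ev, NULL);
 *
 *	// Heap-allocated:
 *	struct event *evp = event_new(base, fd, EV_WRITE, on_write, NULL);
 *	struct timeval five_sec = { 5, 0 };
 *	event_add(evp, &five_sec);   // callback also gets EV_TIMEOUT after ~5s
 *	event_free(evp);             // calls event_del() internally
 */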
  1.1817 +
  1.1818 +void
  1.1819 +event_debug_unassign(struct event *ev)
  1.1820 +{
  1.1821 +	_event_debug_assert_not_added(ev);
  1.1822 +	_event_debug_note_teardown(ev);
  1.1823 +
  1.1824 +	ev->ev_flags &= ~EVLIST_INIT;
  1.1825 +}
  1.1826 +
  1.1827 +/*
  1.1828 + * Sets the priority of an event.  If the event is already active,
  1.1829 + * changing the priority will fail.
  1.1830 + */
  1.1831 +
  1.1832 +int
  1.1833 +event_priority_set(struct event *ev, int pri)
  1.1834 +{
  1.1835 +	_event_debug_assert_is_setup(ev);
  1.1836 +
  1.1837 +	if (ev->ev_flags & EVLIST_ACTIVE)
  1.1838 +		return (-1);
  1.1839 +	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
  1.1840 +		return (-1);
  1.1841 +
  1.1842 +	ev->ev_pri = pri;
  1.1843 +
  1.1844 +	return (0);
  1.1845 +}
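/* Editorial note: an illustrative sketch, not part of the original
 * changeset.  event_base_priority_init() (defined earlier in this file)
 * sets how many priority queues a base has; lower numbers run first, and
 * event_assign() defaults new events to the middle queue.  A priority can
 * only be changed while the event is not active.
 *
 *	event_base_priority_init(base, 3);   // queues 0..2
 *	struct event *urgent = event_new(base, fd, EV_READ, on_read, NULL);
 *	event_priority_set(urgent, 0);       // highest priority
 *	event_add(urgent, NULL);
 */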
  1.1846 +
  1.1847 +/*
  1.1848 + * Checks if a specific event is pending or scheduled.
  1.1849 + */
  1.1850 +
  1.1851 +int
  1.1852 +event_pending(const struct event *ev, short event, struct timeval *tv)
  1.1853 +{
  1.1854 +	int flags = 0;
  1.1855 +
  1.1856 +	if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
  1.1857 +		event_warnx("%s: event has no event_base set.", __func__);
  1.1858 +		return 0;
  1.1859 +	}
  1.1860 +
  1.1861 +	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
  1.1862 +	_event_debug_assert_is_setup(ev);
  1.1863 +
  1.1864 +	if (ev->ev_flags & EVLIST_INSERTED)
  1.1865 +		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
  1.1866 +	if (ev->ev_flags & EVLIST_ACTIVE)
  1.1867 +		flags |= ev->ev_res;
  1.1868 +	if (ev->ev_flags & EVLIST_TIMEOUT)
  1.1869 +		flags |= EV_TIMEOUT;
  1.1870 +
  1.1871 +	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);
  1.1872 +
  1.1873 +	/* See if there is a timeout that we should report */
  1.1874 +	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
  1.1875 +		struct timeval tmp = ev->ev_timeout;
  1.1876 +		tmp.tv_usec &= MICROSECONDS_MASK;
  1.1877 +#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
  1.1878 +		/* correctly remap to real time */
  1.1879 +		evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
  1.1880 +#else
  1.1881 +		*tv = tmp;
  1.1882 +#endif
  1.1883 +	}
  1.1884 +
  1.1885 +	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
  1.1886 +
  1.1887 +	return (flags & event);
  1.1888 +}
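/* Editorial note: an illustrative sketch, not part of the original
 * changeset.  The return value of event_pending() is the subset of the
 * requested flags that are currently pending or active, and, when tv is
 * non-NULL and EV_TIMEOUT is both requested and pending, *tv receives the
 * absolute time at which the timeout will fire.
 *
 *	struct timeval expires;
 *	if (event_pending(ev, EV_READ|EV_TIMEOUT, &expires) & EV_TIMEOUT) {
 *		// ev has a timeout scheduled; 'expires' is when it fires.
 *	}
 */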
  1.1889 +
  1.1890 +int
  1.1891 +event_initialized(const struct event *ev)
  1.1892 +{
  1.1893 +	if (!(ev->ev_flags & EVLIST_INIT))
  1.1894 +		return 0;
  1.1895 +
  1.1896 +	return 1;
  1.1897 +}
  1.1898 +
  1.1899 +void
  1.1900 +event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
  1.1901 +{
  1.1902 +	_event_debug_assert_is_setup(event);
  1.1903 +
  1.1904 +	if (base_out)
  1.1905 +		*base_out = event->ev_base;
  1.1906 +	if (fd_out)
  1.1907 +		*fd_out = event->ev_fd;
  1.1908 +	if (events_out)
  1.1909 +		*events_out = event->ev_events;
  1.1910 +	if (callback_out)
  1.1911 +		*callback_out = event->ev_callback;
  1.1912 +	if (arg_out)
  1.1913 +		*arg_out = event->ev_arg;
  1.1914 +}
  1.1915 +
  1.1916 +size_t
  1.1917 +event_get_struct_event_size(void)
  1.1918 +{
  1.1919 +	return sizeof(struct event);
  1.1920 +}
  1.1921 +
  1.1922 +evutil_socket_t
  1.1923 +event_get_fd(const struct event *ev)
  1.1924 +{
  1.1925 +	_event_debug_assert_is_setup(ev);
  1.1926 +	return ev->ev_fd;
  1.1927 +}
  1.1928 +
  1.1929 +struct event_base *
  1.1930 +event_get_base(const struct event *ev)
  1.1931 +{
  1.1932 +	_event_debug_assert_is_setup(ev);
  1.1933 +	return ev->ev_base;
  1.1934 +}
  1.1935 +
  1.1936 +short
  1.1937 +event_get_events(const struct event *ev)
  1.1938 +{
  1.1939 +	_event_debug_assert_is_setup(ev);
  1.1940 +	return ev->ev_events;
  1.1941 +}
  1.1942 +
  1.1943 +event_callback_fn
  1.1944 +event_get_callback(const struct event *ev)
  1.1945 +{
  1.1946 +	_event_debug_assert_is_setup(ev);
  1.1947 +	return ev->ev_callback;
  1.1948 +}
  1.1949 +
  1.1950 +void *
  1.1951 +event_get_callback_arg(const struct event *ev)
  1.1952 +{
  1.1953 +	_event_debug_assert_is_setup(ev);
  1.1954 +	return ev->ev_arg;
  1.1955 +}
  1.1956 +
  1.1957 +int
  1.1958 +event_add(struct event *ev, const struct timeval *tv)
  1.1959 +{
  1.1960 +	int res;
  1.1961 +
  1.1962 +	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
  1.1963 +		event_warnx("%s: event has no event_base set.", __func__);
  1.1964 +		return -1;
  1.1965 +	}
  1.1966 +
  1.1967 +	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
  1.1968 +
  1.1969 +	res = event_add_internal(ev, tv, 0);
  1.1970 +
  1.1971 +	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
  1.1972 +
  1.1973 +	return (res);
  1.1974 +}
  1.1975 +
  1.1976 +/* Helper callback: wake an event_base from another thread.  This version
  1.1977 + * works by writing a byte to one end of a socketpair, so that the event_base
  1.1978 + * listening on the other end will wake up as the corresponding event
  1.1979 + * triggers */
  1.1980 +static int
  1.1981 +evthread_notify_base_default(struct event_base *base)
  1.1982 +{
  1.1983 +	char buf[1];
  1.1984 +	int r;
  1.1985 +	buf[0] = (char) 0;
  1.1986 +#ifdef WIN32
  1.1987 +	r = send(base->th_notify_fd[1], buf, 1, 0);
  1.1988 +#else
  1.1989 +	r = write(base->th_notify_fd[1], buf, 1);
  1.1990 +#endif
  1.1991 +	return (r < 0 && errno != EAGAIN) ? -1 : 0;
  1.1992 +}
  1.1993 +
  1.1994 +#if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
  1.1995 +/* Helper callback: wake an event_base from another thread.  This version
  1.1996 + * assumes that you have a working eventfd() implementation. */
  1.1997 +static int
  1.1998 +evthread_notify_base_eventfd(struct event_base *base)
  1.1999 +{
  1.2000 +	ev_uint64_t msg = 1;
  1.2001 +	int r;
  1.2002 +	do {
  1.2003 +		r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
  1.2004 +	} while (r < 0 && errno == EAGAIN);
  1.2005 +
  1.2006 +	return (r < 0) ? -1 : 0;
  1.2007 +}
  1.2008 +#endif
  1.2009 +
  1.2010 +/** Tell the thread currently running the event_loop for base (if any) that it
  1.2011 + * needs to stop waiting in its dispatch function (if it is) and process all
  1.2012 + * active events and deferred callbacks (if there are any).  */
  1.2013 +static int
  1.2014 +evthread_notify_base(struct event_base *base)
  1.2015 +{
  1.2016 +	EVENT_BASE_ASSERT_LOCKED(base);
  1.2017 +	if (!base->th_notify_fn)
  1.2018 +		return -1;
  1.2019 +	if (base->is_notify_pending)
  1.2020 +		return 0;
  1.2021 +	base->is_notify_pending = 1;
  1.2022 +	return base->th_notify_fn(base);
  1.2023 +}
  1.2024 +
  1.2025 +/* Implementation function to add an event.  Works just like event_add,
  1.2026 + * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
  1.2027 + * we treat tv as an absolute time, not as an interval to add to the current
  1.2028 + * time */
  1.2029 +static inline int
  1.2030 +event_add_internal(struct event *ev, const struct timeval *tv,
  1.2031 +    int tv_is_absolute)
  1.2032 +{
  1.2033 +	struct event_base *base = ev->ev_base;
  1.2034 +	int res = 0;
  1.2035 +	int notify = 0;
  1.2036 +
  1.2037 +	EVENT_BASE_ASSERT_LOCKED(base);
  1.2038 +	_event_debug_assert_is_setup(ev);
  1.2039 +
  1.2040 +	event_debug((
  1.2041 +		 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%scall %p",
  1.2042 +		 ev,
  1.2043 +		 EV_SOCK_ARG(ev->ev_fd),
  1.2044 +		 ev->ev_events & EV_READ ? "EV_READ " : " ",
  1.2045 +		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
  1.2046 +		 tv ? "EV_TIMEOUT " : " ",
  1.2047 +		 ev->ev_callback));
  1.2048 +
  1.2049 +	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
  1.2050 +
  1.2051 +	/*
  1.2052 +	 * prepare for timeout insertion further below, if we get a
  1.2053 +	 * failure on any step, we should not change any state.
  1.2054 +	 */
  1.2055 +	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
  1.2056 +		if (min_heap_reserve(&base->timeheap,
  1.2057 +			1 + min_heap_size(&base->timeheap)) == -1)
  1.2058 +			return (-1);  /* ENOMEM == errno */
  1.2059 +	}
  1.2060 +
  1.2061 +	/* If the main thread is currently executing a signal event's
  1.2062 +	 * callback, and we are not the main thread, then we want to wait
  1.2063 +	 * until the callback is done before we mess with the event, or else
  1.2064 +	 * we can race on ev_ncalls and ev_pncalls below. */
  1.2065 +#ifndef _EVENT_DISABLE_THREAD_SUPPORT
  1.2066 +	if (base->current_event == ev && (ev->ev_events & EV_SIGNAL)
  1.2067 +	    && !EVBASE_IN_THREAD(base)) {
  1.2068 +		++base->current_event_waiters;
  1.2069 +		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
  1.2070 +	}
  1.2071 +#endif
  1.2072 +
  1.2073 +	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
  1.2074 +	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
  1.2075 +		if (ev->ev_events & (EV_READ|EV_WRITE))
  1.2076 +			res = evmap_io_add(base, ev->ev_fd, ev);
  1.2077 +		else if (ev->ev_events & EV_SIGNAL)
  1.2078 +			res = evmap_signal_add(base, (int)ev->ev_fd, ev);
  1.2079 +		if (res != -1)
  1.2080 +			event_queue_insert(base, ev, EVLIST_INSERTED);
  1.2081 +		if (res == 1) {
  1.2082 +			/* evmap says we need to notify the main thread. */
  1.2083 +			notify = 1;
  1.2084 +			res = 0;
  1.2085 +		}
  1.2086 +	}
  1.2087 +
  1.2088 +	/*
  1.2089 +	 * we should change the timeout state only if the previous event
  1.2090 +	 * addition succeeded.
  1.2091 +	 */
  1.2092 +	if (res != -1 && tv != NULL) {
  1.2093 +		struct timeval now;
  1.2094 +		int common_timeout;
  1.2095 +
  1.2096 +		/*
  1.2097 +		 * for persistent timeout events, we remember the
  1.2098 +		 * timeout value and re-add the event.
  1.2099 +		 *
  1.2100 +		 * If tv_is_absolute, this was already set.
  1.2101 +		 */
  1.2102 +		if (ev->ev_closure == EV_CLOSURE_PERSIST && !tv_is_absolute)
  1.2103 +			ev->ev_io_timeout = *tv;
  1.2104 +
  1.2105 +		/*
  1.2106 +		 * we already reserved memory above for the case where we
  1.2107 +		 * are not replacing an existing timeout.
  1.2108 +		 */
  1.2109 +		if (ev->ev_flags & EVLIST_TIMEOUT) {
  1.2110 +			/* XXX I believe this is needless. */
  1.2111 +			if (min_heap_elt_is_top(ev))
  1.2112 +				notify = 1;
  1.2113 +			event_queue_remove(base, ev, EVLIST_TIMEOUT);
  1.2114 +		}
  1.2115 +
  1.2116 +		/* Check if it is active due to a timeout.  Rescheduling
  1.2117 +		 * this timeout before the callback can be executed
  1.2118 +		 * removes it from the active list. */
  1.2119 +		if ((ev->ev_flags & EVLIST_ACTIVE) &&
  1.2120 +		    (ev->ev_res & EV_TIMEOUT)) {
  1.2121 +			if (ev->ev_events & EV_SIGNAL) {
  1.2122 +				/* See if we are currently executing
  1.2123 +				 * this event's callback in a loop
  1.2124 +				 */
  1.2125 +				if (ev->ev_ncalls && ev->ev_pncalls) {
  1.2126 +					/* Abort loop */
  1.2127 +					*ev->ev_pncalls = 0;
  1.2128 +				}
  1.2129 +			}
  1.2130 +
  1.2131 +			event_queue_remove(base, ev, EVLIST_ACTIVE);
  1.2132 +		}
  1.2133 +
  1.2134 +		gettime(base, &now);
  1.2135 +
  1.2136 +		common_timeout = is_common_timeout(tv, base);
  1.2137 +		if (tv_is_absolute) {
  1.2138 +			ev->ev_timeout = *tv;
  1.2139 +		} else if (common_timeout) {
  1.2140 +			struct timeval tmp = *tv;
  1.2141 +			tmp.tv_usec &= MICROSECONDS_MASK;
  1.2142 +			evutil_timeradd(&now, &tmp, &ev->ev_timeout);
  1.2143 +			ev->ev_timeout.tv_usec |=
  1.2144 +			    (tv->tv_usec & ~MICROSECONDS_MASK);
  1.2145 +		} else {
  1.2146 +			evutil_timeradd(&now, tv, &ev->ev_timeout);
  1.2147 +		}
  1.2148 +
  1.2149 +		event_debug((
  1.2150 +			 "event_add: timeout in %d seconds, call %p",
  1.2151 +			 (int)tv->tv_sec, ev->ev_callback));
  1.2152 +
  1.2153 +		event_queue_insert(base, ev, EVLIST_TIMEOUT);
  1.2154 +		if (common_timeout) {
  1.2155 +			struct common_timeout_list *ctl =
  1.2156 +			    get_common_timeout_list(base, &ev->ev_timeout);
  1.2157 +			if (ev == TAILQ_FIRST(&ctl->events)) {
  1.2158 +				common_timeout_schedule(ctl, &now, ev);
  1.2159 +			}
  1.2160 +		} else {
  1.2161 +			/* See if the earliest timeout is now earlier than it
  1.2162 +			 * was before: if so, we will need to tell the main
  1.2163 +			 * thread to wake up earlier than it would
  1.2164 +			 * otherwise. */
  1.2165 +			if (min_heap_elt_is_top(ev))
  1.2166 +				notify = 1;
  1.2167 +		}
  1.2168 +	}
  1.2169 +
  1.2170 +	/* if we are not in the right thread, we need to wake up the loop */
  1.2171 +	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
  1.2172 +		evthread_notify_base(base);
  1.2173 +
  1.2174 +	_event_debug_note_add(ev);
  1.2175 +
  1.2176 +	return (res);
  1.2177 +}
  1.2178 +
  1.2179 +int
  1.2180 +event_del(struct event *ev)
  1.2181 +{
  1.2182 +	int res;
  1.2183 +
  1.2184 +	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
  1.2185 +		event_warnx("%s: event has no event_base set.", __func__);
  1.2186 +		return -1;
  1.2187 +	}
  1.2188 +
  1.2189 +	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
  1.2190 +
  1.2191 +	res = event_del_internal(ev);
  1.2192 +
  1.2193 +	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
  1.2194 +
  1.2195 +	return (res);
  1.2196 +}
  1.2197 +
  1.2198 +/* Helper for event_del: always called with th_base_lock held. */
  1.2199 +static inline int
  1.2200 +event_del_internal(struct event *ev)
  1.2201 +{
  1.2202 +	struct event_base *base;
  1.2203 +	int res = 0, notify = 0;
  1.2204 +
  1.2205 +	event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
  1.2206 +		ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
  1.2207 +
  1.2208 +	/* An event without a base has not been added */
  1.2209 +	if (ev->ev_base == NULL)
  1.2210 +		return (-1);
  1.2211 +
  1.2212 +	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
  1.2213 +
  1.2214 +	/* If the main thread is currently executing this event's callback,
  1.2215 +	 * and we are not the main thread, then we want to wait until the
  1.2216 +	 * callback is done before we start removing the event.  That way,
  1.2217 +	 * when this function returns, it will be safe to free the
  1.2218 +	 * user-supplied argument. */
  1.2219 +	base = ev->ev_base;
  1.2220 +#ifndef _EVENT_DISABLE_THREAD_SUPPORT
  1.2221 +	if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
  1.2222 +		++base->current_event_waiters;
  1.2223 +		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
  1.2224 +	}
  1.2225 +#endif
  1.2226 +
  1.2227 +	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
  1.2228 +
  1.2229 +	/* See if we are currently executing this event's callback in a loop */
  1.2230 +	if (ev->ev_events & EV_SIGNAL) {
  1.2231 +		if (ev->ev_ncalls && ev->ev_pncalls) {
  1.2232 +			/* Abort loop */
  1.2233 +			*ev->ev_pncalls = 0;
  1.2234 +		}
  1.2235 +	}
  1.2236 +
  1.2237 +	if (ev->ev_flags & EVLIST_TIMEOUT) {
  1.2238 +		/* NOTE: We never need to notify the main thread because of a
  1.2239 +		 * deleted timeout event: all that could happen if we don't is
  1.2240 +		 * that the dispatch loop might wake up too early.  But the
  1.2241 +		 * point of notifying the main thread _is_ to wake up the
  1.2242 +		 * dispatch loop early anyway, so we wouldn't gain anything by
  1.2243 +		 * doing it.
  1.2244 +		 */
  1.2245 +		event_queue_remove(base, ev, EVLIST_TIMEOUT);
  1.2246 +	}
  1.2247 +
  1.2248 +	if (ev->ev_flags & EVLIST_ACTIVE)
  1.2249 +		event_queue_remove(base, ev, EVLIST_ACTIVE);
  1.2250 +
  1.2251 +	if (ev->ev_flags & EVLIST_INSERTED) {
  1.2252 +		event_queue_remove(base, ev, EVLIST_INSERTED);
  1.2253 +		if (ev->ev_events & (EV_READ|EV_WRITE))
  1.2254 +			res = evmap_io_del(base, ev->ev_fd, ev);
  1.2255 +		else
  1.2256 +			res = evmap_signal_del(base, (int)ev->ev_fd, ev);
  1.2257 +		if (res == 1) {
  1.2258 +			/* evmap says we need to notify the main thread. */
  1.2259 +			notify = 1;
  1.2260 +			res = 0;
  1.2261 +		}
  1.2262 +	}
  1.2263 +
  1.2264 +	/* if we are not in the right thread, we need to wake up the loop */
  1.2265 +	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
  1.2266 +		evthread_notify_base(base);
  1.2267 +
  1.2268 +	_event_debug_note_del(ev);
  1.2269 +
  1.2270 +	return (res);
  1.2271 +}
  1.2272 +
  1.2273 +void
  1.2274 +event_active(struct event *ev, int res, short ncalls)
  1.2275 +{
  1.2276 +	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
  1.2277 +		event_warnx("%s: event has no event_base set.", __func__);
  1.2278 +		return;
  1.2279 +	}
  1.2280 +
  1.2281 +	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
  1.2282 +
  1.2283 +	_event_debug_assert_is_setup(ev);
  1.2284 +
  1.2285 +	event_active_nolock(ev, res, ncalls);
  1.2286 +
  1.2287 +	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
  1.2288 +}
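/* Editorial note: an illustrative sketch, not part of the original
 * changeset.  event_active() above lets another thread make an event run
 * as if its conditions had triggered: the result flags are delivered to
 * the callback on the next loop iteration, and the notify machinery below
 * wakes the loop if it is blocked in dispatch.
 *
 *	event_active(ev, EV_READ, 0);   // ncalls is only used for EV_SIGNAL events
 */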
  1.2289 +
  1.2290 +
  1.2291 +void
  1.2292 +event_active_nolock(struct event *ev, int res, short ncalls)
  1.2293 +{
  1.2294 +	struct event_base *base;
  1.2295 +
  1.2296 +	event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
  1.2297 +		ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
  1.2298 +
  1.2299 +
  1.2300 +	/* We may get different kinds of events; OR the new result flags in */
  1.2301 +	if (ev->ev_flags & EVLIST_ACTIVE) {
  1.2302 +		ev->ev_res |= res;
  1.2303 +		return;
  1.2304 +	}
  1.2305 +
  1.2306 +	base = ev->ev_base;
  1.2307 +
  1.2308 +	EVENT_BASE_ASSERT_LOCKED(base);
  1.2309 +
  1.2310 +	ev->ev_res = res;
  1.2311 +
  1.2312 +	if (ev->ev_pri < base->event_running_priority)
  1.2313 +		base->event_continue = 1;
  1.2314 +
  1.2315 +	if (ev->ev_events & EV_SIGNAL) {
  1.2316 +#ifndef _EVENT_DISABLE_THREAD_SUPPORT
  1.2317 +		if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
  1.2318 +			++base->current_event_waiters;
  1.2319 +			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
  1.2320 +		}
  1.2321 +#endif
  1.2322 +		ev->ev_ncalls = ncalls;
  1.2323 +		ev->ev_pncalls = NULL;
  1.2324 +	}
  1.2325 +
  1.2326 +	event_queue_insert(base, ev, EVLIST_ACTIVE);
  1.2327 +
  1.2328 +	if (EVBASE_NEED_NOTIFY(base))
  1.2329 +		evthread_notify_base(base);
  1.2330 +}
  1.2331 +
  1.2332 +void
  1.2333 +event_deferred_cb_init(struct deferred_cb *cb, deferred_cb_fn fn, void *arg)
  1.2334 +{
  1.2335 +	memset(cb, 0, sizeof(struct deferred_cb));
  1.2336 +	cb->cb = fn;
  1.2337 +	cb->arg = arg;
  1.2338 +}
  1.2339 +
  1.2340 +void
  1.2341 +event_deferred_cb_cancel(struct deferred_cb_queue *queue,
  1.2342 +    struct deferred_cb *cb)
  1.2343 +{
  1.2344 +	if (!queue) {
  1.2345 +		if (current_base)
  1.2346 +			queue = &current_base->defer_queue;
  1.2347 +		else
  1.2348 +			return;
  1.2349 +	}
  1.2350 +
  1.2351 +	LOCK_DEFERRED_QUEUE(queue);
  1.2352 +	if (cb->queued) {
  1.2353 +		TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
  1.2354 +		--queue->active_count;
  1.2355 +		cb->queued = 0;
  1.2356 +	}
  1.2357 +	UNLOCK_DEFERRED_QUEUE(queue);
  1.2358 +}
  1.2359 +
  1.2360 +void
  1.2361 +event_deferred_cb_schedule(struct deferred_cb_queue *queue,
  1.2362 +    struct deferred_cb *cb)
  1.2363 +{
  1.2364 +	if (!queue) {
  1.2365 +		if (current_base)
  1.2366 +			queue = &current_base->defer_queue;
  1.2367 +		else
  1.2368 +			return;
  1.2369 +	}
  1.2370 +
  1.2371 +	LOCK_DEFERRED_QUEUE(queue);
  1.2372 +	if (!cb->queued) {
  1.2373 +		cb->queued = 1;
  1.2374 +		TAILQ_INSERT_TAIL(&queue->deferred_cb_list, cb, cb_next);
  1.2375 +		++queue->active_count;
  1.2376 +		if (queue->notify_fn)
  1.2377 +			queue->notify_fn(queue, queue->notify_arg);
  1.2378 +	}
  1.2379 +	UNLOCK_DEFERRED_QUEUE(queue);
  1.2380 +}
  1.2381 +
  1.2382 +static int
  1.2383 +timeout_next(struct event_base *base, struct timeval **tv_p)
  1.2384 +{
  1.2385 +	/* Caller must hold th_base_lock */
  1.2386 +	struct timeval now;
  1.2387 +	struct event *ev;
  1.2388 +	struct timeval *tv = *tv_p;
  1.2389 +	int res = 0;
  1.2390 +
  1.2391 +	ev = min_heap_top(&base->timeheap);
  1.2392 +
  1.2393 +	if (ev == NULL) {
  1.2394 +		/* if no time-based events are active, wait for I/O */
  1.2395 +		*tv_p = NULL;
  1.2396 +		goto out;
  1.2397 +	}
  1.2398 +
  1.2399 +	if (gettime(base, &now) == -1) {
  1.2400 +		res = -1;
  1.2401 +		goto out;
  1.2402 +	}
  1.2403 +
  1.2404 +	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
  1.2405 +		evutil_timerclear(tv);
  1.2406 +		goto out;
  1.2407 +	}
  1.2408 +
  1.2409 +	evutil_timersub(&ev->ev_timeout, &now, tv);
  1.2410 +
  1.2411 +	EVUTIL_ASSERT(tv->tv_sec >= 0);
  1.2412 +	EVUTIL_ASSERT(tv->tv_usec >= 0);
  1.2413 +	event_debug(("timeout_next: in %d seconds", (int)tv->tv_sec));
  1.2414 +
  1.2415 +out:
  1.2416 +	return (res);
  1.2417 +}
  1.2418 +
  1.2419 +/*
  1.2420 + * Determines if the time is running backwards by comparing the current time
  1.2421 + * against the last time we checked.  Not needed when using a monotonic clock.
  1.2422 + * If time is running backwards, we adjust the firing time of every event by
  1.2423 + * the amount that time seems to have jumped.
  1.2424 + */
  1.2425 +static void
  1.2426 +timeout_correct(struct event_base *base, struct timeval *tv)
  1.2427 +{
  1.2428 +	/* Caller must hold th_base_lock. */
  1.2429 +	struct event **pev;
  1.2430 +	unsigned int size;
  1.2431 +	struct timeval off;
  1.2432 +	int i;
  1.2433 +
  1.2434 +	if (use_monotonic)
  1.2435 +		return;
  1.2436 +
  1.2437 +	/* Check if time is running backwards */
  1.2438 +	gettime(base, tv);
  1.2439 +
  1.2440 +	if (evutil_timercmp(tv, &base->event_tv, >=)) {
  1.2441 +		base->event_tv = *tv;
  1.2442 +		return;
  1.2443 +	}
  1.2444 +
  1.2445 +	event_debug(("%s: time is running backwards, corrected",
  1.2446 +		    __func__));
  1.2447 +	evutil_timersub(&base->event_tv, tv, &off);
  1.2448 +
  1.2449 +	/*
  1.2450 +	 * We can modify the key element of the node without destroying
  1.2451 +	 * the minheap property, because we change every element.
  1.2452 +	 */
  1.2453 +	pev = base->timeheap.p;
  1.2454 +	size = base->timeheap.n;
  1.2455 +	for (; size-- > 0; ++pev) {
  1.2456 +		struct timeval *ev_tv = &(**pev).ev_timeout;
  1.2457 +		evutil_timersub(ev_tv, &off, ev_tv);
  1.2458 +	}
  1.2459 +	for (i=0; i<base->n_common_timeouts; ++i) {
  1.2460 +		struct event *ev;
  1.2461 +		struct common_timeout_list *ctl =
  1.2462 +		    base->common_timeout_queues[i];
  1.2463 +		TAILQ_FOREACH(ev, &ctl->events,
  1.2464 +		    ev_timeout_pos.ev_next_with_common_timeout) {
  1.2465 +			struct timeval *ev_tv = &ev->ev_timeout;
  1.2466 +			ev_tv->tv_usec &= MICROSECONDS_MASK;
  1.2467 +			evutil_timersub(ev_tv, &off, ev_tv);
  1.2468 +			ev_tv->tv_usec |= COMMON_TIMEOUT_MAGIC |
  1.2469 +			    (i<<COMMON_TIMEOUT_IDX_SHIFT);
  1.2470 +		}
  1.2471 +	}
  1.2472 +
  1.2473 +	/* Now remember what the new time turned out to be. */
  1.2474 +	base->event_tv = *tv;
  1.2475 +}
  1.2476 +
  1.2477 +/* Activate every event whose timeout has elapsed. */
  1.2478 +static void
  1.2479 +timeout_process(struct event_base *base)
  1.2480 +{
  1.2481 +	/* Caller must hold lock. */
  1.2482 +	struct timeval now;
  1.2483 +	struct event *ev;
  1.2484 +
  1.2485 +	if (min_heap_empty(&base->timeheap)) {
  1.2486 +		return;
  1.2487 +	}
  1.2488 +
  1.2489 +	gettime(base, &now);
  1.2490 +
  1.2491 +	while ((ev = min_heap_top(&base->timeheap))) {
  1.2492 +		if (evutil_timercmp(&ev->ev_timeout, &now, >))
  1.2493 +			break;
  1.2494 +
  1.2495 +		/* delete this event from the I/O queues */
  1.2496 +		event_del_internal(ev);
  1.2497 +
  1.2498 +		event_debug(("timeout_process: call %p",
  1.2499 +			 ev->ev_callback));
  1.2500 +		event_active_nolock(ev, EV_TIMEOUT, 1);
  1.2501 +	}
  1.2502 +}
  1.2503 +
  1.2504 +/* Remove 'ev' from 'queue' (EVLIST_...) in base. */
  1.2505 +static void
  1.2506 +event_queue_remove(struct event_base *base, struct event *ev, int queue)
  1.2507 +{
  1.2508 +	EVENT_BASE_ASSERT_LOCKED(base);
  1.2509 +
  1.2510 +	if (!(ev->ev_flags & queue)) {
  1.2511 +		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
  1.2512 +		    ev, EV_SOCK_ARG(ev->ev_fd), queue);
  1.2513 +		return;
  1.2514 +	}
  1.2515 +
  1.2516 +	if (~ev->ev_flags & EVLIST_INTERNAL)
  1.2517 +		base->event_count--;
  1.2518 +
  1.2519 +	ev->ev_flags &= ~queue;
  1.2520 +	switch (queue) {
  1.2521 +	case EVLIST_INSERTED:
  1.2522 +		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
  1.2523 +		break;
  1.2524 +	case EVLIST_ACTIVE:
  1.2525 +		base->event_count_active--;
  1.2526 +		TAILQ_REMOVE(&base->activequeues[ev->ev_pri],
  1.2527 +		    ev, ev_active_next);
  1.2528 +		break;
  1.2529 +	case EVLIST_TIMEOUT:
  1.2530 +		if (is_common_timeout(&ev->ev_timeout, base)) {
  1.2531 +			struct common_timeout_list *ctl =
  1.2532 +			    get_common_timeout_list(base, &ev->ev_timeout);
  1.2533 +			TAILQ_REMOVE(&ctl->events, ev,
  1.2534 +			    ev_timeout_pos.ev_next_with_common_timeout);
  1.2535 +		} else {
  1.2536 +			min_heap_erase(&base->timeheap, ev);
  1.2537 +		}
  1.2538 +		break;
  1.2539 +	default:
  1.2540 +		event_errx(1, "%s: unknown queue %x", __func__, queue);
  1.2541 +	}
  1.2542 +}
  1.2543 +
  1.2544 +/* Add 'ev' to the common timeout list 'ctl', keeping it sorted by timeout. */
  1.2545 +static void
  1.2546 +insert_common_timeout_inorder(struct common_timeout_list *ctl,
  1.2547 +    struct event *ev)
  1.2548 +{
  1.2549 +	struct event *e;
  1.2550 +	/* By all logic, we should just be able to append 'ev' to the end of
  1.2551 +	 * ctl->events, since the timeout on each 'ev' is set to {the common
  1.2552 +	 * timeout} + {the time when we add the event}, and so the events
  1.2553 + * should arrive in order of their timeouts.  But just in case
  1.2554 + * there's some wacky threading issue going on, we do a search from
  1.2555 + * the end of 'ctl->events' to find the right insertion point.
  1.2556 +	 */
  1.2557 +	TAILQ_FOREACH_REVERSE(e, &ctl->events,
  1.2558 +	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
  1.2559 +		/* This timercmp is a little sneaky, since both ev and e have
  1.2560 +		 * magic values in tv_usec.  Fortunately, they ought to have
  1.2561 +		 * the _same_ magic values in tv_usec.  Let's assert for that.
  1.2562 +		 */
  1.2563 +		EVUTIL_ASSERT(
  1.2564 +			is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
  1.2565 +		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
  1.2566 +			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
  1.2567 +			    ev_timeout_pos.ev_next_with_common_timeout);
  1.2568 +			return;
  1.2569 +		}
  1.2570 +	}
  1.2571 +	TAILQ_INSERT_HEAD(&ctl->events, ev,
  1.2572 +	    ev_timeout_pos.ev_next_with_common_timeout);
  1.2573 +}
  1.2574 +
  1.2575 +static void
  1.2576 +event_queue_insert(struct event_base *base, struct event *ev, int queue)
  1.2577 +{
  1.2578 +	EVENT_BASE_ASSERT_LOCKED(base);
  1.2579 +
  1.2580 +	if (ev->ev_flags & queue) {
  1.2581 +		/* Double insertion is possible for active events */
  1.2582 +		if (queue & EVLIST_ACTIVE)
  1.2583 +			return;
  1.2584 +
  1.2585 +		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on queue %x", __func__,
  1.2586 +		    ev, EV_SOCK_ARG(ev->ev_fd), queue);
  1.2587 +		return;
  1.2588 +	}
  1.2589 +
  1.2590 +	if (~ev->ev_flags & EVLIST_INTERNAL)
  1.2591 +		base->event_count++;
  1.2592 +
  1.2593 +	ev->ev_flags |= queue;
  1.2594 +	switch (queue) {
  1.2595 +	case EVLIST_INSERTED:
  1.2596 +		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
  1.2597 +		break;
  1.2598 +	case EVLIST_ACTIVE:
  1.2599 +		base->event_count_active++;
  1.2600 +		TAILQ_INSERT_TAIL(&base->activequeues[ev->ev_pri],
  1.2601 +		    ev,ev_active_next);
  1.2602 +		break;
  1.2603 +	case EVLIST_TIMEOUT: {
  1.2604 +		if (is_common_timeout(&ev->ev_timeout, base)) {
  1.2605 +			struct common_timeout_list *ctl =
  1.2606 +			    get_common_timeout_list(base, &ev->ev_timeout);
  1.2607 +			insert_common_timeout_inorder(ctl, ev);
  1.2608 +		} else
  1.2609 +			min_heap_push(&base->timeheap, ev);
  1.2610 +		break;
  1.2611 +	}
  1.2612 +	default:
  1.2613 +		event_errx(1, "%s: unknown queue %x", __func__, queue);
  1.2614 +	}
  1.2615 +}
  1.2616 +
  1.2617 +/* Functions for debugging */
  1.2618 +
  1.2619 +const char *
  1.2620 +event_get_version(void)
  1.2621 +{
  1.2622 +	return (_EVENT_VERSION);
  1.2623 +}
  1.2624 +
  1.2625 +ev_uint32_t
  1.2626 +event_get_version_number(void)
  1.2627 +{
  1.2628 +	return (_EVENT_NUMERIC_VERSION);
  1.2629 +}
  1.2630 +
  1.2631 +/*
  1.2632 + * No thread-safe interface needed - the information should be the same
  1.2633 + * for all threads.
  1.2634 + */
  1.2635 +
  1.2636 +const char *
  1.2637 +event_get_method(void)
  1.2638 +{
  1.2639 +	return (current_base->evsel->name);
  1.2640 +}
  1.2641 +
  1.2642 +#ifndef _EVENT_DISABLE_MM_REPLACEMENT
  1.2643 +static void *(*_mm_malloc_fn)(size_t sz) = NULL;
  1.2644 +static void *(*_mm_realloc_fn)(void *p, size_t sz) = NULL;
  1.2645 +static void (*_mm_free_fn)(void *p) = NULL;
  1.2646 +
  1.2647 +void *
  1.2648 +event_mm_malloc_(size_t sz)
  1.2649 +{
  1.2650 +	if (_mm_malloc_fn)
  1.2651 +		return _mm_malloc_fn(sz);
  1.2652 +	else
  1.2653 +		return malloc(sz);
  1.2654 +}
  1.2655 +
  1.2656 +void *
  1.2657 +event_mm_calloc_(size_t count, size_t size)
  1.2658 +{
  1.2659 +	if (_mm_malloc_fn) {
  1.2660 +		size_t sz = count * size;
  1.2661 +		void *p = _mm_malloc_fn(sz);
  1.2662 +		if (p)
  1.2663 +			memset(p, 0, sz);
  1.2664 +		return p;
  1.2665 +	} else
  1.2666 +		return calloc(count, size);
  1.2667 +}
  1.2668 +
  1.2669 +char *
  1.2670 +event_mm_strdup_(const char *str)
  1.2671 +{
  1.2672 +	if (_mm_malloc_fn) {
  1.2673 +		size_t ln = strlen(str);
  1.2674 +		void *p = _mm_malloc_fn(ln+1);
  1.2675 +		if (p)
  1.2676 +			memcpy(p, str, ln+1);
  1.2677 +		return p;
  1.2678 +	} else
  1.2679 +#ifdef WIN32
  1.2680 +		return _strdup(str);
  1.2681 +#else
  1.2682 +		return strdup(str);
  1.2683 +#endif
  1.2684 +}
  1.2685 +
  1.2686 +void *
  1.2687 +event_mm_realloc_(void *ptr, size_t sz)
  1.2688 +{
  1.2689 +	if (_mm_realloc_fn)
  1.2690 +		return _mm_realloc_fn(ptr, sz);
  1.2691 +	else
  1.2692 +		return realloc(ptr, sz);
  1.2693 +}
  1.2694 +
  1.2695 +void
  1.2696 +event_mm_free_(void *ptr)
  1.2697 +{
  1.2698 +	if (_mm_free_fn)
  1.2699 +		_mm_free_fn(ptr);
  1.2700 +	else
  1.2701 +		free(ptr);
  1.2702 +}
  1.2703 +
  1.2704 +void
  1.2705 +event_set_mem_functions(void *(*malloc_fn)(size_t sz),
  1.2706 +			void *(*realloc_fn)(void *ptr, size_t sz),
  1.2707 +			void (*free_fn)(void *ptr))
  1.2708 +{
  1.2709 +	_mm_malloc_fn = malloc_fn;
  1.2710 +	_mm_realloc_fn = realloc_fn;
  1.2711 +	_mm_free_fn = free_fn;
  1.2712 +}
  1.2713 +#endif
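/* Editorial note: an illustrative sketch, not part of the original
 * changeset.  Replacement allocators should be installed before any other
 * libevent call, since memory obtained from the old malloc would otherwise
 * be handed to the new free.  The my_* wrappers are placeholders.
 *
 *	#include <event2/event.h>
 *	#include <stdlib.h>
 *
 *	static void *my_malloc(size_t n)           { return malloc(n); }
 *	static void *my_realloc(void *p, size_t n) { return realloc(p, n); }
 *	static void  my_free(void *p)              { free(p); }
 *
 *	event_set_mem_functions(my_malloc, my_realloc, my_free);
 */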
  1.2714 +
  1.2715 +#if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
  1.2716 +static void
  1.2717 +evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
  1.2718 +{
  1.2719 +	ev_uint64_t msg;
  1.2720 +	ev_ssize_t r;
  1.2721 +	struct event_base *base = arg;
  1.2722 +
  1.2723 +	r = read(fd, (void*) &msg, sizeof(msg));
  1.2724 +	if (r<0 && errno != EAGAIN) {
  1.2725 +		event_sock_warn(fd, "Error reading from eventfd");
  1.2726 +	}
  1.2727 +	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1.2728 +	base->is_notify_pending = 0;
  1.2729 +	EVBASE_RELEASE_LOCK(base, th_base_lock);
  1.2730 +}
  1.2731 +#endif
  1.2732 +
  1.2733 +static void
  1.2734 +evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
  1.2735 +{
  1.2736 +	unsigned char buf[1024];
  1.2737 +	struct event_base *base = arg;
  1.2738 +#ifdef WIN32
  1.2739 +	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
  1.2740 +		;
  1.2741 +#else
  1.2742 +	while (read(fd, (char*)buf, sizeof(buf)) > 0)
  1.2743 +		;
  1.2744 +#endif
  1.2745 +
  1.2746 +	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1.2747 +	base->is_notify_pending = 0;
  1.2748 +	EVBASE_RELEASE_LOCK(base, th_base_lock);
  1.2749 +}
  1.2750 +
  1.2751 +int
  1.2752 +evthread_make_base_notifiable(struct event_base *base)
  1.2753 +{
  1.2754 +	void (*cb)(evutil_socket_t, short, void *) = evthread_notify_drain_default;
  1.2755 +	int (*notify)(struct event_base *) = evthread_notify_base_default;
  1.2756 +
  1.2757 +	/* XXXX grab the lock here? */
  1.2758 +	if (!base)
  1.2759 +		return -1;
  1.2760 +
  1.2761 +	if (base->th_notify_fd[0] >= 0)
  1.2762 +		return 0;
  1.2763 +
  1.2764 +#if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
  1.2765 +#ifndef EFD_CLOEXEC
  1.2766 +#define EFD_CLOEXEC 0
  1.2767 +#endif
  1.2768 +	base->th_notify_fd[0] = eventfd(0, EFD_CLOEXEC);
  1.2769 +	if (base->th_notify_fd[0] >= 0) {
  1.2770 +		evutil_make_socket_closeonexec(base->th_notify_fd[0]);
  1.2771 +		notify = evthread_notify_base_eventfd;
  1.2772 +		cb = evthread_notify_drain_eventfd;
  1.2773 +	}
  1.2774 +#endif
  1.2775 +#if defined(_EVENT_HAVE_PIPE)
  1.2776 +	if (base->th_notify_fd[0] < 0) {
  1.2777 +		if ((base->evsel->features & EV_FEATURE_FDS)) {
  1.2778 +			if (pipe(base->th_notify_fd) < 0) {
  1.2779 +				event_warn("%s: pipe", __func__);
  1.2780 +			} else {
  1.2781 +				evutil_make_socket_closeonexec(base->th_notify_fd[0]);
  1.2782 +				evutil_make_socket_closeonexec(base->th_notify_fd[1]);
  1.2783 +			}
  1.2784 +		}
  1.2785 +	}
  1.2786 +#endif
  1.2787 +
  1.2788 +#ifdef WIN32
  1.2789 +#define LOCAL_SOCKETPAIR_AF AF_INET
  1.2790 +#else
  1.2791 +#define LOCAL_SOCKETPAIR_AF AF_UNIX
  1.2792 +#endif
  1.2793 +	if (base->th_notify_fd[0] < 0) {
  1.2794 +		if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0,
  1.2795 +			base->th_notify_fd) == -1) {
  1.2796 +			event_sock_warn(-1, "%s: socketpair", __func__);
  1.2797 +			return (-1);
  1.2798 +		} else {
  1.2799 +			evutil_make_socket_closeonexec(base->th_notify_fd[0]);
  1.2800 +			evutil_make_socket_closeonexec(base->th_notify_fd[1]);
  1.2801 +		}
  1.2802 +	}
  1.2803 +
  1.2804 +	evutil_make_socket_nonblocking(base->th_notify_fd[0]);
  1.2805 +
  1.2806 +	base->th_notify_fn = notify;
  1.2807 +
  1.2808 +	/*
  1.2809 +	  Making the second socket nonblocking is a bit subtle, given that we
  1.2810 +	  ignore any EAGAIN returns when writing to it, and you don't usually
  1.2811 +	  do that for a nonblocking socket. But if the kernel gives us EAGAIN,
  1.2812 +	  then there's no need to add any more data to the buffer, since
  1.2813 +	  the main thread is already either about to wake up and drain it,
  1.2814 +	  or woken up and in the process of draining it.
  1.2815 +	*/
  1.2816 +	if (base->th_notify_fd[1] > 0)
  1.2817 +		evutil_make_socket_nonblocking(base->th_notify_fd[1]);
  1.2818 +
  1.2819 +	/* prepare an event that we can use for wakeup */
  1.2820 +	event_assign(&base->th_notify, base, base->th_notify_fd[0],
  1.2821 +				 EV_READ|EV_PERSIST, cb, base);
  1.2822 +
  1.2823 +	/* we need to mark this as internal event */
  1.2824 +	base->th_notify.ev_flags |= EVLIST_INTERNAL;
  1.2825 +	event_priority_set(&base->th_notify, 0);
  1.2826 +
  1.2827 +	return event_add(&base->th_notify, NULL);
  1.2828 +}
  1.2829 +
  1.2830 +void
  1.2831 +event_base_dump_events(struct event_base *base, FILE *output)
  1.2832 +{
  1.2833 +	struct event *e;
  1.2834 +	int i;
  1.2835 +	fprintf(output, "Inserted events:\n");
  1.2836 +	TAILQ_FOREACH(e, &base->eventqueue, ev_next) {
  1.2837 +		fprintf(output, "  %p [fd "EV_SOCK_FMT"]%s%s%s%s%s\n",
  1.2838 +				(void*)e, EV_SOCK_ARG(e->ev_fd),
  1.2839 +				(e->ev_events&EV_READ)?" Read":"",
  1.2840 +				(e->ev_events&EV_WRITE)?" Write":"",
  1.2841 +				(e->ev_events&EV_SIGNAL)?" Signal":"",
  1.2842 +				(e->ev_events&EV_TIMEOUT)?" Timeout":"",
  1.2843 +				(e->ev_events&EV_PERSIST)?" Persist":"");
  1.2844 +
  1.2845 +	}
  1.2846 +	for (i = 0; i < base->nactivequeues; ++i) {
  1.2847 +		if (TAILQ_EMPTY(&base->activequeues[i]))
  1.2848 +			continue;
  1.2849 +		fprintf(output, "Active events [priority %d]:\n", i);
  1.2850 +		TAILQ_FOREACH(e, &base->activequeues[i], ev_active_next) {
  1.2851 +			fprintf(output, "  %p [fd "EV_SOCK_FMT"]%s%s%s%s\n",
  1.2852 +					(void*)e, EV_SOCK_ARG(e->ev_fd),
  1.2853 +					(e->ev_res&EV_READ)?" Read active":"",
  1.2854 +					(e->ev_res&EV_WRITE)?" Write active":"",
  1.2855 +					(e->ev_res&EV_SIGNAL)?" Signal active":"",
  1.2856 +					(e->ev_res&EV_TIMEOUT)?" Timeout active":"");
  1.2857 +		}
  1.2858 +	}
  1.2859 +}
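/* Editorial note: an illustrative sketch, not part of the original
 * changeset.  event_base_dump_events() above is a debugging aid; a common
 * pattern is to dump the tables from a signal-triggered or periodic event.
 *
 *	event_base_dump_events(base, stderr);
 */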
  1.2860 +
  1.2861 +void
  1.2862 +event_base_add_virtual(struct event_base *base)
  1.2863 +{
  1.2864 +	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1.2865 +	base->virtual_event_count++;
  1.2866 +	EVBASE_RELEASE_LOCK(base, th_base_lock);
  1.2867 +}
  1.2868 +
  1.2869 +void
  1.2870 +event_base_del_virtual(struct event_base *base)
  1.2871 +{
  1.2872 +	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1.2873 +	EVUTIL_ASSERT(base->virtual_event_count > 0);
  1.2874 +	base->virtual_event_count--;
  1.2875 +	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
  1.2876 +		evthread_notify_base(base);
  1.2877 +	EVBASE_RELEASE_LOCK(base, th_base_lock);
  1.2878 +}
  1.2879 +
  1.2880 +#ifndef _EVENT_DISABLE_THREAD_SUPPORT
  1.2881 +int
  1.2882 +event_global_setup_locks_(const int enable_locks)
  1.2883 +{
  1.2884 +#ifndef _EVENT_DISABLE_DEBUG_MODE
  1.2885 +	EVTHREAD_SETUP_GLOBAL_LOCK(_event_debug_map_lock, 0);
  1.2886 +#endif
  1.2887 +	if (evsig_global_setup_locks_(enable_locks) < 0)
  1.2888 +		return -1;
  1.2889 +	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
  1.2890 +		return -1;
  1.2891 +	return 0;
  1.2892 +}
  1.2893 +#endif
  1.2894 +
  1.2895 +void
  1.2896 +event_base_assert_ok(struct event_base *base)
  1.2897 +{
  1.2898 +	int i;
  1.2899 +	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1.2900 +	evmap_check_integrity(base);
  1.2901 +
  1.2902 +	/* Check the heap property */
  1.2903 +	for (i = 1; i < (int)base->timeheap.n; ++i) {
  1.2904 +		int parent = (i - 1) / 2;
  1.2905 +		struct event *ev, *p_ev;
  1.2906 +		ev = base->timeheap.p[i];
  1.2907 +		p_ev = base->timeheap.p[parent];
  1.2908 +		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
  1.2909 +		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
  1.2910 +		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
  1.2911 +	}
  1.2912 +
  1.2913 +	/* Check that the common timeouts are fine */
  1.2914 +	for (i = 0; i < base->n_common_timeouts; ++i) {
  1.2915 +		struct common_timeout_list *ctl = base->common_timeout_queues[i];
  1.2916 +		struct event *last=NULL, *ev;
  1.2917 +		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
  1.2918 +			if (last)
  1.2919 +				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
  1.2920 +			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
  1.2921 +			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
  1.2922 +			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
  1.2923 +			last = ev;
  1.2924 +		}
  1.2925 +	}
  1.2926 +
  1.2927 +	EVBASE_RELEASE_LOCK(base, th_base_lock);
  1.2928 +}
