ipc/chromium/src/third_party/libevent/evmap.c

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/ipc/chromium/src/third_party/libevent/evmap.c	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,799 @@
     1.4 +/*
     1.5 + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
     1.6 + *
     1.7 + * Redistribution and use in source and binary forms, with or without
     1.8 + * modification, are permitted provided that the following conditions
     1.9 + * are met:
    1.10 + * 1. Redistributions of source code must retain the above copyright
    1.11 + *    notice, this list of conditions and the following disclaimer.
    1.12 + * 2. Redistributions in binary form must reproduce the above copyright
    1.13 + *    notice, this list of conditions and the following disclaimer in the
    1.14 + *    documentation and/or other materials provided with the distribution.
    1.15 + * 3. The name of the author may not be used to endorse or promote products
    1.16 + *    derived from this software without specific prior written permission.
    1.17 + *
    1.18 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
    1.19 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
    1.20 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
    1.21 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
    1.22 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
    1.23 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    1.24 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    1.25 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    1.26 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
    1.27 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    1.28 + */
    1.29 +#include "event2/event-config.h"
    1.30 +
    1.31 +#ifdef WIN32
    1.32 +#include <winsock2.h>
    1.33 +#define WIN32_LEAN_AND_MEAN
    1.34 +#include <windows.h>
    1.35 +#undef WIN32_LEAN_AND_MEAN
    1.36 +#endif
    1.37 +#include <sys/types.h>
    1.38 +#if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
    1.39 +#include <sys/time.h>
    1.40 +#endif
    1.41 +#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <string.h>
#include <time.h>
    1.51 +
    1.52 +#include "event-internal.h"
    1.53 +#include "evmap-internal.h"
    1.54 +#include "mm-internal.h"
    1.55 +#include "changelist-internal.h"
    1.56 +
/** An entry for an evmap_io list: notes all the events that want to read or
	write on a given fd, and the number of each.
  */
struct evmap_io {
	struct event_list events;	/* every event registered for this fd */
	ev_uint16_t nread;		/* count of EV_READ events in 'events' */
	ev_uint16_t nwrite;		/* count of EV_WRITE events in 'events' */
};

/* An entry for an evmap_signal list: notes all the events that want to know
   when a signal triggers. */
struct evmap_signal {
	struct event_list events;	/* every event registered for this signal */
};
    1.71 +
    1.72 +/* On some platforms, fds start at 0 and increment by 1 as they are
    1.73 +   allocated, and old numbers get used.  For these platforms, we
    1.74 +   implement io maps just like signal maps: as an array of pointers to
    1.75 +   struct evmap_io.  But on other platforms (windows), sockets are not
    1.76 +   0-indexed, not necessarily consecutive, and not necessarily reused.
    1.77 +   There, we use a hashtable to implement evmap_io.
    1.78 +*/
    1.79 +#ifdef EVMAP_USE_HT
    1.80 +struct event_map_entry {
    1.81 +	HT_ENTRY(event_map_entry) map_node;
    1.82 +	evutil_socket_t fd;
    1.83 +	union { /* This is a union in case we need to make more things that can
    1.84 +			   be in the hashtable. */
    1.85 +		struct evmap_io evmap_io;
    1.86 +	} ent;
    1.87 +};
    1.88 +
    1.89 +/* Helper used by the event_io_map hashtable code; tries to return a good hash
    1.90 + * of the fd in e->fd. */
    1.91 +static inline unsigned
    1.92 +hashsocket(struct event_map_entry *e)
    1.93 +{
    1.94 +	/* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
    1.95 +	 * matter.  Our hashtable implementation really likes low-order bits,
    1.96 +	 * though, so let's do the rotate-and-add trick. */
    1.97 +	unsigned h = (unsigned) e->fd;
    1.98 +	h += (h >> 2) | (h << 30);
    1.99 +	return h;
   1.100 +}
   1.101 +
   1.102 +/* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
   1.103 + * have the same e->fd. */
   1.104 +static inline int
   1.105 +eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
   1.106 +{
   1.107 +	return e1->fd == e2->fd;
   1.108 +}
   1.109 +
   1.110 +HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
   1.111 +HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
   1.112 +			0.5, mm_malloc, mm_realloc, mm_free)
   1.113 +
   1.114 +#define GET_IO_SLOT(x, map, slot, type)					\
   1.115 +	do {								\
   1.116 +		struct event_map_entry _key, *_ent;			\
   1.117 +		_key.fd = slot;						\
   1.118 +		_ent = HT_FIND(event_io_map, map, &_key);		\
   1.119 +		(x) = _ent ? &_ent->ent.type : NULL;			\
   1.120 +	} while (0);
   1.121 +
   1.122 +#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
   1.123 +	do {								\
   1.124 +		struct event_map_entry _key, *_ent;			\
   1.125 +		_key.fd = slot;						\
   1.126 +		_HT_FIND_OR_INSERT(event_io_map, map_node, hashsocket, map, \
   1.127 +		    event_map_entry, &_key, ptr,			\
   1.128 +		    {							\
   1.129 +			    _ent = *ptr;				\
   1.130 +		    },							\
   1.131 +		    {							\
   1.132 +			    _ent = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
   1.133 +			    if (EVUTIL_UNLIKELY(_ent == NULL))		\
   1.134 +				    return (-1);			\
   1.135 +			    _ent->fd = slot;				\
   1.136 +			    (ctor)(&_ent->ent.type);			\
   1.137 +			    _HT_FOI_INSERT(map_node, map, &_key, _ent, ptr) \
   1.138 +				});					\
   1.139 +		(x) = &_ent->ent.type;					\
   1.140 +	} while (0)
   1.141 +
   1.142 +void evmap_io_initmap(struct event_io_map *ctx)
   1.143 +{
   1.144 +	HT_INIT(event_io_map, ctx);
   1.145 +}
   1.146 +
   1.147 +void evmap_io_clear(struct event_io_map *ctx)
   1.148 +{
   1.149 +	struct event_map_entry **ent, **next, *this;
   1.150 +	for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
   1.151 +		this = *ent;
   1.152 +		next = HT_NEXT_RMV(event_io_map, ctx, ent);
   1.153 +		mm_free(this);
   1.154 +	}
   1.155 +	HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
   1.156 +}
   1.157 +#endif
   1.158 +
/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type)			\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SLOT, but construct the entry for 'slot' if it is not present,
   by allocating enough memory for a 'struct type', and initializing the new
   value by calling the function 'ctor' on it.  Makes the function
   return -1 on allocation failure.
   (fdinfo_len extra bytes are allocated after the struct for the backend's
   per-fd data.)
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		if ((map)->entries[slot] == NULL) {			\
			(map)->entries[slot] =				\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
				return (-1);				\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}							\
		(x) = (struct type *)((map)->entries[slot]);		\
	} while (0)

/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions.  In this configuration the
   io map is the same array-of-pointers layout as the signal map. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
void
evmap_io_initmap(struct event_io_map* ctx)
{
	evmap_signal_initmap(ctx);
}
void
evmap_io_clear(struct event_io_map* ctx)
{
	evmap_signal_clear(ctx);
}
#endif
   1.199 +
   1.200 +
   1.201 +/** Expand 'map' with new entries of width 'msize' until it is big enough
   1.202 +	to store a value in 'slot'.
   1.203 + */
   1.204 +static int
   1.205 +evmap_make_space(struct event_signal_map *map, int slot, int msize)
   1.206 +{
   1.207 +	if (map->nentries <= slot) {
   1.208 +		int nentries = map->nentries ? map->nentries : 32;
   1.209 +		void **tmp;
   1.210 +
   1.211 +		while (nentries <= slot)
   1.212 +			nentries <<= 1;
   1.213 +
   1.214 +		tmp = (void **)mm_realloc(map->entries, nentries * msize);
   1.215 +		if (tmp == NULL)
   1.216 +			return (-1);
   1.217 +
   1.218 +		memset(&tmp[map->nentries], 0,
   1.219 +		    (nentries - map->nentries) * msize);
   1.220 +
   1.221 +		map->nentries = nentries;
   1.222 +		map->entries = tmp;
   1.223 +	}
   1.224 +
   1.225 +	return (0);
   1.226 +}
   1.227 +
   1.228 +void
   1.229 +evmap_signal_initmap(struct event_signal_map *ctx)
   1.230 +{
   1.231 +	ctx->nentries = 0;
   1.232 +	ctx->entries = NULL;
   1.233 +}
   1.234 +
   1.235 +void
   1.236 +evmap_signal_clear(struct event_signal_map *ctx)
   1.237 +{
   1.238 +	if (ctx->entries != NULL) {
   1.239 +		int i;
   1.240 +		for (i = 0; i < ctx->nentries; ++i) {
   1.241 +			if (ctx->entries[i] != NULL)
   1.242 +				mm_free(ctx->entries[i]);
   1.243 +		}
   1.244 +		mm_free(ctx->entries);
   1.245 +		ctx->entries = NULL;
   1.246 +	}
   1.247 +	ctx->nentries = 0;
   1.248 +}
   1.249 +
   1.250 +
   1.251 +/* code specific to file descriptors */
   1.252 +
   1.253 +/** Constructor for struct evmap_io */
   1.254 +static void
   1.255 +evmap_io_init(struct evmap_io *entry)
   1.256 +{
   1.257 +	TAILQ_INIT(&entry->events);
   1.258 +	entry->nread = 0;
   1.259 +	entry->nwrite = 0;
   1.260 +}
   1.261 +
   1.262 +
/* return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_add(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx = NULL;
	int nread, nwrite, retval = 0;
	short res = 0, old = 0;
	struct event *old_ev;

	EVUTIL_ASSERT(fd == ev->ev_fd);

	if (fd < 0)
		return 0;

#ifndef EVMAP_USE_HT
	/* Array implementation: grow the per-fd array on demand. */
	if (fd >= io->nentries) {
		if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
			return (-1);
	}
#endif
	/* Find (or create) the evmap_io for this fd; fdinfo_len bytes of
	 * backend-private data are allocated immediately after it. */
	GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
						 evsel->fdinfo_len);

	nread = ctx->nread;
	nwrite = ctx->nwrite;

	/* 'old' = the events the backend is already watching for this fd. */
	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;

	/* 'res' = events becoming newly watched: only a 0 -> 1 transition in
	 * a counter requires telling the backend anything. */
	if (ev->ev_events & EV_READ) {
		if (++nread == 1)
			res |= EV_READ;
	}
	if (ev->ev_events & EV_WRITE) {
		if (++nwrite == 1)
			res |= EV_WRITE;
	}
	/* The counters are stored as ev_uint16_t; refuse to overflow them. */
	if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff)) {
		event_warnx("Too many events reading or writing on fd %d",
		    (int)fd);
		return -1;
	}
	if (EVENT_DEBUG_MODE_IS_ON() &&
	    (old_ev = TAILQ_FIRST(&ctx->events)) &&
	    (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
		event_warnx("Tried to mix edge-triggered and non-edge-triggered"
		    " events on fd %d", (int)fd);
		return -1;
	}

	if (res) {
		/* Backend-private fdinfo lives right after the evmap_io. */
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		/* XXX(niels): we cannot mix edge-triggered and
		 * level-triggered, we should probably assert on
		 * this. */
		if (evsel->add(base, ev->ev_fd,
			old, (ev->ev_events & EV_ET) | res, extra) == -1)
			return (-1);
		retval = 1;
	}

	/* Commit the counters and queue the event only after the backend
	 * call succeeded, so a failed add leaves the map unchanged. */
	ctx->nread = (ev_uint16_t) nread;
	ctx->nwrite = (ev_uint16_t) nwrite;
	TAILQ_INSERT_TAIL(&ctx->events, ev, ev_io_next);

	return (retval);
}
   1.335 +
   1.336 +/* return -1 on error, 0 on success if nothing changed in the event backend,
   1.337 + * and 1 on success if something did. */
   1.338 +int
   1.339 +evmap_io_del(struct event_base *base, evutil_socket_t fd, struct event *ev)
   1.340 +{
   1.341 +	const struct eventop *evsel = base->evsel;
   1.342 +	struct event_io_map *io = &base->io;
   1.343 +	struct evmap_io *ctx;
   1.344 +	int nread, nwrite, retval = 0;
   1.345 +	short res = 0, old = 0;
   1.346 +
   1.347 +	if (fd < 0)
   1.348 +		return 0;
   1.349 +
   1.350 +	EVUTIL_ASSERT(fd == ev->ev_fd);
   1.351 +
   1.352 +#ifndef EVMAP_USE_HT
   1.353 +	if (fd >= io->nentries)
   1.354 +		return (-1);
   1.355 +#endif
   1.356 +
   1.357 +	GET_IO_SLOT(ctx, io, fd, evmap_io);
   1.358 +
   1.359 +	nread = ctx->nread;
   1.360 +	nwrite = ctx->nwrite;
   1.361 +
   1.362 +	if (nread)
   1.363 +		old |= EV_READ;
   1.364 +	if (nwrite)
   1.365 +		old |= EV_WRITE;
   1.366 +
   1.367 +	if (ev->ev_events & EV_READ) {
   1.368 +		if (--nread == 0)
   1.369 +			res |= EV_READ;
   1.370 +		EVUTIL_ASSERT(nread >= 0);
   1.371 +	}
   1.372 +	if (ev->ev_events & EV_WRITE) {
   1.373 +		if (--nwrite == 0)
   1.374 +			res |= EV_WRITE;
   1.375 +		EVUTIL_ASSERT(nwrite >= 0);
   1.376 +	}
   1.377 +
   1.378 +	if (res) {
   1.379 +		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
   1.380 +		if (evsel->del(base, ev->ev_fd, old, res, extra) == -1)
   1.381 +			return (-1);
   1.382 +		retval = 1;
   1.383 +	}
   1.384 +
   1.385 +	ctx->nread = nread;
   1.386 +	ctx->nwrite = nwrite;
   1.387 +	TAILQ_REMOVE(&ctx->events, ev, ev_io_next);
   1.388 +
   1.389 +	return (retval);
   1.390 +}
   1.391 +
/* Make every event registered for 'fd' whose interest mask intersects
 * 'events' active.  NOTE(review): uses event_active_nolock, so presumably
 * the base lock is already held by the caller -- confirm at call sites. */
void
evmap_io_active(struct event_base *base, evutil_socket_t fd, short events)
{
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	struct event *ev;

#ifndef EVMAP_USE_HT
	EVUTIL_ASSERT(fd < io->nentries);
#endif
	GET_IO_SLOT(ctx, io, fd, evmap_io);

	EVUTIL_ASSERT(ctx);
	/* Activate each matching event with only the bits that triggered. */
	TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
		if (ev->ev_events & events)
			event_active_nolock(ev, ev->ev_events & events, 1);
	}
}
   1.410 +
   1.411 +/* code specific to signals */
   1.412 +
   1.413 +static void
   1.414 +evmap_signal_init(struct evmap_signal *entry)
   1.415 +{
   1.416 +	TAILQ_INIT(&entry->events);
   1.417 +}
   1.418 +
   1.419 +
   1.420 +int
   1.421 +evmap_signal_add(struct event_base *base, int sig, struct event *ev)
   1.422 +{
   1.423 +	const struct eventop *evsel = base->evsigsel;
   1.424 +	struct event_signal_map *map = &base->sigmap;
   1.425 +	struct evmap_signal *ctx = NULL;
   1.426 +
   1.427 +	if (sig >= map->nentries) {
   1.428 +		if (evmap_make_space(
   1.429 +			map, sig, sizeof(struct evmap_signal *)) == -1)
   1.430 +			return (-1);
   1.431 +	}
   1.432 +	GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
   1.433 +	    base->evsigsel->fdinfo_len);
   1.434 +
   1.435 +	if (TAILQ_EMPTY(&ctx->events)) {
   1.436 +		if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
   1.437 +		    == -1)
   1.438 +			return (-1);
   1.439 +	}
   1.440 +
   1.441 +	TAILQ_INSERT_TAIL(&ctx->events, ev, ev_signal_next);
   1.442 +
   1.443 +	return (1);
   1.444 +}
   1.445 +
/* Remove 'ev' from the list for signal 'sig'.  Tells the signal backend to
 * stop watching 'sig' when 'ev' was the last event for it.  Returns 1 on
 * success, -1 on failure. */
int
evmap_signal_del(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;

	if (sig >= map->nentries)
		return (-1);

	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	/* first == last means the list holds at most one element, so after
	 * removing 'ev' the backend no longer needs to watch this signal. */
	if (TAILQ_FIRST(&ctx->events) == TAILQ_LAST(&ctx->events, event_list)) {
		if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
			return (-1);
	}

	TAILQ_REMOVE(&ctx->events, ev, ev_signal_next);

	return (1);
}
   1.467 +
/* Make every event registered for signal 'sig' active, each with 'ncalls'
 * pending invocations.  NOTE(review): uses event_active_nolock, so
 * presumably the base lock is already held -- confirm at call sites. */
void
evmap_signal_active(struct event_base *base, evutil_socket_t sig, int ncalls)
{
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;
	struct event *ev;

	EVUTIL_ASSERT(sig < map->nentries);
	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	TAILQ_FOREACH(ev, &ctx->events, ev_signal_next)
		event_active_nolock(ev, EV_SIGNAL, ncalls);
}
   1.481 +
   1.482 +void *
   1.483 +evmap_io_get_fdinfo(struct event_io_map *map, evutil_socket_t fd)
   1.484 +{
   1.485 +	struct evmap_io *ctx;
   1.486 +	GET_IO_SLOT(ctx, map, fd, evmap_io);
   1.487 +	if (ctx)
   1.488 +		return ((char*)ctx) + sizeof(struct evmap_io);
   1.489 +	else
   1.490 +		return NULL;
   1.491 +}
   1.492 +
/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 * It is stored in the fdinfo area that follows each evmap_io/evmap_signal.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};
   1.500 +
   1.501 +void
   1.502 +event_changelist_init(struct event_changelist *changelist)
   1.503 +{
   1.504 +	changelist->changes = NULL;
   1.505 +	changelist->changes_size = 0;
   1.506 +	changelist->n_changes = 0;
   1.507 +}
   1.508 +
/** Helper: return the changelist_fdinfo corresponding to a given change.
 * The fdinfo block is stored directly after the evmap_signal or evmap_io
 * struct that the change's fd maps to. */
static inline struct event_changelist_fdinfo *
event_change_get_fdinfo(struct event_base *base,
    const struct event_change *change)
{
	char *ptr;
	if (change->read_change & EV_CHANGE_SIGNAL) {
		/* Signal changes live in the sigmap. */
		struct evmap_signal *ctx;
		GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
		ptr = ((char*)ctx) + sizeof(struct evmap_signal);
	} else {
		/* IO changes live in the io map. */
		struct evmap_io *ctx;
		GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
		ptr = ((char*)ctx) + sizeof(struct evmap_io);
	}
	return (void*)ptr;
}
   1.526 +
#ifdef DEBUG_CHANGELIST
/** Make sure that the changelist is consistent with the evmap structures. */
static void
event_changelist_check(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	/* Forward check: every pending change's fdinfo must point back at
	 * its own index (stored +1). */
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	/* Reverse check: every io slot whose fdinfo claims a changelist
	 * entry must refer to a change for the matching fd. */
	for (i = 0; i < base->io.nentries; ++i) {
		struct evmap_io *io = base->io.entries[i];
		struct event_changelist_fdinfo *f;
		if (!io)
			continue;
		f = (void*)
		    ( ((char*)io) + sizeof(struct evmap_io) );
		if (f->idxplus1) {
			struct event_change *c = &changelist->changes[f->idxplus1 - 1];
			EVUTIL_ASSERT(c->fd == i);
		}
	}
}
#else
/* The consistency checks compile to nothing unless DEBUG_CHANGELIST is
 * defined. */
#define event_changelist_check(base)  ((void)0)
#endif
   1.561 +
/* Clear every fdinfo back-pointer and empty the changelist, without freeing
 * its storage (the change array is kept for reuse). */
void
event_changelist_remove_all(struct event_changelist *changelist,
    struct event_base *base)
{
	int i;

	event_changelist_check(base);

	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *ch = &changelist->changes[i];
		struct event_changelist_fdinfo *fdinfo =
		    event_change_get_fdinfo(base, ch);
		EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
		fdinfo->idxplus1 = 0; /* 0 means "no changelist entry" */
	}

	changelist->n_changes = 0;

	event_changelist_check(base);
}
   1.582 +
   1.583 +void
   1.584 +event_changelist_freemem(struct event_changelist *changelist)
   1.585 +{
   1.586 +	if (changelist->changes)
   1.587 +		mm_free(changelist->changes);
   1.588 +	event_changelist_init(changelist); /* zero it all out. */
   1.589 +}
   1.590 +
   1.591 +/** Increase the size of 'changelist' to hold more changes. */
   1.592 +static int
   1.593 +event_changelist_grow(struct event_changelist *changelist)
   1.594 +{
   1.595 +	int new_size;
   1.596 +	struct event_change *new_changes;
   1.597 +	if (changelist->changes_size < 64)
   1.598 +		new_size = 64;
   1.599 +	else
   1.600 +		new_size = changelist->changes_size * 2;
   1.601 +
   1.602 +	new_changes = mm_realloc(changelist->changes,
   1.603 +	    new_size * sizeof(struct event_change));
   1.604 +
   1.605 +	if (EVUTIL_UNLIKELY(new_changes == NULL))
   1.606 +		return (-1);
   1.607 +
   1.608 +	changelist->changes = new_changes;
   1.609 +	changelist->changes_size = new_size;
   1.610 +
   1.611 +	return (0);
   1.612 +}
   1.613 +
/** Return a pointer to the changelist entry for the file descriptor or signal
 * 'fd', whose fdinfo is 'fdinfo'.  If none exists, construct it, setting its
 * old_events field to old_events.  Returns NULL when the changelist cannot
 * be grown.
 */
static struct event_change *
event_changelist_get_or_construct(struct event_changelist *changelist,
    evutil_socket_t fd,
    short old_events,
    struct event_changelist_fdinfo *fdinfo)
{
	struct event_change *change;

	if (fdinfo->idxplus1 == 0) {
		/* No pending change for this fd yet: append a fresh entry. */
		int idx;
		EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);

		if (changelist->n_changes == changelist->changes_size) {
			if (event_changelist_grow(changelist) < 0)
				return NULL;
		}

		idx = changelist->n_changes++;
		change = &changelist->changes[idx];
		fdinfo->idxplus1 = idx + 1; /* stored +1 so 0 means "none" */

		memset(change, 0, sizeof(struct event_change));
		change->fd = fd;
		change->old_events = old_events;
	} else {
		/* Reuse the change already pending for this fd. */
		change = &changelist->changes[fdinfo->idxplus1 - 1];
		EVUTIL_ASSERT(change->fd == fd);
	}
	return change;
}
   1.648 +
/* Changelist-backend "add" hook: record that 'events' should be added for
 * 'fd'.  'old' is the set of events previously watched; 'p' is the per-fd
 * event_changelist_fdinfo.  Returns 0 on success, -1 if the changelist
 * cannot be grown. */
int
event_changelist_add(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);

	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	if (!change)
		return -1;

	/* An add replaces any previous delete, but doesn't result in a no-op,
	 * since the delete might fail (because the fd had been closed since
	 * the last add, for instance. */

	if (events & (EV_READ|EV_SIGNAL)) {
		change->read_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}
	if (events & EV_WRITE) {
		change->write_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}

	event_changelist_check(base);
	return (0);
}
   1.679 +
/* Changelist-backend "del" hook: record that 'events' should be removed for
 * 'fd'.  A delete cancels a pending add for an event that was not previously
 * watched (turning the pair into a no-op); otherwise it records an explicit
 * EV_CHANGE_DEL.  Returns 0 on success, -1 if the changelist cannot be
 * grown. */
int
event_changelist_del(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);
	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	event_changelist_check(base);
	if (!change)
		return -1;

	/* A delete removes any previous add, rather than replacing it:
	   on those platforms where "add, delete, dispatch" is not the same
	   as "no-op, dispatch", we want the no-op behavior.

	   As well as checking the current operation we should also check
	   the original set of events to make sure were not ignoring
	   the case where the add operation is present on an event that
	   was already set.

	   If we have a no-op item, we could remove it it from the list
	   entirely, but really there's not much point: skipping the no-op
	   change when we do the dispatch later is far cheaper than rejuggling
	   the array now.

	   As this stands, it also lets through deletions of events that are
	   not currently set.
	 */

	if (events & (EV_READ|EV_SIGNAL)) {
		if (!(change->old_events & (EV_READ | EV_SIGNAL)) &&
		    (change->read_change & EV_CHANGE_ADD))
			change->read_change = 0;
		else
			change->read_change = EV_CHANGE_DEL;
	}
	if (events & EV_WRITE) {
		if (!(change->old_events & EV_WRITE) &&
		    (change->write_change & EV_CHANGE_ADD))
			change->write_change = 0;
		else
			change->write_change = EV_CHANGE_DEL;
	}

	event_changelist_check(base);
	return (0);
}
   1.730 +
/* Debug helper: verify that every inserted event appears exactly once in the
 * io map or signal map, and that the maps contain no events missing from the
 * base's event queue.  Uses two scratch flag bits on ev_flags while
 * scanning. */
void
evmap_check_integrity(struct event_base *base)
{
#define EVLIST_X_SIGFOUND 0x1000
#define EVLIST_X_IOFOUND 0x2000

	evutil_socket_t i;
	struct event *ev;
	struct event_io_map *io = &base->io;
	struct event_signal_map *sigmap = &base->sigmap;
#ifdef EVMAP_USE_HT
	struct event_map_entry **mapent;
#endif
	int nsignals, ntimers, nio;
	nsignals = ntimers = nio = 0;

	/* Pass 1: clear the scratch flags on every inserted event. */
	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INIT);
		ev->ev_flags &= ~(EVLIST_X_SIGFOUND|EVLIST_X_IOFOUND);
	}

	/* Pass 2: walk the io map; the loop header differs per
	 * implementation (hashtable vs. array) but the body below the #endif
	 * is shared between both #ifdef branches. */
#ifdef EVMAP_USE_HT
	HT_FOREACH(mapent, event_io_map, io) {
		struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
		i = (*mapent)->fd;
#else
	for (i = 0; i < io->nentries; ++i) {
		struct evmap_io *ctx = io->entries[i];

		if (!ctx)
			continue;
#endif

		TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
			EVUTIL_ASSERT(!(ev->ev_flags & EVLIST_X_IOFOUND));
			EVUTIL_ASSERT(ev->ev_fd == i);
			ev->ev_flags |= EVLIST_X_IOFOUND;
			nio++;
		}
	}

	/* Pass 3: walk the signal map the same way. */
	for (i = 0; i < sigmap->nentries; ++i) {
		struct evmap_signal *ctx = sigmap->entries[i];
		if (!ctx)
			continue;

		TAILQ_FOREACH(ev, &ctx->events, ev_signal_next) {
			EVUTIL_ASSERT(!(ev->ev_flags & EVLIST_X_SIGFOUND));
			EVUTIL_ASSERT(ev->ev_fd == i);
			ev->ev_flags |= EVLIST_X_SIGFOUND;
			nsignals++;
		}
	}

	/* Pass 4: every queued io/signal event must have been found above;
	 * the counters net out to zero when the maps match the queue. */
	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (ev->ev_events & (EV_READ|EV_WRITE)) {
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_X_IOFOUND);
			--nio;
		}
		if (ev->ev_events & EV_SIGNAL) {
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_X_SIGFOUND);
			--nsignals;
		}
	}

	EVUTIL_ASSERT(nio == 0);
	EVUTIL_ASSERT(nsignals == 0);
	/* There is no "EVUTIL_ASSERT(ntimers == 0)": eventqueue is only for
	 * pending signals and io events.
	 */
}

mercurial