ipc/chromium/src/third_party/libevent/kqueue.c

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/ipc/chromium/src/third_party/libevent/kqueue.c	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,476 @@
     1.4 +/*	$OpenBSD: kqueue.c,v 1.5 2002/07/10 14:41:31 art Exp $	*/
     1.5 +
     1.6 +/*
     1.7 + * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
     1.8 + * Copyright 2007-2012 Niels Provos and Nick Mathewson
     1.9 + *
    1.10 + * Redistribution and use in source and binary forms, with or without
    1.11 + * modification, are permitted provided that the following conditions
    1.12 + * are met:
    1.13 + * 1. Redistributions of source code must retain the above copyright
    1.14 + *    notice, this list of conditions and the following disclaimer.
    1.15 + * 2. Redistributions in binary form must reproduce the above copyright
    1.16 + *    notice, this list of conditions and the following disclaimer in the
    1.17 + *    documentation and/or other materials provided with the distribution.
    1.18 + * 3. The name of the author may not be used to endorse or promote products
    1.19 + *    derived from this software without specific prior written permission.
    1.20 + *
    1.21 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
    1.22 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
    1.23 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
    1.24 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
    1.25 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
    1.26 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    1.27 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    1.28 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    1.29 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
    1.30 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    1.31 + */
    1.32 +#include "event2/event-config.h"
    1.33 +
    1.34 +#define _GNU_SOURCE
    1.35 +
    1.36 +#include <sys/types.h>
    1.37 +#ifdef _EVENT_HAVE_SYS_TIME_H
    1.38 +#include <sys/time.h>
    1.39 +#endif
    1.40 +#include <sys/queue.h>
    1.41 +#include <sys/event.h>
    1.42 +#include <signal.h>
    1.43 +#include <stdio.h>
    1.44 +#include <stdlib.h>
    1.45 +#include <string.h>
    1.46 +#include <unistd.h>
    1.47 +#include <errno.h>
    1.48 +#ifdef _EVENT_HAVE_INTTYPES_H
    1.49 +#include <inttypes.h>
    1.50 +#endif
    1.51 +
    1.52 +/* Some platforms apparently define the udata field of struct kevent as
    1.53 + * intptr_t, whereas others define it as void*.  There doesn't seem to be an
    1.54 + * easy way to tell them apart via autoconf, so we need to use OS macros. */
    1.55 +#if defined(_EVENT_HAVE_INTTYPES_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__darwin__) && !defined(__APPLE__)
    1.56 +#define PTR_TO_UDATA(x)	((intptr_t)(x))
    1.57 +#define INT_TO_UDATA(x) ((intptr_t)(x))
    1.58 +#else
    1.59 +#define PTR_TO_UDATA(x)	(x)
    1.60 +#define INT_TO_UDATA(x) ((void*)(x))
    1.61 +#endif
    1.62 +
    1.63 +#include "event-internal.h"
    1.64 +#include "log-internal.h"
    1.65 +#include "evmap-internal.h"
    1.66 +#include "event2/thread.h"
    1.67 +#include "evthread-internal.h"
    1.68 +#include "changelist-internal.h"
    1.69 +
    1.70 +#define NEVENT		64
    1.71 +
/* Per-event_base state for the kqueue backend. */
struct kqop {
	struct kevent *changes;		/* staged kevents for the next kevent() call */
	int changes_size;		/* allocated capacity of 'changes' */

	struct kevent *events;		/* result array filled in by kevent() */
	int events_size;		/* allocated capacity of 'events' */
	int kq;				/* the kqueue file descriptor */
	pid_t pid;			/* process that created 'kq'; kqop_free()
					 * only closes 'kq' from that process */
};
    1.81 +
    1.82 +static void kqop_free(struct kqop *kqop);
    1.83 +
    1.84 +static void *kq_init(struct event_base *);
    1.85 +static int kq_sig_add(struct event_base *, int, short, short, void *);
    1.86 +static int kq_sig_del(struct event_base *, int, short, short, void *);
    1.87 +static int kq_dispatch(struct event_base *, struct timeval *);
    1.88 +static void kq_dealloc(struct event_base *);
    1.89 +
/* Backend method table for I/O events.  Adds and deletes go through the
 * generic changelist code and are only translated into kevents inside
 * kq_dispatch(), so a single kevent() call flushes them in a batch. */
const struct eventop kqops = {
	"kqueue",
	kq_init,
	event_changelist_add,
	event_changelist_del,
	kq_dispatch,
	kq_dealloc,
	1 /* need reinit */,
	EV_FEATURE_ET|EV_FEATURE_O1|EV_FEATURE_FDS,
	EVENT_CHANGELIST_FDINFO_SIZE
};
   1.101 +
/* Backend method table for signal events.  Signals are registered with
 * and removed from the kernel immediately (kq_sig_add / kq_sig_del), so
 * no init, dispatch, or dealloc hooks are needed here. */
static const struct eventop kqsigops = {
	"kqueue_signal",
	NULL,
	kq_sig_add,
	kq_sig_del,
	NULL,
	NULL,
	1 /* need reinit */,
	0,
	0
};
   1.113 +
/* Set up the kqueue backend for 'base': create the kernel queue,
 * allocate the initial change/result arrays, and probe for the broken
 * kqueue implementation seen on some Mac OS X releases.  Returns the
 * new kqop on success, or NULL (with everything released) on failure. */
static void *
kq_init(struct event_base *base)
{
	int kq = -1;
	struct kqop *kqueueop = NULL;

	if (!(kqueueop = mm_calloc(1, sizeof(struct kqop))))
		return (NULL);

/* Initialize the kernel queue */

	if ((kq = kqueue()) == -1) {
		event_warn("kqueue");
		goto err;
	}

	kqueueop->kq = kq;

	/* Record the creating process; kqop_free() only closes the kq
	 * fd when still running in that same process. */
	kqueueop->pid = getpid();

	/* Initialize fields */
	kqueueop->changes = mm_calloc(NEVENT, sizeof(struct kevent));
	if (kqueueop->changes == NULL)
		goto err;
	kqueueop->events = mm_calloc(NEVENT, sizeof(struct kevent));
	if (kqueueop->events == NULL)
		goto err;
	kqueueop->events_size = kqueueop->changes_size = NEVENT;

	/* Check for Mac OS X kqueue bug. */
	memset(&kqueueop->changes[0], 0, sizeof kqueueop->changes[0]);
	kqueueop->changes[0].ident = -1;
	kqueueop->changes[0].filter = EVFILT_READ;
	kqueueop->changes[0].flags = EV_ADD;
	/*
	 * If kqueue works, then kevent will succeed, and it will
	 * stick an error in events[0].  If kqueue is broken, then
	 * kevent will fail.
	 */
	if (kevent(kq,
		kqueueop->changes, 1, kqueueop->events, NEVENT, NULL) != 1 ||
	    (int)kqueueop->events[0].ident != -1 ||
	    kqueueop->events[0].flags != EV_ERROR) {
		event_warn("%s: detected broken kqueue; not using.", __func__);
		goto err;
	}

	/* Route signal events through EVFILT_SIGNAL instead of the
	 * generic signal backend. */
	base->evsigsel = &kqsigops;

	return (kqueueop);
err:
	if (kqueueop)
		kqop_free(kqueueop);

	return (NULL);
}
   1.170 +
   1.171 +#define ADD_UDATA 0x30303
   1.172 +
   1.173 +static void
   1.174 +kq_setup_kevent(struct kevent *out, evutil_socket_t fd, int filter, short change)
   1.175 +{
   1.176 +	memset(out, 0, sizeof(struct kevent));
   1.177 +	out->ident = fd;
   1.178 +	out->filter = filter;
   1.179 +
   1.180 +	if (change & EV_CHANGE_ADD) {
   1.181 +		out->flags = EV_ADD;
   1.182 +		/* We set a magic number here so that we can tell 'add'
   1.183 +		 * errors from 'del' errors. */
   1.184 +		out->udata = INT_TO_UDATA(ADD_UDATA);
   1.185 +		if (change & EV_ET)
   1.186 +			out->flags |= EV_CLEAR;
   1.187 +#ifdef NOTE_EOF
   1.188 +		/* Make it behave like select() and poll() */
   1.189 +		if (filter == EVFILT_READ)
   1.190 +			out->fflags = NOTE_EOF;
   1.191 +#endif
   1.192 +	} else {
   1.193 +		EVUTIL_ASSERT(change & EV_CHANGE_DEL);
   1.194 +		out->flags = EV_DELETE;
   1.195 +	}
   1.196 +}
   1.197 +
   1.198 +static int
   1.199 +kq_build_changes_list(const struct event_changelist *changelist,
   1.200 +    struct kqop *kqop)
   1.201 +{
   1.202 +	int i;
   1.203 +	int n_changes = 0;
   1.204 +
   1.205 +	for (i = 0; i < changelist->n_changes; ++i) {
   1.206 +		struct event_change *in_ch = &changelist->changes[i];
   1.207 +		struct kevent *out_ch;
   1.208 +		if (n_changes >= kqop->changes_size - 1) {
   1.209 +			int newsize = kqop->changes_size * 2;
   1.210 +			struct kevent *newchanges;
   1.211 +
   1.212 +			newchanges = mm_realloc(kqop->changes,
   1.213 +			    newsize * sizeof(struct kevent));
   1.214 +			if (newchanges == NULL) {
   1.215 +				event_warn("%s: realloc", __func__);
   1.216 +				return (-1);
   1.217 +			}
   1.218 +			kqop->changes = newchanges;
   1.219 +			kqop->changes_size = newsize;
   1.220 +		}
   1.221 +		if (in_ch->read_change) {
   1.222 +			out_ch = &kqop->changes[n_changes++];
   1.223 +			kq_setup_kevent(out_ch, in_ch->fd, EVFILT_READ,
   1.224 +			    in_ch->read_change);
   1.225 +		}
   1.226 +		if (in_ch->write_change) {
   1.227 +			out_ch = &kqop->changes[n_changes++];
   1.228 +			kq_setup_kevent(out_ch, in_ch->fd, EVFILT_WRITE,
   1.229 +			    in_ch->write_change);
   1.230 +		}
   1.231 +	}
   1.232 +	return n_changes;
   1.233 +}
   1.234 +
   1.235 +static int
   1.236 +kq_grow_events(struct kqop *kqop, size_t new_size)
   1.237 +{
   1.238 +	struct kevent *newresult;
   1.239 +
   1.240 +	newresult = mm_realloc(kqop->events,
   1.241 +	    new_size * sizeof(struct kevent));
   1.242 +
   1.243 +	if (newresult) {
   1.244 +		kqop->events = newresult;
   1.245 +		kqop->events_size = new_size;
   1.246 +		return 0;
   1.247 +	} else {
   1.248 +		return -1;
   1.249 +	}
   1.250 +}
   1.251 +
/* Flush queued changes to the kernel with kevent(), wait up to *tv (or
 * forever if tv is NULL) for events, and activate each triggered event
 * on 'base'.  Returns 0 on success, timeout, or EINTR; -1 on
 * unrecoverable error. */
static int
kq_dispatch(struct event_base *base, struct timeval *tv)
{
	struct kqop *kqop = base->evbase;
	struct kevent *events = kqop->events;
	struct kevent *changes;
	struct timespec ts, *ts_p = NULL;
	int i, n_changes, res;

	if (tv != NULL) {
		TIMEVAL_TO_TIMESPEC(tv, &ts);
		ts_p = &ts;
	}

	/* Build "changes" from "base->changes" */
	EVUTIL_ASSERT(kqop->changes);
	n_changes = kq_build_changes_list(&base->changelist, kqop);
	if (n_changes < 0)
		return -1;

	event_changelist_remove_all(&base->changelist, base);

	/* steal the changes array in case some broken code tries to call
	 * dispatch twice at once. */
	changes = kqop->changes;
	kqop->changes = NULL;

	/* Make sure that 'events' is at least as long as the list of changes:
	 * otherwise errors in the changes can get reported as a -1 return
	 * value from kevent() rather than as EV_ERROR events in the events
	 * array.
	 *
	 * (We could instead handle -1 return values from kevent() by
	 * retrying with a smaller changes array or a larger events array,
	 * but this approach seems less risky for now.)
	 */
	if (kqop->events_size < n_changes) {
		int new_size = kqop->events_size;
		do {
			new_size *= 2;
		} while (new_size < n_changes);

		/* NOTE(review): a failed grow is silently ignored here;
		 * on OOM the kevent() call below can then report change
		 * errors via a -1 return instead of EV_ERROR entries. */
		kq_grow_events(kqop, new_size);
		events = kqop->events;
	}

	/* The base lock is not held while blocked in the kernel. */
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = kevent(kqop->kq, changes, n_changes,
	    events, kqop->events_size, ts_p);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	/* Give the stolen changes array back; nobody may have replaced it. */
	EVUTIL_ASSERT(kqop->changes == NULL);
	kqop->changes = changes;

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("kevent");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: kevent reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;

		if (events[i].flags & EV_ERROR) {
			switch (events[i].data) {

			/* Can occur on delete if we are not currently
			 * watching any events on this fd.  That can
			 * happen when the fd was closed and another
			 * file was opened with that fd. */
			case ENOENT:
			/* Can occur for reasons not fully understood
			 * on FreeBSD. */
			case EINVAL:
				continue;

			/* Can occur on a delete if the fd is closed. */
			case EBADF:
				/* XXXX On NetBSD, we can also get EBADF if we
				 * try to add the write side of a pipe, but
				 * the read side has already been closed.
				 * Other BSDs call this situation 'EPIPE'. It
				 * would be good if we had a way to report
				 * this situation. */
				continue;
			/* These two can occur on an add if the fd was one side
			 * of a pipe, and the other side was closed. */
			case EPERM:
			case EPIPE:
				/* Report read events, if we're listening for
				 * them, so that the user can learn about any
				 * add errors.  (If the operation was a
				 * delete, then udata should be cleared.) */
				if (events[i].udata) {
					/* The operation was an add:
					 * report the error as a read. */
					which |= EV_READ;
					break;
				} else {
					/* The operation was a del:
					 * report nothing. */
					continue;
				}

			/* Other errors shouldn't occur. */
			default:
				errno = events[i].data;
				return (-1);
			}
		} else if (events[i].filter == EVFILT_READ) {
			which |= EV_READ;
		} else if (events[i].filter == EVFILT_WRITE) {
			which |= EV_WRITE;
		} else if (events[i].filter == EVFILT_SIGNAL) {
			which |= EV_SIGNAL;
		}

		if (!which)
			continue;

		if (events[i].filter == EVFILT_SIGNAL) {
			evmap_signal_active(base, events[i].ident, 1);
		} else {
			/* EV_ET is passed unconditionally; presumably the
			 * evmap layer drops it for events not registered
			 * edge-triggered -- TODO confirm. */
			evmap_io_active(base, events[i].ident, which | EV_ET);
		}
	}

	if (res == kqop->events_size) {
		/* We used all the events space that we have. Maybe we should
		   make it bigger. */
		kq_grow_events(kqop, kqop->events_size * 2);
	}

	return (0);
}
   1.394 +
   1.395 +static void
   1.396 +kqop_free(struct kqop *kqop)
   1.397 +{
   1.398 +	if (kqop->changes)
   1.399 +		mm_free(kqop->changes);
   1.400 +	if (kqop->events)
   1.401 +		mm_free(kqop->events);
   1.402 +	if (kqop->kq >= 0 && kqop->pid == getpid())
   1.403 +		close(kqop->kq);
   1.404 +	memset(kqop, 0, sizeof(struct kqop));
   1.405 +	mm_free(kqop);
   1.406 +}
   1.407 +
/* Tear down the kqueue backend for 'base': release the signal-handler
 * state, then free the kqop (which also closes the kqueue fd). */
static void
kq_dealloc(struct event_base *base)
{
	struct kqop *kqop = base->evbase;
	evsig_dealloc(base);
	kqop_free(kqop);
}
   1.415 +
   1.416 +/* signal handling */
/* Register 'nsignal' for delivery through the kqueue (EVFILT_SIGNAL)
 * and install SIG_IGN (SIG_DFL for SIGCHLD) via _evsig_set_handler().
 * Unlike fd events, this takes effect immediately rather than being
 * queued in the changelist.  Returns 0 on success, -1 on failure. */
static int
kq_sig_add(struct event_base *base, int nsignal, short old, short events, void *p)
{
	struct kqop *kqop = base->evbase;
	struct kevent kev;
	struct timespec timeout = { 0, 0 };
	(void)p;

	EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);

	memset(&kev, 0, sizeof(kev));
	kev.ident = nsignal;
	kev.filter = EVFILT_SIGNAL;
	kev.flags = EV_ADD;

	/* Be ready for the signal if it is sent any
	 * time between now and the next call to
	 * kq_dispatch. */
	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
		return (-1);

	/* Backported from
	 * https://github.com/nmathewson/Libevent/commit/148458e0a1fd25e167aa2ef229d1c9a70b27c3e9 */
	/* We can set the handler for most signals to SIG_IGN and
	 * still have them reported to us in the queue.  However,
	 * if the handler for SIGCHLD is SIG_IGN, the system reaps
	 * zombie processes for us, and we don't get any notification.
	 * This appears to be the only signal with this quirk. */
	if (_evsig_set_handler(base, nsignal,
	                       nsignal == SIGCHLD ? SIG_DFL : SIG_IGN) == -1) {
		return (-1);
	}

	return (0);
}
   1.452 +
   1.453 +static int
   1.454 +kq_sig_del(struct event_base *base, int nsignal, short old, short events, void *p)
   1.455 +{
   1.456 +	struct kqop *kqop = base->evbase;
   1.457 +	struct kevent kev;
   1.458 +
   1.459 +	struct timespec timeout = { 0, 0 };
   1.460 +	(void)p;
   1.461 +
   1.462 +	EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);
   1.463 +
   1.464 +	memset(&kev, 0, sizeof(kev));
   1.465 +	kev.ident = nsignal;
   1.466 +	kev.filter = EVFILT_SIGNAL;
   1.467 +	kev.flags = EV_DELETE;
   1.468 +
   1.469 +	/* Because we insert signal events
   1.470 +	 * immediately, we need to delete them
   1.471 +	 * immediately, too */
   1.472 +	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
   1.473 +		return (-1);
   1.474 +
   1.475 +	if (_evsig_restore_handler(base, nsignal) == -1)
   1.476 +		return (-1);
   1.477 +
   1.478 +	return (0);
   1.479 +}

mercurial