ipc/chromium/src/third_party/libevent/kqueue.c

Wed, 31 Dec 2014 06:09:35 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Wed, 31 Dec 2014 06:09:35 +0100
changeset 0
6474c204b198
permissions
-rw-r--r--

Cloned from the upstream tor-browser origin at tag tor-browser-31.3.0esr-4.5-1-build1
(revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f) for hacking purposes.

michael@0 1 /* $OpenBSD: kqueue.c,v 1.5 2002/07/10 14:41:31 art Exp $ */
michael@0 2
michael@0 3 /*
michael@0 4 * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
michael@0 5 * Copyright 2007-2012 Niels Provos and Nick Mathewson
michael@0 6 *
michael@0 7 * Redistribution and use in source and binary forms, with or without
michael@0 8 * modification, are permitted provided that the following conditions
michael@0 9 * are met:
michael@0 10 * 1. Redistributions of source code must retain the above copyright
michael@0 11 * notice, this list of conditions and the following disclaimer.
michael@0 12 * 2. Redistributions in binary form must reproduce the above copyright
michael@0 13 * notice, this list of conditions and the following disclaimer in the
michael@0 14 * documentation and/or other materials provided with the distribution.
michael@0 15 * 3. The name of the author may not be used to endorse or promote products
michael@0 16 * derived from this software without specific prior written permission.
michael@0 17 *
michael@0 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
michael@0 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
michael@0 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
michael@0 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
michael@0 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
michael@0 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
michael@0 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
michael@0 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
michael@0 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
michael@0 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
michael@0 28 */
#include "event2/event-config.h"

#define _GNU_SOURCE

#include <sys/types.h>
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <sys/event.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#ifdef _EVENT_HAVE_INTTYPES_H
#include <inttypes.h>
#endif
michael@0 48
michael@0 49 /* Some platforms apparently define the udata field of struct kevent as
michael@0 50 * intptr_t, whereas others define it as void*. There doesn't seem to be an
michael@0 51 * easy way to tell them apart via autoconf, so we need to use OS macros. */
michael@0 52 #if defined(_EVENT_HAVE_INTTYPES_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__darwin__) && !defined(__APPLE__)
michael@0 53 #define PTR_TO_UDATA(x) ((intptr_t)(x))
michael@0 54 #define INT_TO_UDATA(x) ((intptr_t)(x))
michael@0 55 #else
michael@0 56 #define PTR_TO_UDATA(x) (x)
michael@0 57 #define INT_TO_UDATA(x) ((void*)(x))
michael@0 58 #endif
michael@0 59
michael@0 60 #include "event-internal.h"
michael@0 61 #include "log-internal.h"
michael@0 62 #include "evmap-internal.h"
michael@0 63 #include "event2/thread.h"
michael@0 64 #include "evthread-internal.h"
michael@0 65 #include "changelist-internal.h"
michael@0 66
michael@0 67 #define NEVENT 64
michael@0 68
/* Per-event_base state for the kqueue backend. */
struct kqop {
	struct kevent *changes;	/* Batched changes to hand to kevent() */
	int changes_size;	/* Capacity (in entries) of 'changes' */

	struct kevent *events;	/* Output buffer for kevent() results */
	int events_size;	/* Capacity (in entries) of 'events' */
	int kq;			/* The kqueue file descriptor itself */
	pid_t pid;		/* PID that created 'kq'; kqop_free() only
				 * closes the fd when pids match, presumably
				 * to avoid closing an fd inherited across
				 * fork() -- see kqop_free(). */
};
michael@0 78
michael@0 79 static void kqop_free(struct kqop *kqop);
michael@0 80
michael@0 81 static void *kq_init(struct event_base *);
michael@0 82 static int kq_sig_add(struct event_base *, int, short, short, void *);
michael@0 83 static int kq_sig_del(struct event_base *, int, short, short, void *);
michael@0 84 static int kq_dispatch(struct event_base *, struct timeval *);
michael@0 85 static void kq_dealloc(struct event_base *);
michael@0 86
/* The eventop vtable exported for the kqueue backend.  fd adds/deletes go
 * through the generic changelist and are flushed in kq_dispatch. */
const struct eventop kqops = {
	"kqueue",
	kq_init,
	event_changelist_add,	/* add: queued in base->changelist */
	event_changelist_del,	/* del: queued in base->changelist */
	kq_dispatch,
	kq_dealloc,
	1 /* need reinit */,	/* kqueue fds do not survive fork() reinit */
	EV_FEATURE_ET|EV_FEATURE_O1|EV_FEATURE_FDS,
	EVENT_CHANGELIST_FDINFO_SIZE	/* per-fd extra space for changelist */
};
michael@0 98
/* Signal-handling eventop installed as base->evsigsel by kq_init: signals
 * are delivered through EVFILT_SIGNAL, so adds/deletes go straight to the
 * kernel (no changelist, no dispatch of its own). */
static const struct eventop kqsigops = {
	"kqueue_signal",
	NULL,			/* no separate init; shares the kqop state */
	kq_sig_add,
	kq_sig_del,
	NULL,			/* dispatch handled by kq_dispatch */
	NULL,			/* dealloc handled by kq_dealloc */
	1 /* need reinit */,
	0,
	0
};
michael@0 110
/* Backend constructor: allocate a struct kqop, create the kqueue, size the
 * changes/events arrays, and probe the kernel for a known-broken kqueue
 * implementation.  Returns the new kqop, or NULL on any failure. */
static void *
kq_init(struct event_base *base)
{
	int kq = -1;
	struct kqop *kqueueop = NULL;

	if (!(kqueueop = mm_calloc(1, sizeof(struct kqop))))
		return (NULL);

	/* Initialize the kernel queue */

	if ((kq = kqueue()) == -1) {
		event_warn("kqueue");
		goto err;
	}

	kqueueop->kq = kq;

	/* Record our pid; kqop_free() compares it against getpid() before
	 * closing 'kq', presumably so a forked child does not close the
	 * parent's descriptor.  (Note: if kqueue() failed above, pid stays
	 * 0 from calloc, so kqop_free() will not close fd 0.) */
	kqueueop->pid = getpid();

	/* Initialize fields */
	kqueueop->changes = mm_calloc(NEVENT, sizeof(struct kevent));
	if (kqueueop->changes == NULL)
		goto err;
	kqueueop->events = mm_calloc(NEVENT, sizeof(struct kevent));
	if (kqueueop->events == NULL)
		goto err;
	kqueueop->events_size = kqueueop->changes_size = NEVENT;

	/* Check for Mac OS X kqueue bug: submit a deliberately bogus change
	 * (ident -1) and verify the error is reported back in events[0]. */
	memset(&kqueueop->changes[0], 0, sizeof kqueueop->changes[0]);
	kqueueop->changes[0].ident = -1;
	kqueueop->changes[0].filter = EVFILT_READ;
	kqueueop->changes[0].flags = EV_ADD;
	/*
	 * If kqueue works, then kevent will succeed, and it will
	 * stick an error in events[0].  If kqueue is broken, then
	 * kevent will fail.
	 */
	if (kevent(kq,
		kqueueop->changes, 1, kqueueop->events, NEVENT, NULL) != 1 ||
	    (int)kqueueop->events[0].ident != -1 ||
	    kqueueop->events[0].flags != EV_ERROR) {
		event_warn("%s: detected broken kqueue; not using.", __func__);
		goto err;
	}

	/* Route signal registration through EVFILT_SIGNAL as well. */
	base->evsigsel = &kqsigops;

	return (kqueueop);
err:
	if (kqueueop)
		kqop_free(kqueueop);

	return (NULL);
}
michael@0 167
michael@0 168 #define ADD_UDATA 0x30303
michael@0 169
michael@0 170 static void
michael@0 171 kq_setup_kevent(struct kevent *out, evutil_socket_t fd, int filter, short change)
michael@0 172 {
michael@0 173 memset(out, 0, sizeof(struct kevent));
michael@0 174 out->ident = fd;
michael@0 175 out->filter = filter;
michael@0 176
michael@0 177 if (change & EV_CHANGE_ADD) {
michael@0 178 out->flags = EV_ADD;
michael@0 179 /* We set a magic number here so that we can tell 'add'
michael@0 180 * errors from 'del' errors. */
michael@0 181 out->udata = INT_TO_UDATA(ADD_UDATA);
michael@0 182 if (change & EV_ET)
michael@0 183 out->flags |= EV_CLEAR;
michael@0 184 #ifdef NOTE_EOF
michael@0 185 /* Make it behave like select() and poll() */
michael@0 186 if (filter == EVFILT_READ)
michael@0 187 out->fflags = NOTE_EOF;
michael@0 188 #endif
michael@0 189 } else {
michael@0 190 EVUTIL_ASSERT(change & EV_CHANGE_DEL);
michael@0 191 out->flags = EV_DELETE;
michael@0 192 }
michael@0 193 }
michael@0 194
michael@0 195 static int
michael@0 196 kq_build_changes_list(const struct event_changelist *changelist,
michael@0 197 struct kqop *kqop)
michael@0 198 {
michael@0 199 int i;
michael@0 200 int n_changes = 0;
michael@0 201
michael@0 202 for (i = 0; i < changelist->n_changes; ++i) {
michael@0 203 struct event_change *in_ch = &changelist->changes[i];
michael@0 204 struct kevent *out_ch;
michael@0 205 if (n_changes >= kqop->changes_size - 1) {
michael@0 206 int newsize = kqop->changes_size * 2;
michael@0 207 struct kevent *newchanges;
michael@0 208
michael@0 209 newchanges = mm_realloc(kqop->changes,
michael@0 210 newsize * sizeof(struct kevent));
michael@0 211 if (newchanges == NULL) {
michael@0 212 event_warn("%s: realloc", __func__);
michael@0 213 return (-1);
michael@0 214 }
michael@0 215 kqop->changes = newchanges;
michael@0 216 kqop->changes_size = newsize;
michael@0 217 }
michael@0 218 if (in_ch->read_change) {
michael@0 219 out_ch = &kqop->changes[n_changes++];
michael@0 220 kq_setup_kevent(out_ch, in_ch->fd, EVFILT_READ,
michael@0 221 in_ch->read_change);
michael@0 222 }
michael@0 223 if (in_ch->write_change) {
michael@0 224 out_ch = &kqop->changes[n_changes++];
michael@0 225 kq_setup_kevent(out_ch, in_ch->fd, EVFILT_WRITE,
michael@0 226 in_ch->write_change);
michael@0 227 }
michael@0 228 }
michael@0 229 return n_changes;
michael@0 230 }
michael@0 231
michael@0 232 static int
michael@0 233 kq_grow_events(struct kqop *kqop, size_t new_size)
michael@0 234 {
michael@0 235 struct kevent *newresult;
michael@0 236
michael@0 237 newresult = mm_realloc(kqop->events,
michael@0 238 new_size * sizeof(struct kevent));
michael@0 239
michael@0 240 if (newresult) {
michael@0 241 kqop->events = newresult;
michael@0 242 kqop->events_size = new_size;
michael@0 243 return 0;
michael@0 244 } else {
michael@0 245 return -1;
michael@0 246 }
michael@0 247 }
michael@0 248
/* Flush the batched changes to the kernel, wait up to 'tv' (forever if
 * tv is NULL) for events, and activate the matching libevent events.
 * Returns 0 on success (including EINTR), -1 on a fatal kevent() error. */
static int
kq_dispatch(struct event_base *base, struct timeval *tv)
{
	struct kqop *kqop = base->evbase;
	struct kevent *events = kqop->events;
	struct kevent *changes;
	struct timespec ts, *ts_p = NULL;
	int i, n_changes, res;

	if (tv != NULL) {
		TIMEVAL_TO_TIMESPEC(tv, &ts);
		ts_p = &ts;
	}

	/* Build "changes" from "base->changes" */
	EVUTIL_ASSERT(kqop->changes);
	n_changes = kq_build_changes_list(&base->changelist, kqop);
	if (n_changes < 0)
		return -1;

	event_changelist_remove_all(&base->changelist, base);

	/* steal the changes array in case some broken code tries to call
	 * dispatch twice at once.  (Restored below after kevent().) */
	changes = kqop->changes;
	kqop->changes = NULL;

	/* Make sure that 'events' is at least as long as the list of changes:
	 * otherwise errors in the changes can get reported as a -1 return
	 * value from kevent() rather than as EV_ERROR events in the events
	 * array.
	 *
	 * (We could instead handle -1 return values from kevent() by
	 * retrying with a smaller changes array or a larger events array,
	 * but this approach seems less risky for now.)
	 */
	if (kqop->events_size < n_changes) {
		int new_size = kqop->events_size;
		do {
			new_size *= 2;
		} while (new_size < n_changes);

		/* NOTE(review): a failed grow is tolerated here; per the
		 * comment above, change errors may then surface as a -1
		 * return from kevent() instead of EV_ERROR entries. */
		kq_grow_events(kqop, new_size);
		events = kqop->events;
	}

	/* Drop the base lock while blocked in the kernel so other threads
	 * can manipulate the base meanwhile. */
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = kevent(kqop->kq, changes, n_changes,
	    events, kqop->events_size, ts_p);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	/* Give the stolen changes array back. */
	EVUTIL_ASSERT(kqop->changes == NULL);
	kqop->changes = changes;

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("kevent");
			return (-1);
		}

		/* Interrupted by a signal: not a fatal error. */
		return (0);
	}

	event_debug(("%s: kevent reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;

		if (events[i].flags & EV_ERROR) {
			/* 'data' carries the errno of the failed change
			 * (see the 'errno = events[i].data' default case). */
			switch (events[i].data) {

			/* Can occur on delete if we are not currently
			 * watching any events on this fd.  That can
			 * happen when the fd was closed and another
			 * file was opened with that fd. */
			case ENOENT:
			/* Can occur for reasons not fully understood
			 * on FreeBSD. */
			case EINVAL:
				continue;

			/* Can occur on a delete if the fd is closed. */
			case EBADF:
				/* XXXX On NetBSD, we can also get EBADF if we
				 * try to add the write side of a pipe, but
				 * the read side has already been closed.
				 * Other BSDs call this situation 'EPIPE'.  It
				 * would be good if we had a way to report
				 * this situation. */
				continue;
			/* These two can occur on an add if the fd was one side
			 * of a pipe, and the other side was closed. */
			case EPERM:
			case EPIPE:
				/* Report read events, if we're listening for
				 * them, so that the user can learn about any
				 * add errors.  (If the operation was a
				 * delete, then udata should be cleared.) */
				if (events[i].udata) {
					/* The operation was an add:
					 * report the error as a read. */
					which |= EV_READ;
					break;
				} else {
					/* The operation was a del:
					 * report nothing. */
					continue;
				}

			/* Other errors shouldn't occur. */
			default:
				errno = events[i].data;
				return (-1);
			}
		} else if (events[i].filter == EVFILT_READ) {
			which |= EV_READ;
		} else if (events[i].filter == EVFILT_WRITE) {
			which |= EV_WRITE;
		} else if (events[i].filter == EVFILT_SIGNAL) {
			which |= EV_SIGNAL;
		}

		if (!which)
			continue;

		if (events[i].filter == EVFILT_SIGNAL) {
			/* Activate the signal event once per dispatch. */
			evmap_signal_active(base, events[i].ident, 1);
		} else {
			/* EV_ET is ORed in unconditionally; evmap
			 * presumably masks it against what the event
			 * registered for -- TODO confirm. */
			evmap_io_active(base, events[i].ident, which | EV_ET);
		}
	}

	if (res == kqop->events_size) {
		/* We used all the events space that we have.  Maybe we should
		   make it bigger.  (Best effort: a failed grow is ignored.) */
		kq_grow_events(kqop, kqop->events_size * 2);
	}

	return (0);
}
michael@0 391
michael@0 392 static void
michael@0 393 kqop_free(struct kqop *kqop)
michael@0 394 {
michael@0 395 if (kqop->changes)
michael@0 396 mm_free(kqop->changes);
michael@0 397 if (kqop->events)
michael@0 398 mm_free(kqop->events);
michael@0 399 if (kqop->kq >= 0 && kqop->pid == getpid())
michael@0 400 close(kqop->kq);
michael@0 401 memset(kqop, 0, sizeof(struct kqop));
michael@0 402 mm_free(kqop);
michael@0 403 }
michael@0 404
michael@0 405 static void
michael@0 406 kq_dealloc(struct event_base *base)
michael@0 407 {
michael@0 408 struct kqop *kqop = base->evbase;
michael@0 409 evsig_dealloc(base);
michael@0 410 kqop_free(kqop);
michael@0 411 }
michael@0 412
michael@0 413 /* signal handling */
michael@0 414 static int
michael@0 415 kq_sig_add(struct event_base *base, int nsignal, short old, short events, void *p)
michael@0 416 {
michael@0 417 struct kqop *kqop = base->evbase;
michael@0 418 struct kevent kev;
michael@0 419 struct timespec timeout = { 0, 0 };
michael@0 420 (void)p;
michael@0 421
michael@0 422 EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);
michael@0 423
michael@0 424 memset(&kev, 0, sizeof(kev));
michael@0 425 kev.ident = nsignal;
michael@0 426 kev.filter = EVFILT_SIGNAL;
michael@0 427 kev.flags = EV_ADD;
michael@0 428
michael@0 429 /* Be ready for the signal if it is sent any
michael@0 430 * time between now and the next call to
michael@0 431 * kq_dispatch. */
michael@0 432 if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
michael@0 433 return (-1);
michael@0 434
michael@0 435 /* Backported from
michael@0 436 * https://github.com/nmathewson/Libevent/commit/148458e0a1fd25e167aa2ef229d1c9a70b27c3e9 */
michael@0 437 /* We can set the handler for most signals to SIG_IGN and
michael@0 438 * still have them reported to us in the queue. However,
michael@0 439 * if the handler for SIGCHLD is SIG_IGN, the system reaps
michael@0 440 * zombie processes for us, and we don't get any notification.
michael@0 441 * This appears to be the only signal with this quirk. */
michael@0 442 if (_evsig_set_handler(base, nsignal,
michael@0 443 nsignal == SIGCHLD ? SIG_DFL : SIG_IGN) == -1) {
michael@0 444 return (-1);
michael@0 445 }
michael@0 446
michael@0 447 return (0);
michael@0 448 }
michael@0 449
michael@0 450 static int
michael@0 451 kq_sig_del(struct event_base *base, int nsignal, short old, short events, void *p)
michael@0 452 {
michael@0 453 struct kqop *kqop = base->evbase;
michael@0 454 struct kevent kev;
michael@0 455
michael@0 456 struct timespec timeout = { 0, 0 };
michael@0 457 (void)p;
michael@0 458
michael@0 459 EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);
michael@0 460
michael@0 461 memset(&kev, 0, sizeof(kev));
michael@0 462 kev.ident = nsignal;
michael@0 463 kev.filter = EVFILT_SIGNAL;
michael@0 464 kev.flags = EV_DELETE;
michael@0 465
michael@0 466 /* Because we insert signal events
michael@0 467 * immediately, we need to delete them
michael@0 468 * immediately, too */
michael@0 469 if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
michael@0 470 return (-1);
michael@0 471
michael@0 472 if (_evsig_restore_handler(base, nsignal) == -1)
michael@0 473 return (-1);
michael@0 474
michael@0 475 return (0);
michael@0 476 }

mercurial