ipc/chromium/src/third_party/libevent/evmap.c

Wed, 31 Dec 2014 06:09:35 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Wed, 31 Dec 2014 06:09:35 +0100
changeset 0
6474c204b198
permissions
-rw-r--r--

Cloned from the upstream tor-browser origin at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for development purposes.

michael@0 1 /*
michael@0 2 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
michael@0 3 *
michael@0 4 * Redistribution and use in source and binary forms, with or without
michael@0 5 * modification, are permitted provided that the following conditions
michael@0 6 * are met:
michael@0 7 * 1. Redistributions of source code must retain the above copyright
michael@0 8 * notice, this list of conditions and the following disclaimer.
michael@0 9 * 2. Redistributions in binary form must reproduce the above copyright
michael@0 10 * notice, this list of conditions and the following disclaimer in the
michael@0 11 * documentation and/or other materials provided with the distribution.
michael@0 12 * 3. The name of the author may not be used to endorse or promote products
michael@0 13 * derived from this software without specific prior written permission.
michael@0 14 *
michael@0 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
michael@0 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
michael@0 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
michael@0 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
michael@0 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
michael@0 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
michael@0 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
michael@0 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
michael@0 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
michael@0 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
michael@0 25 */
michael@0 26 #include "event2/event-config.h"
michael@0 27
michael@0 28 #ifdef WIN32
michael@0 29 #include <winsock2.h>
michael@0 30 #define WIN32_LEAN_AND_MEAN
michael@0 31 #include <windows.h>
michael@0 32 #undef WIN32_LEAN_AND_MEAN
michael@0 33 #endif
michael@0 34 #include <sys/types.h>
michael@0 35 #if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
michael@0 36 #include <sys/time.h>
michael@0 37 #endif
michael@0 38 #include <sys/queue.h>
michael@0 39 #include <stdio.h>
michael@0 40 #include <stdlib.h>
michael@0 41 #ifndef WIN32
michael@0 42 #include <unistd.h>
michael@0 43 #endif
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <string.h>
#include <time.h>
michael@0 48
michael@0 49 #include "event-internal.h"
michael@0 50 #include "evmap-internal.h"
michael@0 51 #include "mm-internal.h"
michael@0 52 #include "changelist-internal.h"
michael@0 53
/** An entry for an evmap_io list: notes all the events that want to read or
	write on a given fd, and the number of each.
*/
struct evmap_io {
	struct event_list events;	/* every event registered on this fd */
	ev_uint16_t nread;		/* how many of them want EV_READ */
	ev_uint16_t nwrite;		/* how many of them want EV_WRITE */
};
michael@0 62
/* An entry for an evmap_signal list: notes all the events that want to know
   when a signal triggers. */
struct evmap_signal {
	struct event_list events;	/* every event registered on this signal */
};
michael@0 68
michael@0 69 /* On some platforms, fds start at 0 and increment by 1 as they are
michael@0 70 allocated, and old numbers get used. For these platforms, we
michael@0 71 implement io maps just like signal maps: as an array of pointers to
michael@0 72 struct evmap_io. But on other platforms (windows), sockets are not
michael@0 73 0-indexed, not necessarily consecutive, and not necessarily reused.
michael@0 74 There, we use a hashtable to implement evmap_io.
michael@0 75 */
michael@0 76 #ifdef EVMAP_USE_HT
/* Wrapper that lets an evmap_io (plus the backend's per-fd data tail) live
 * in a hashtable keyed on the socket. */
struct event_map_entry {
	HT_ENTRY(event_map_entry) map_node;	/* hashtable linkage */
	evutil_socket_t fd;			/* the hash key */
	union { /* This is a union in case we need to make more things that can
		   be in the hashtable. */
		struct evmap_io evmap_io;
	} ent;
};
michael@0 85
michael@0 86 /* Helper used by the event_io_map hashtable code; tries to return a good hash
michael@0 87 * of the fd in e->fd. */
michael@0 88 static inline unsigned
michael@0 89 hashsocket(struct event_map_entry *e)
michael@0 90 {
michael@0 91 /* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
michael@0 92 * matter. Our hashtable implementation really likes low-order bits,
michael@0 93 * though, so let's do the rotate-and-add trick. */
michael@0 94 unsigned h = (unsigned) e->fd;
michael@0 95 h += (h >> 2) | (h << 30);
michael@0 96 return h;
michael@0 97 }
michael@0 98
michael@0 99 /* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
michael@0 100 * have the same e->fd. */
michael@0 101 static inline int
michael@0 102 eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
michael@0 103 {
michael@0 104 return e1->fd == e2->fd;
michael@0 105 }
michael@0 106
/* Instantiate the hashtable functions for the fd->evmap_io map, using
 * libevent's allocator and a 0.5 load factor. */
HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
    0.5, mm_malloc, mm_realloc, mm_free)
michael@0 110
/* Set 'x' to the evmap_io for fd 'slot' in the hashtable 'map', or to NULL
 * if that fd has no entry.  Note: no semicolon after "while (0)" -- with the
 * stray semicolon the invocation `GET_IO_SLOT(...);` expanded to TWO
 * statements, which breaks the macro inside an unbraced if/else. */
#define GET_IO_SLOT(x, map, slot, type)					\
	do {								\
		struct event_map_entry _key, *_ent;			\
		_key.fd = slot;						\
		_ent = HT_FIND(event_io_map, map, &_key);		\
		(x) = _ent ? &_ent->ent.type : NULL;			\
	} while (0)
michael@0 118
/* As GET_IO_SLOT, but if no entry exists for 'slot', allocate one (with
 * 'fdinfo_len' extra bytes after it for the backend's per-fd data), run
 * 'ctor' on it, and insert it into the hashtable.  Makes the enclosing
 * function return -1 on allocation failure. */
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		struct event_map_entry _key, *_ent;			\
		_key.fd = slot;						\
		_HT_FIND_OR_INSERT(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &_key, ptr,			\
		    {							\
			    _ent = *ptr;				\
		    },							\
		    {							\
			    _ent = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    if (EVUTIL_UNLIKELY(_ent == NULL))		\
				    return (-1);			\
			    _ent->fd = slot;				\
			    (ctor)(&_ent->ent.type);			\
			    _HT_FOI_INSERT(map_node, map, &_key, _ent, ptr) \
		    });							\
		(x) = &_ent->ent.type;					\
	} while (0)
michael@0 138
/* Initialize an fd->events map (hashtable-backed build). */
void evmap_io_initmap(struct event_io_map *ctx)
{
	HT_INIT(event_io_map, ctx);
}
michael@0 143
/* Free every entry in an fd->events hashtable, then release the table's own
 * storage. */
void evmap_io_clear(struct event_io_map *ctx)
{
	struct event_map_entry **ent, **next, *this;
	for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
		this = *ent;
		/* Advance (and unlink the current entry) before freeing it,
		 * so the iterator never touches freed memory. */
		next = HT_NEXT_RMV(event_io_map, ctx, ent);
		mm_free(this);
	}
	HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
}
michael@0 154 #endif
michael@0 155
/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type)			\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SLOT, but construct the entry for 'slot' if it is not present,
   by allocating enough memory for a 'struct type' (plus 'fdinfo_len' bytes
   of backend per-fd data), and initializing the new value by calling the
   function 'ctor' on it.  Makes the enclosing function return -1 on
   allocation failure.
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		if ((map)->entries[slot] == NULL) {			\
			(map)->entries[slot] =				\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
				return (-1);				\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}							\
		(x) = (struct type *)((map)->entries[slot]);		\
	} while (0)
michael@0 177
/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
/* Initialize an fd->events map (array-backed build). */
void
evmap_io_initmap(struct event_io_map* ctx)
{
	evmap_signal_initmap(ctx);
}
/* Free all entries in an fd->events map (array-backed build). */
void
evmap_io_clear(struct event_io_map* ctx)
{
	evmap_signal_clear(ctx);
}
#endif
michael@0 196
michael@0 197
michael@0 198 /** Expand 'map' with new entries of width 'msize' until it is big enough
michael@0 199 to store a value in 'slot'.
michael@0 200 */
michael@0 201 static int
michael@0 202 evmap_make_space(struct event_signal_map *map, int slot, int msize)
michael@0 203 {
michael@0 204 if (map->nentries <= slot) {
michael@0 205 int nentries = map->nentries ? map->nentries : 32;
michael@0 206 void **tmp;
michael@0 207
michael@0 208 while (nentries <= slot)
michael@0 209 nentries <<= 1;
michael@0 210
michael@0 211 tmp = (void **)mm_realloc(map->entries, nentries * msize);
michael@0 212 if (tmp == NULL)
michael@0 213 return (-1);
michael@0 214
michael@0 215 memset(&tmp[map->nentries], 0,
michael@0 216 (nentries - map->nentries) * msize);
michael@0 217
michael@0 218 map->nentries = nentries;
michael@0 219 map->entries = tmp;
michael@0 220 }
michael@0 221
michael@0 222 return (0);
michael@0 223 }
michael@0 224
michael@0 225 void
michael@0 226 evmap_signal_initmap(struct event_signal_map *ctx)
michael@0 227 {
michael@0 228 ctx->nentries = 0;
michael@0 229 ctx->entries = NULL;
michael@0 230 }
michael@0 231
michael@0 232 void
michael@0 233 evmap_signal_clear(struct event_signal_map *ctx)
michael@0 234 {
michael@0 235 if (ctx->entries != NULL) {
michael@0 236 int i;
michael@0 237 for (i = 0; i < ctx->nentries; ++i) {
michael@0 238 if (ctx->entries[i] != NULL)
michael@0 239 mm_free(ctx->entries[i]);
michael@0 240 }
michael@0 241 mm_free(ctx->entries);
michael@0 242 ctx->entries = NULL;
michael@0 243 }
michael@0 244 ctx->nentries = 0;
michael@0 245 }
michael@0 246
michael@0 247
michael@0 248 /* code specific to file descriptors */
michael@0 249
michael@0 250 /** Constructor for struct evmap_io */
michael@0 251 static void
michael@0 252 evmap_io_init(struct evmap_io *entry)
michael@0 253 {
michael@0 254 TAILQ_INIT(&entry->events);
michael@0 255 entry->nread = 0;
michael@0 256 entry->nwrite = 0;
michael@0 257 }
michael@0 258
michael@0 259
/* Register the IO event 'ev' (some combination of EV_READ|EV_WRITE|EV_ET)
 * for 'fd', telling the backend only when the fd gains its first reader or
 * first writer.
 * Return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_add(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx = NULL;
	int nread, nwrite, retval = 0;
	short res = 0, old = 0;
	struct event *old_ev;

	EVUTIL_ASSERT(fd == ev->ev_fd);

	if (fd < 0)
		return 0;

#ifndef EVMAP_USE_HT
	/* Array-backed map: make sure the array reaches index 'fd'. */
	if (fd >= io->nentries) {
		if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
			return (-1);
	}
#endif
	/* Find (or create) the per-fd entry, leaving fdinfo_len bytes after
	 * it for the backend's per-fd data. */
	GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
			     evsel->fdinfo_len);

	nread = ctx->nread;
	nwrite = ctx->nwrite;

	/* 'old' = the interest set the backend already knows about. */
	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;

	/* 'res' = the interest bits that become newly active with this add
	 * (counter goes 0 -> 1). */
	if (ev->ev_events & EV_READ) {
		if (++nread == 1)
			res |= EV_READ;
	}
	if (ev->ev_events & EV_WRITE) {
		if (++nwrite == 1)
			res |= EV_WRITE;
	}
	/* The counters are stored as ev_uint16_t; refuse to overflow them. */
	if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff)) {
		event_warnx("Too many events reading or writing on fd %d",
		    (int)fd);
		return -1;
	}
	/* In debug mode, reject mixing EV_ET and non-EV_ET events on one fd;
	 * checking the first queued event is enough since they must all
	 * agree. */
	if (EVENT_DEBUG_MODE_IS_ON() &&
	    (old_ev = TAILQ_FIRST(&ctx->events)) &&
	    (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
		event_warnx("Tried to mix edge-triggered and non-edge-triggered"
		    " events on fd %d", (int)fd);
		return -1;
	}

	if (res) {
		/* The backend's per-fd data sits right after the evmap_io. */
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		/* XXX(niels): we cannot mix edge-triggered and
		 * level-triggered, we should probably assert on
		 * this. */
		if (evsel->add(base, ev->ev_fd,
			old, (ev->ev_events & EV_ET) | res, extra) == -1)
			return (-1);
		retval = 1;
	}

	ctx->nread = (ev_uint16_t) nread;
	ctx->nwrite = (ev_uint16_t) nwrite;
	TAILQ_INSERT_TAIL(&ctx->events, ev, ev_io_next);

	return (retval);
}
michael@0 332
michael@0 333 /* return -1 on error, 0 on success if nothing changed in the event backend,
michael@0 334 * and 1 on success if something did. */
michael@0 335 int
michael@0 336 evmap_io_del(struct event_base *base, evutil_socket_t fd, struct event *ev)
michael@0 337 {
michael@0 338 const struct eventop *evsel = base->evsel;
michael@0 339 struct event_io_map *io = &base->io;
michael@0 340 struct evmap_io *ctx;
michael@0 341 int nread, nwrite, retval = 0;
michael@0 342 short res = 0, old = 0;
michael@0 343
michael@0 344 if (fd < 0)
michael@0 345 return 0;
michael@0 346
michael@0 347 EVUTIL_ASSERT(fd == ev->ev_fd);
michael@0 348
michael@0 349 #ifndef EVMAP_USE_HT
michael@0 350 if (fd >= io->nentries)
michael@0 351 return (-1);
michael@0 352 #endif
michael@0 353
michael@0 354 GET_IO_SLOT(ctx, io, fd, evmap_io);
michael@0 355
michael@0 356 nread = ctx->nread;
michael@0 357 nwrite = ctx->nwrite;
michael@0 358
michael@0 359 if (nread)
michael@0 360 old |= EV_READ;
michael@0 361 if (nwrite)
michael@0 362 old |= EV_WRITE;
michael@0 363
michael@0 364 if (ev->ev_events & EV_READ) {
michael@0 365 if (--nread == 0)
michael@0 366 res |= EV_READ;
michael@0 367 EVUTIL_ASSERT(nread >= 0);
michael@0 368 }
michael@0 369 if (ev->ev_events & EV_WRITE) {
michael@0 370 if (--nwrite == 0)
michael@0 371 res |= EV_WRITE;
michael@0 372 EVUTIL_ASSERT(nwrite >= 0);
michael@0 373 }
michael@0 374
michael@0 375 if (res) {
michael@0 376 void *extra = ((char*)ctx) + sizeof(struct evmap_io);
michael@0 377 if (evsel->del(base, ev->ev_fd, old, res, extra) == -1)
michael@0 378 return (-1);
michael@0 379 retval = 1;
michael@0 380 }
michael@0 381
michael@0 382 ctx->nread = nread;
michael@0 383 ctx->nwrite = nwrite;
michael@0 384 TAILQ_REMOVE(&ctx->events, ev, ev_io_next);
michael@0 385
michael@0 386 return (retval);
michael@0 387 }
michael@0 388
/* Make active every event registered on 'fd' that asked for any of the bits
 * in 'events' (EV_READ|EV_WRITE). */
void
evmap_io_active(struct event_base *base, evutil_socket_t fd, short events)
{
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	struct event *ev;

#ifndef EVMAP_USE_HT
	EVUTIL_ASSERT(fd < io->nentries);
#endif
	GET_IO_SLOT(ctx, io, fd, evmap_io);

	EVUTIL_ASSERT(ctx);
	TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
		/* Activate with only the subset of bits the event wants. */
		if (ev->ev_events & events)
			event_active_nolock(ev, ev->ev_events & events, 1);
	}
}
michael@0 407
michael@0 408 /* code specific to signals */
michael@0 409
/* Constructor for struct evmap_signal: start with an empty event list. */
static void
evmap_signal_init(struct evmap_signal *entry)
{
	TAILQ_INIT(&entry->events);
}
michael@0 415
michael@0 416
/* Register 'ev' for signal 'sig', notifying the signal backend only when the
 * first event for that signal appears.  Returns 1 on success, -1 on error. */
int
evmap_signal_add(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx = NULL;

	/* Grow the signal map so that 'sig' is a valid index. */
	if (sig >= map->nentries) {
		if (evmap_make_space(
			map, sig, sizeof(struct evmap_signal *)) == -1)
			return (-1);
	}
	GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
	    base->evsigsel->fdinfo_len);

	/* First event for this signal: tell the backend to watch it. */
	if (TAILQ_EMPTY(&ctx->events)) {
		if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
		    == -1)
			return (-1);
	}

	TAILQ_INSERT_TAIL(&ctx->events, ev, ev_signal_next);

	return (1);
}
michael@0 442
/* Unregister 'ev' for signal 'sig', notifying the signal backend when the
 * last event for that signal goes away.  Returns 1 on success, -1 on error. */
int
evmap_signal_del(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;

	if (sig >= map->nentries)
		return (-1);

	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	/* first == last means the list holds at most one event, so this
	 * delete empties it: drop the signal from the backend. */
	if (TAILQ_FIRST(&ctx->events) == TAILQ_LAST(&ctx->events, event_list)) {
		if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
			return (-1);
	}

	TAILQ_REMOVE(&ctx->events, ev, ev_signal_next);

	return (1);
}
michael@0 464
/* Make every event registered for signal 'sig' active, each scheduled to run
 * its callback 'ncalls' times. */
void
evmap_signal_active(struct event_base *base, evutil_socket_t sig, int ncalls)
{
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;
	struct event *ev;

	EVUTIL_ASSERT(sig < map->nentries);
	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	TAILQ_FOREACH(ev, &ctx->events, ev_signal_next)
		event_active_nolock(ev, EV_SIGNAL, ncalls);
}
michael@0 478
michael@0 479 void *
michael@0 480 evmap_io_get_fdinfo(struct event_io_map *map, evutil_socket_t fd)
michael@0 481 {
michael@0 482 struct evmap_io *ctx;
michael@0 483 GET_IO_SLOT(ctx, map, fd, evmap_io);
michael@0 484 if (ctx)
michael@0 485 return ((char*)ctx) + sizeof(struct evmap_io);
michael@0 486 else
michael@0 487 return NULL;
michael@0 488 }
michael@0 489
/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};
michael@0 497
michael@0 498 void
michael@0 499 event_changelist_init(struct event_changelist *changelist)
michael@0 500 {
michael@0 501 changelist->changes = NULL;
michael@0 502 changelist->changes_size = 0;
michael@0 503 changelist->n_changes = 0;
michael@0 504 }
michael@0 505
/** Helper: return the changelist_fdinfo corresponding to a given change.
 * The fdinfo occupies the extra bytes allocated just past the evmap_signal
 * or evmap_io entry for the change's signal or fd. */
static inline struct event_changelist_fdinfo *
event_change_get_fdinfo(struct event_base *base,
    const struct event_change *change)
{
	char *ptr;
	if (change->read_change & EV_CHANGE_SIGNAL) {
		struct evmap_signal *ctx;
		GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
		ptr = ((char*)ctx) + sizeof(struct evmap_signal);
	} else {
		struct evmap_io *ctx;
		GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
		ptr = ((char*)ctx) + sizeof(struct evmap_io);
	}
	return (void*)ptr;
}
michael@0 523
#ifdef DEBUG_CHANGELIST
/** Make sure that the changelist is consistent with the evmap structures:
 * every change points at an fdinfo that points back at it, and every fdinfo
 * with an index points at a change for the same fd. */
static void
event_changelist_check(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	/* Forward direction: change -> fdinfo -> same index. */
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	/* Reverse direction: fdinfo -> change -> same fd. */
	for (i = 0; i < base->io.nentries; ++i) {
		struct evmap_io *io = base->io.entries[i];
		struct event_changelist_fdinfo *f;
		if (!io)
			continue;
		f = (void*)
		    ( ((char*)io) + sizeof(struct evmap_io) );
		if (f->idxplus1) {
			struct event_change *c = &changelist->changes[f->idxplus1 - 1];
			EVUTIL_ASSERT(c->fd == i);
		}
	}
}
#else
#define event_changelist_check(base)  ((void)0)
#endif
michael@0 558
/* Discard every pending change, first resetting each fd's back-pointer into
 * the changelist so the two structures stay consistent. */
void
event_changelist_remove_all(struct event_changelist *changelist,
    struct event_base *base)
{
	int i;

	event_changelist_check(base);

	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *ch = &changelist->changes[i];
		struct event_changelist_fdinfo *fdinfo =
		    event_change_get_fdinfo(base, ch);
		EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
		fdinfo->idxplus1 = 0;	/* 0 means "no changelist entry" */
	}

	changelist->n_changes = 0;

	event_changelist_check(base);
}
michael@0 579
michael@0 580 void
michael@0 581 event_changelist_freemem(struct event_changelist *changelist)
michael@0 582 {
michael@0 583 if (changelist->changes)
michael@0 584 mm_free(changelist->changes);
michael@0 585 event_changelist_init(changelist); /* zero it all out. */
michael@0 586 }
michael@0 587
michael@0 588 /** Increase the size of 'changelist' to hold more changes. */
michael@0 589 static int
michael@0 590 event_changelist_grow(struct event_changelist *changelist)
michael@0 591 {
michael@0 592 int new_size;
michael@0 593 struct event_change *new_changes;
michael@0 594 if (changelist->changes_size < 64)
michael@0 595 new_size = 64;
michael@0 596 else
michael@0 597 new_size = changelist->changes_size * 2;
michael@0 598
michael@0 599 new_changes = mm_realloc(changelist->changes,
michael@0 600 new_size * sizeof(struct event_change));
michael@0 601
michael@0 602 if (EVUTIL_UNLIKELY(new_changes == NULL))
michael@0 603 return (-1);
michael@0 604
michael@0 605 changelist->changes = new_changes;
michael@0 606 changelist->changes_size = new_size;
michael@0 607
michael@0 608 return (0);
michael@0 609 }
michael@0 610
/** Return a pointer to the changelist entry for the file descriptor or signal
 * 'fd', whose fdinfo is 'fdinfo'.  If none exists, construct it, setting its
 * old_events field to old_events.  Returns NULL if the changelist cannot be
 * grown.
 */
static struct event_change *
event_changelist_get_or_construct(struct event_changelist *changelist,
    evutil_socket_t fd,
    short old_events,
    struct event_changelist_fdinfo *fdinfo)
{
	struct event_change *change;

	if (fdinfo->idxplus1 == 0) {
		/* No pending change for this fd yet: append a fresh slot. */
		int idx;
		EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);

		if (changelist->n_changes == changelist->changes_size) {
			if (event_changelist_grow(changelist) < 0)
				return NULL;
		}

		idx = changelist->n_changes++;
		change = &changelist->changes[idx];
		fdinfo->idxplus1 = idx + 1;	/* stored +1 so 0 means "none" */

		memset(change, 0, sizeof(struct event_change));
		change->fd = fd;
		change->old_events = old_events;
	} else {
		/* Reuse the existing pending entry for this fd. */
		change = &changelist->changes[fdinfo->idxplus1 - 1];
		EVUTIL_ASSERT(change->fd == fd);
	}
	return change;
}
michael@0 645
/* Record an "add" operation for 'events' on 'fd' in the changelist; 'p' is
 * the fd's changelist fdinfo.  Returns 0 on success, -1 on failure. */
int
event_changelist_add(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);

	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	if (!change)
		return -1;

	/* An add replaces any previous delete, but doesn't result in a no-op,
	 * since the delete might fail (because the fd had been closed since
	 * the last add, for instance. */

	if (events & (EV_READ|EV_SIGNAL)) {
		change->read_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}
	if (events & EV_WRITE) {
		change->write_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}

	event_changelist_check(base);
	return (0);
}
michael@0 676
/* Record a "del" operation for 'events' on 'fd' in the changelist; 'p' is
 * the fd's changelist fdinfo.  Returns 0 on success, -1 on failure. */
int
event_changelist_del(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);
	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	event_changelist_check(base);
	if (!change)
		return -1;

	/* A delete removes any previous add, rather than replacing it:
	   on those platforms where "add, delete, dispatch" is not the same
	   as "no-op, dispatch", we want the no-op behavior.

	   As well as checking the current operation we should also check
	   the original set of events to make sure were not ignoring
	   the case where the add operation is present on an event that
	   was already set.

	   If we have a no-op item, we could remove it it from the list
	   entirely, but really there's not much point: skipping the no-op
	   change when we do the dispatch later is far cheaper than rejuggling
	   the array now.

	   As this stands, it also lets through deletions of events that are
	   not currently set.
	 */

	if (events & (EV_READ|EV_SIGNAL)) {
		/* Cancel a pending add only if the backend never had this
		 * interest before; otherwise record an explicit delete. */
		if (!(change->old_events & (EV_READ | EV_SIGNAL)) &&
		    (change->read_change & EV_CHANGE_ADD))
			change->read_change = 0;
		else
			change->read_change = EV_CHANGE_DEL;
	}
	if (events & EV_WRITE) {
		if (!(change->old_events & EV_WRITE) &&
		    (change->write_change & EV_CHANGE_ADD))
			change->write_change = 0;
		else
			change->write_change = EV_CHANGE_DEL;
	}

	event_changelist_check(base);
	return (0);
}
michael@0 727
/* Debugging aid: verify that the io and signal maps agree with the inserted
 * event queue -- every queued io/signal event appears in exactly one map
 * slot, and every map entry corresponds to a queued event.  Uses two private
 * scratch bits in ev_flags to mark events as it finds them. */
void
evmap_check_integrity(struct event_base *base)
{
#define EVLIST_X_SIGFOUND 0x1000
#define EVLIST_X_IOFOUND 0x2000

	evutil_socket_t i;
	struct event *ev;
	struct event_io_map *io = &base->io;
	struct event_signal_map *sigmap = &base->sigmap;
#ifdef EVMAP_USE_HT
	struct event_map_entry **mapent;
#endif
	int nsignals, ntimers, nio;
	nsignals = ntimers = nio = 0;

	/* Clear the scratch bits on every inserted event. */
	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INIT);
		ev->ev_flags &= ~(EVLIST_X_SIGFOUND|EVLIST_X_IOFOUND);
	}

	/* Walk the io map (the loop header differs between the hashtable and
	 * array builds, but the body below is shared by both). */
#ifdef EVMAP_USE_HT
	HT_FOREACH(mapent, event_io_map, io) {
		struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
		i = (*mapent)->fd;
#else
	for (i = 0; i < io->nentries; ++i) {
		struct evmap_io *ctx = io->entries[i];

		if (!ctx)
			continue;
#endif

		TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
			EVUTIL_ASSERT(!(ev->ev_flags & EVLIST_X_IOFOUND));
			EVUTIL_ASSERT(ev->ev_fd == i);
			ev->ev_flags |= EVLIST_X_IOFOUND;
			nio++;
		}
	}

	/* Walk the signal map the same way. */
	for (i = 0; i < sigmap->nentries; ++i) {
		struct evmap_signal *ctx = sigmap->entries[i];
		if (!ctx)
			continue;

		TAILQ_FOREACH(ev, &ctx->events, ev_signal_next) {
			EVUTIL_ASSERT(!(ev->ev_flags & EVLIST_X_SIGFOUND));
			EVUTIL_ASSERT(ev->ev_fd == i);
			ev->ev_flags |= EVLIST_X_SIGFOUND;
			nsignals++;
		}
	}

	/* Every queued io/signal event must have been found; the counters
	 * should cancel out exactly. */
	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (ev->ev_events & (EV_READ|EV_WRITE)) {
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_X_IOFOUND);
			--nio;
		}
		if (ev->ev_events & EV_SIGNAL) {
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_X_SIGFOUND);
			--nsignals;
		}
	}

	EVUTIL_ASSERT(nio == 0);
	EVUTIL_ASSERT(nsignals == 0);
	/* There is no "EVUTIL_ASSERT(ntimers == 0)": eventqueue is only for
	 * pending signals and io events.
	 */
}

mercurial