/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"

#ifdef WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <time.h>

#include "event-internal.h"
#include "evmap-internal.h"
#include "mm-internal.h"
#include "changelist-internal.h"

/** An entry for an evmap_io list: notes all the events that want to read or
	write on a given fd, and the number of each.
 */
struct evmap_io {
	struct event_list events;
	ev_uint16_t nread;
	ev_uint16_t nwrite;
};

/* An entry for an evmap_signal list: notes all the events that want to know
   when a signal triggers. */
struct evmap_signal {
	struct event_list events;
};

/* On some platforms, fds start at 0 and increment by 1 as they are
   allocated, and old numbers get reused.  For these platforms, we
   implement io maps just like signal maps: as an array of pointers to
   struct evmap_io.  But on other platforms (windows), sockets are not
   0-indexed, not necessarily consecutive, and not necessarily reused.
   There, we use a hashtable to implement evmap_io.
 */
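/* A sketch of the two lookup paths described above: with the array
 * representation, the evmap_io for fd 7 is simply io->entries[7]; with
 * EVMAP_USE_HT defined, the same lookup hashes the SOCKET value and walks
 * an event_io_map hash bucket instead.  Both paths are hidden behind the
 * GET_IO_SLOT*() macros defined below. */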
#ifdef EVMAP_USE_HT
struct event_map_entry {
	HT_ENTRY(event_map_entry) map_node;
	evutil_socket_t fd;
	union { /* This is a union in case we need to make more things that can
		   be in the hashtable. */
		struct evmap_io evmap_io;
	} ent;
};

/* Helper used by the event_io_map hashtable code; tries to return a good hash
 * of the fd in e->fd. */
static inline unsigned
hashsocket(struct event_map_entry *e)
{
	/* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
	 * matter.  Our hashtable implementation really likes low-order bits,
	 * though, so let's do the rotate-and-add trick. */
	unsigned h = (unsigned) e->fd;
	h += (h >> 2) | (h << 30);
	return h;
}

/* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
 * have the same e->fd. */
static inline int
eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
{
	return e1->fd == e2->fd;
}

HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
    0.5, mm_malloc, mm_realloc, mm_free)

#define GET_IO_SLOT(x, map, slot, type) \
	do { \
		struct event_map_entry _key, *_ent; \
		_key.fd = slot; \
		_ent = HT_FIND(event_io_map, map, &_key); \
		(x) = _ent ? &_ent->ent.type : NULL; \
	} while (0);

#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
	do { \
		struct event_map_entry _key, *_ent; \
		_key.fd = slot; \
		_HT_FIND_OR_INSERT(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &_key, ptr, \
		    { \
			    _ent = *ptr; \
		    }, \
		    { \
			    _ent = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    if (EVUTIL_UNLIKELY(_ent == NULL)) \
				    return (-1); \
			    _ent->fd = slot; \
			    (ctor)(&_ent->ent.type); \
			    _HT_FOI_INSERT(map_node, map, &_key, _ent, ptr) \
		    }); \
		(x) = &_ent->ent.type; \
	} while (0)

void evmap_io_initmap(struct event_io_map *ctx)
{
	HT_INIT(event_io_map, ctx);
}

void evmap_io_clear(struct event_io_map *ctx)
{
	struct event_map_entry **ent, **next, *this;
	for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
		this = *ent;
		next = HT_NEXT_RMV(event_io_map, ctx, ent);
		mm_free(this);
	}
	HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
}
#endif
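/* What follows is the array-backed lookup.  It is always used for the
 * signal map, and doubles as the io map wherever EVMAP_USE_HT is not
 * defined (see the GET_IO_SLOT aliases further down). */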
/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type) \
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SLOT, but construct the entry for 'slot' if it is not present,
   by allocating enough memory for a 'struct type', and initializing the new
   value by calling the function 'ctor' on it.  Makes the function
   return -1 on allocation failure.
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
	do { \
		if ((map)->entries[slot] == NULL) { \
			(map)->entries[slot] = \
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
				return (-1); \
			(ctor)((struct type *)(map)->entries[slot]); \
		} \
		(x) = (struct type *)((map)->entries[slot]); \
	} while (0)

/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len) \
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
void
evmap_io_initmap(struct event_io_map* ctx)
{
	evmap_signal_initmap(ctx);
}
void
evmap_io_clear(struct event_io_map* ctx)
{
	evmap_signal_clear(ctx);
}
#endif
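/* Growth example for the array maps handled by evmap_make_space() below:
 * the entry array starts at 32 slots and doubles until 'slot' fits, so a
 * first registration on fd 200 allocates 256 slots in a single realloc;
 * the newly added tail is zero-filled so empty slots read as NULL. */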
/** Expand 'map' with new entries of width 'msize' until it is big enough
	to store a value in 'slot'.
 */
static int
evmap_make_space(struct event_signal_map *map, int slot, int msize)
{
	if (map->nentries <= slot) {
		int nentries = map->nentries ? map->nentries : 32;
		void **tmp;

		while (nentries <= slot)
			nentries <<= 1;

		tmp = (void **)mm_realloc(map->entries, nentries * msize);
		if (tmp == NULL)
			return (-1);

		memset(&tmp[map->nentries], 0,
		    (nentries - map->nentries) * msize);

		map->nentries = nentries;
		map->entries = tmp;
	}

	return (0);
}

void
evmap_signal_initmap(struct event_signal_map *ctx)
{
	ctx->nentries = 0;
	ctx->entries = NULL;
}

void
evmap_signal_clear(struct event_signal_map *ctx)
{
	if (ctx->entries != NULL) {
		int i;
		for (i = 0; i < ctx->nentries; ++i) {
			if (ctx->entries[i] != NULL)
				mm_free(ctx->entries[i]);
		}
		mm_free(ctx->entries);
		ctx->entries = NULL;
	}
	ctx->nentries = 0;
}


/* code specific to file descriptors */

/** Constructor for struct evmap_io */
static void
evmap_io_init(struct evmap_io *entry)
{
	TAILQ_INIT(&entry->events);
	entry->nread = 0;
	entry->nwrite = 0;
}
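/* evmap_io_add() and evmap_io_del() below keep the per-fd nread/nwrite
 * counts so that the backend's add/del hooks fire only on 0->1 and 1->0
 * transitions; any number of events on the same fd share one backend
 * registration.  For example (hypothetical caller code, assuming an
 * already-configured base), both of these events reach evmap_io_add()
 * via event_add(), but only the first causes evsel->add() for EV_READ:
 *
 *	struct event *a = event_new(base, fd, EV_READ|EV_PERSIST, cb1, NULL);
 *	struct event *b = event_new(base, fd, EV_READ|EV_PERSIST, cb2, NULL);
 *	event_add(a, NULL);
 *	event_add(b, NULL);
 */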
/* return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_add(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx = NULL;
	int nread, nwrite, retval = 0;
	short res = 0, old = 0;
	struct event *old_ev;

	EVUTIL_ASSERT(fd == ev->ev_fd);

	if (fd < 0)
		return 0;

#ifndef EVMAP_USE_HT
	if (fd >= io->nentries) {
		if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
			return (-1);
	}
#endif
	GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
	    evsel->fdinfo_len);

	nread = ctx->nread;
	nwrite = ctx->nwrite;

	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;

	if (ev->ev_events & EV_READ) {
		if (++nread == 1)
			res |= EV_READ;
	}
	if (ev->ev_events & EV_WRITE) {
		if (++nwrite == 1)
			res |= EV_WRITE;
	}
	if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff)) {
		event_warnx("Too many events reading or writing on fd %d",
		    (int)fd);
		return -1;
	}
	if (EVENT_DEBUG_MODE_IS_ON() &&
	    (old_ev = TAILQ_FIRST(&ctx->events)) &&
	    (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
		event_warnx("Tried to mix edge-triggered and non-edge-triggered"
		    " events on fd %d", (int)fd);
		return -1;
	}

	if (res) {
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		/* XXX(niels): we cannot mix edge-triggered and
		 * level-triggered, we should probably assert on
		 * this. */
		if (evsel->add(base, ev->ev_fd,
			old, (ev->ev_events & EV_ET) | res, extra) == -1)
			return (-1);
		retval = 1;
	}

	ctx->nread = (ev_uint16_t) nread;
	ctx->nwrite = (ev_uint16_t) nwrite;
	TAILQ_INSERT_TAIL(&ctx->events, ev, ev_io_next);

	return (retval);
}

/* return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_del(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	int nread, nwrite, retval = 0;
	short res = 0, old = 0;

	if (fd < 0)
		return 0;

	EVUTIL_ASSERT(fd == ev->ev_fd);

#ifndef EVMAP_USE_HT
	if (fd >= io->nentries)
		return (-1);
#endif

	GET_IO_SLOT(ctx, io, fd, evmap_io);

	nread = ctx->nread;
	nwrite = ctx->nwrite;

	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;

	if (ev->ev_events & EV_READ) {
		if (--nread == 0)
			res |= EV_READ;
		EVUTIL_ASSERT(nread >= 0);
	}
	if (ev->ev_events & EV_WRITE) {
		if (--nwrite == 0)
			res |= EV_WRITE;
		EVUTIL_ASSERT(nwrite >= 0);
	}

	if (res) {
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		if (evsel->del(base, ev->ev_fd, old, res, extra) == -1)
			return (-1);
		retval = 1;
	}

	ctx->nread = nread;
	ctx->nwrite = nwrite;
	TAILQ_REMOVE(&ctx->events, ev, ev_io_next);

	return (retval);
}

void
evmap_io_active(struct event_base *base, evutil_socket_t fd, short events)
{
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	struct event *ev;

#ifndef EVMAP_USE_HT
	EVUTIL_ASSERT(fd < io->nentries);
#endif
	GET_IO_SLOT(ctx, io, fd, evmap_io);

	EVUTIL_ASSERT(ctx);
	TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
		if (ev->ev_events & events)
			event_active_nolock(ev, ev->ev_events & events, 1);
	}
}

/* code specific to signals */

static void
evmap_signal_init(struct evmap_signal *entry)
{
	TAILQ_INIT(&entry->events);
}
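/* The signal map needs no read/write counts: evmap_signal_add() only tells
 * the backend about the first event added for a signal (the list was empty),
 * and evmap_signal_del() only tells it when the last one is removed (the
 * list holds a single entry). */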
int
evmap_signal_add(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx = NULL;

	if (sig >= map->nentries) {
		if (evmap_make_space(
			map, sig, sizeof(struct evmap_signal *)) == -1)
			return (-1);
	}
	GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
	    base->evsigsel->fdinfo_len);

	if (TAILQ_EMPTY(&ctx->events)) {
		if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
		    == -1)
			return (-1);
	}

	TAILQ_INSERT_TAIL(&ctx->events, ev, ev_signal_next);

	return (1);
}

int
evmap_signal_del(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;

	if (sig >= map->nentries)
		return (-1);

	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	if (TAILQ_FIRST(&ctx->events) == TAILQ_LAST(&ctx->events, event_list)) {
		if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
			return (-1);
	}

	TAILQ_REMOVE(&ctx->events, ev, ev_signal_next);

	return (1);
}

void
evmap_signal_active(struct event_base *base, evutil_socket_t sig, int ncalls)
{
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;
	struct event *ev;

	EVUTIL_ASSERT(sig < map->nentries);
	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	TAILQ_FOREACH(ev, &ctx->events, ev_signal_next)
		event_active_nolock(ev, EV_SIGNAL, ncalls);
}

void *
evmap_io_get_fdinfo(struct event_io_map *map, evutil_socket_t fd)
{
	struct evmap_io *ctx;
	GET_IO_SLOT(ctx, map, fd, evmap_io);
	if (ctx)
		return ((char*)ctx) + sizeof(struct evmap_io);
	else
		return NULL;
}

/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};

void
event_changelist_init(struct event_changelist *changelist)
{
	changelist->changes = NULL;
	changelist->changes_size = 0;
	changelist->n_changes = 0;
}
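/* The event_changelist_fdinfo for an fd or signal lives in the extra bytes
 * allocated just past its struct evmap_io / struct evmap_signal (the
 * fdinfo_len passed to the GET_*_SLOT_AND_CTOR macros), which is why the
 * helper below simply offsets past the struct to find it. */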
/** Helper: return the changelist_fdinfo corresponding to a given change. */
static inline struct event_changelist_fdinfo *
event_change_get_fdinfo(struct event_base *base,
    const struct event_change *change)
{
	char *ptr;
	if (change->read_change & EV_CHANGE_SIGNAL) {
		struct evmap_signal *ctx;
		GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
		ptr = ((char*)ctx) + sizeof(struct evmap_signal);
	} else {
		struct evmap_io *ctx;
		GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
		ptr = ((char*)ctx) + sizeof(struct evmap_io);
	}
	return (void*)ptr;
}

#ifdef DEBUG_CHANGELIST
/** Make sure that the changelist is consistent with the evmap structures. */
static void
event_changelist_check(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	for (i = 0; i < base->io.nentries; ++i) {
		struct evmap_io *io = base->io.entries[i];
		struct event_changelist_fdinfo *f;
		if (!io)
			continue;
		f = (void*)
		    ( ((char*)io) + sizeof(struct evmap_io) );
		if (f->idxplus1) {
			struct event_change *c = &changelist->changes[f->idxplus1 - 1];
			EVUTIL_ASSERT(c->fd == i);
		}
	}
}
#else
#define event_changelist_check(base)  ((void)0)
#endif

void
event_changelist_remove_all(struct event_changelist *changelist,
    struct event_base *base)
{
	int i;

	event_changelist_check(base);

	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *ch = &changelist->changes[i];
		struct event_changelist_fdinfo *fdinfo =
		    event_change_get_fdinfo(base, ch);
		EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
		fdinfo->idxplus1 = 0;
	}

	changelist->n_changes = 0;

	event_changelist_check(base);
}

void
event_changelist_freemem(struct event_changelist *changelist)
{
	if (changelist->changes)
		mm_free(changelist->changes);
	event_changelist_init(changelist); /* zero it all out. */
}

/** Increase the size of 'changelist' to hold more changes. */
static int
event_changelist_grow(struct event_changelist *changelist)
{
	int new_size;
	struct event_change *new_changes;
	if (changelist->changes_size < 64)
		new_size = 64;
	else
		new_size = changelist->changes_size * 2;

	new_changes = mm_realloc(changelist->changes,
	    new_size * sizeof(struct event_change));

	if (EVUTIL_UNLIKELY(new_changes == NULL))
		return (-1);

	changelist->changes = new_changes;
	changelist->changes_size = new_size;

	return (0);
}
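/* Bookkeeping note: fdinfo->idxplus1 maps an fd back to its slot in
 * changelist->changes.  Zero means "no pending change", so the zero-filled
 * fdinfo space needs no explicit initialization, and the stored value is
 * always the array index plus one. */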
/** Return a pointer to the changelist entry for the file descriptor or signal
 * 'fd', whose fdinfo is 'fdinfo'.  If none exists, construct it, setting its
 * old_events field to old_events.
 */
static struct event_change *
event_changelist_get_or_construct(struct event_changelist *changelist,
    evutil_socket_t fd,
    short old_events,
    struct event_changelist_fdinfo *fdinfo)
{
	struct event_change *change;

	if (fdinfo->idxplus1 == 0) {
		int idx;
		EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);

		if (changelist->n_changes == changelist->changes_size) {
			if (event_changelist_grow(changelist) < 0)
				return NULL;
		}

		idx = changelist->n_changes++;
		change = &changelist->changes[idx];
		fdinfo->idxplus1 = idx + 1;

		memset(change, 0, sizeof(struct event_change));
		change->fd = fd;
		change->old_events = old_events;
	} else {
		change = &changelist->changes[fdinfo->idxplus1 - 1];
		EVUTIL_ASSERT(change->fd == fd);
	}
	return change;
}

int
event_changelist_add(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);

	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	if (!change)
		return -1;

	/* An add replaces any previous delete, but doesn't result in a no-op,
	 * since the delete might fail (because the fd had been closed since
	 * the last add, for instance). */

	if (events & (EV_READ|EV_SIGNAL)) {
		change->read_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}
	if (events & EV_WRITE) {
		change->write_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}

	event_changelist_check(base);
	return (0);
}
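/* Worked example for the add/del pairing handled below: if an fd with no
 * prior backend registration (old_events == 0) gets EV_READ added and then
 * deleted before the next dispatch, event_changelist_del() clears the
 * pending EV_CHANGE_ADD back to 0, and the backend never hears about the
 * fd at all. */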
int
event_changelist_del(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);
	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	event_changelist_check(base);
	if (!change)
		return -1;

	/* A delete removes any previous add, rather than replacing it:
	   on those platforms where "add, delete, dispatch" is not the same
	   as "no-op, dispatch", we want the no-op behavior.

	   As well as checking the current operation we should also check
	   the original set of events to make sure we're not ignoring
	   the case where the add operation is present on an event that
	   was already set.

	   If we have a no-op item, we could remove it from the list
	   entirely, but really there's not much point: skipping the no-op
	   change when we do the dispatch later is far cheaper than rejuggling
	   the array now.

	   As this stands, it also lets through deletions of events that are
	   not currently set.
	 */

	if (events & (EV_READ|EV_SIGNAL)) {
		if (!(change->old_events & (EV_READ | EV_SIGNAL)) &&
		    (change->read_change & EV_CHANGE_ADD))
			change->read_change = 0;
		else
			change->read_change = EV_CHANGE_DEL;
	}
	if (events & EV_WRITE) {
		if (!(change->old_events & EV_WRITE) &&
		    (change->write_change & EV_CHANGE_ADD))
			change->write_change = 0;
		else
			change->write_change = EV_CHANGE_DEL;
	}

	event_changelist_check(base);
	return (0);
}

void
evmap_check_integrity(struct event_base *base)
{
#define EVLIST_X_SIGFOUND 0x1000
#define EVLIST_X_IOFOUND 0x2000

	evutil_socket_t i;
	struct event *ev;
	struct event_io_map *io = &base->io;
	struct event_signal_map *sigmap = &base->sigmap;
#ifdef EVMAP_USE_HT
	struct event_map_entry **mapent;
#endif
	int nsignals, ntimers, nio;
	nsignals = ntimers = nio = 0;

	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INIT);
		ev->ev_flags &= ~(EVLIST_X_SIGFOUND|EVLIST_X_IOFOUND);
	}

#ifdef EVMAP_USE_HT
	HT_FOREACH(mapent, event_io_map, io) {
		struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
		i = (*mapent)->fd;
#else
	for (i = 0; i < io->nentries; ++i) {
		struct evmap_io *ctx = io->entries[i];

		if (!ctx)
			continue;
#endif

		TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
			EVUTIL_ASSERT(!(ev->ev_flags & EVLIST_X_IOFOUND));
			EVUTIL_ASSERT(ev->ev_fd == i);
			ev->ev_flags |= EVLIST_X_IOFOUND;
			nio++;
		}
	}

	for (i = 0; i < sigmap->nentries; ++i) {
		struct evmap_signal *ctx = sigmap->entries[i];
		if (!ctx)
			continue;

		TAILQ_FOREACH(ev, &ctx->events, ev_signal_next) {
			EVUTIL_ASSERT(!(ev->ev_flags & EVLIST_X_SIGFOUND));
			EVUTIL_ASSERT(ev->ev_fd == i);
			ev->ev_flags |= EVLIST_X_SIGFOUND;
			nsignals++;
		}
	}

	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (ev->ev_events & (EV_READ|EV_WRITE)) {
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_X_IOFOUND);
			--nio;
		}
		if (ev->ev_events & EV_SIGNAL) {
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_X_SIGFOUND);
			--nsignals;
		}
	}

	EVUTIL_ASSERT(nio == 0);
	EVUTIL_ASSERT(nsignals == 0);
	/* There is no "EVUTIL_ASSERT(ntimers == 0)": eventqueue is only for
	 * pending signals and io events.
	 */
}