/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _EVENT_INTERNAL_H_
#define _EVENT_INTERNAL_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "event2/event-config.h"
#include <time.h>
#include <sys/queue.h>
#include "event2/event_struct.h"
#include "minheap-internal.h"
#include "evsignal-internal.h"
#include "mm-internal.h"
#include "defer-internal.h"

/* map union members back */

/* mutually exclusive */
#define ev_signal_next	_ev.ev_signal.ev_signal_next
#define ev_io_next	_ev.ev_io.ev_io_next
#define ev_io_timeout	_ev.ev_io.ev_timeout

/* used only by signals */
#define ev_ncalls	_ev.ev_signal.ev_ncalls
#define ev_pncalls	_ev.ev_signal.ev_pncalls

/* Possible values for ev_closure in struct event. */
#define EV_CLOSURE_NONE 0
#define EV_CLOSURE_SIGNAL 1
#define EV_CLOSURE_PERSIST 2

/** Structure to define the backend of a given event_base. */
struct eventop {
	/** The name of this backend. */
	const char *name;
	/** Function to set up an event_base to use this backend.  It should
	 * create a new structure holding whatever information is needed to
	 * run the backend, and return it.  The returned pointer will get
	 * stored by event_init into the event_base.evbase field.  On failure,
	 * this function should return NULL. */
	void *(*init)(struct event_base *);
	/** Enable reading/writing on a given fd or signal.  'events' will be
	 * the events that we're trying to enable: one or more of EV_READ,
	 * EV_WRITE, EV_SIGNAL, and EV_ET.  'old' will be those events that
	 * were enabled on this fd previously.  'fdinfo' will be a structure
	 * associated with the fd by the evmap; its size is defined by the
	 * fdinfo_len field below.  It will be set to 0 the first time the fd
	 * is added.  The function should return 0 on success and -1 on error.
	 */
	int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** As "add", except 'events' contains the events we mean to disable. */
	int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** Function to implement the core of an event loop.  It must see which
	    added events are ready, and cause event_active to be called for each
	    active event (usually via event_io_active or such).  It should
	    return 0 on success and -1 on error.
	 */
	int (*dispatch)(struct event_base *, struct timeval *);
	/** Function to clean up and free our data from the event_base. */
	void (*dealloc)(struct event_base *);
	/** Flag: set if we need to reinitialize the event base after we fork.
	 */
	int need_reinit;
	/** Bit-array of supported event_method_features that this backend can
	 * provide. */
	enum event_method_feature features;
	/** Length of the extra information we should record for each fd that
	    has one or more active events.  This information is recorded
	    as part of the evmap entry for each fd, and passed as an argument
	    to the add and del functions above.
	 */
	size_t fdinfo_len;
};
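
/*
 * Illustrative sketch (not part of this header): a backend exposes itself
 * as a static instance of struct eventop, with one function per slot.  The
 * "sketch_*" names below are hypothetical; the real instances live in the
 * backend files such as select.c, poll.c, and epoll.c.
 */
#if 0
static void *
sketch_init(struct event_base *base)
{
	/* Allocate and return backend-private state; NULL on failure.  The
	 * returned pointer ends up in base->evbase, as described above. */
	return mm_calloc(1, sizeof(struct sketch_state));
}

static const struct eventop sketchops = {
	"sketch",
	sketch_init,
	sketch_add,		/* enable EV_READ/EV_WRITE/... on an fd */
	sketch_del,		/* disable events on an fd */
	sketch_dispatch,	/* wait, then activate ready events */
	sketch_dealloc,
	1,			/* need_reinit: rebuild state after fork() */
	0,			/* features: no EV_FEATURE_* guarantees */
	0			/* fdinfo_len: no per-fd extra state */
};
#endif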

#ifdef WIN32
/* If we're on win32, then file descriptors are not nice low densely packed
   integers.  Instead, they are pointer-like windows handles, and we want to
   use a hashtable instead of an array to map fds to events.
*/
#define EVMAP_USE_HT
#endif

/* #define HT_CACHE_HASH_VALS */

#ifdef EVMAP_USE_HT
#include "ht-internal.h"
struct event_map_entry;
HT_HEAD(event_io_map, event_map_entry);
#else
#define event_io_map event_signal_map
#endif

/* Used to map signal numbers to a list of events.  If EVMAP_USE_HT is not
   defined, this structure is also used as event_io_map, which maps fds to a
   list of events.
*/
struct event_signal_map {
	/* An array of evmap_io * or of evmap_signal *; empty entries are
	 * set to NULL. */
	void **entries;
	/* The number of entries available in entries */
	int nentries;
};
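
/*
 * Illustrative note: the map is indexed directly, so a signal lookup is just
 * an array access.  A hypothetical helper might look like the sketch below;
 * "sketch_lookup_signal" does not exist, and the evmap_signal type itself is
 * private to evmap.c, which also grows 'entries' on demand as signals (or
 * fds) are added.
 */
#if 0
static struct evmap_signal *
sketch_lookup_signal(struct event_base *base, int sig)
{
	if (sig < 0 || sig >= base->sigmap.nentries)
		return NULL;	/* no event ever added for this signal */
	return (struct evmap_signal *)base->sigmap.entries[sig];
}
#endif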

/* A list of events waiting on a given 'common' timeout value.  Ordinarily,
 * events waiting for a timeout wait on a minheap.  Sometimes, however, a
 * queue can be faster.
 */
struct common_timeout_list {
	/* List of events currently waiting in the queue. */
	struct event_list events;
	/* 'magic' timeval used to indicate the duration of events in this
	 * queue. */
	struct timeval duration;
	/* Event that triggers whenever one of the events in the queue is
	 * ready to activate */
	struct event timeout_event;
	/* The event_base that this timeout list is part of */
	struct event_base *base;
};

/** Mask used to get the real tv_usec value from a common timeout. */
#define COMMON_TIMEOUT_MICROSECONDS_MASK	0x000fffff
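
/*
 * Illustrative note: a common-timeout timeval is tagged in its tv_usec
 * field.  Only the low 20 bits (COMMON_TIMEOUT_MICROSECONDS_MASK) hold real
 * microseconds; the high bits carry a magic marker plus the index of the
 * owning common_timeout_list (see the companion masks in event.c).
 * Recovering the actual duration is therefore roughly the sketch below;
 * "sketch_decode_common_timeout" is a hypothetical name.
 */
#if 0
static struct timeval
sketch_decode_common_timeout(const struct timeval *magic_tv)
{
	struct timeval duration;
	duration.tv_sec = magic_tv->tv_sec;
	duration.tv_usec =
	    magic_tv->tv_usec & COMMON_TIMEOUT_MICROSECONDS_MASK;
	return duration;
}
#endif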

struct event_change;

/* List of 'changes' since the last call to eventop.dispatch.  Only maintained
 * if the backend is using changesets. */
struct event_changelist {
	struct event_change *changes;
	int n_changes;
	int changes_size;
};

#ifndef _EVENT_DISABLE_DEBUG_MODE
/* Global internal flag: set to one if debug mode is on. */
extern int _event_debug_mode_on;
#define EVENT_DEBUG_MODE_IS_ON() (_event_debug_mode_on)
#else
#define EVENT_DEBUG_MODE_IS_ON() (0)
#endif

struct event_base {
	/** Function pointers and other data to describe this event_base's
	 * backend. */
	const struct eventop *evsel;
	/** Pointer to backend-specific data. */
	void *evbase;

	/** List of changes to tell backend about at next dispatch.  Only used
	 * by the O(1) backends. */
	struct event_changelist changelist;

	/** Function pointers used to describe the backend that this event_base
	 * uses for signals */
	const struct eventop *evsigsel;
	/** Data to implement the common signal handler code. */
	struct evsig_info sig;

	/** Number of virtual events */
	int virtual_event_count;
	/** Number of total events added to this event_base */
	int event_count;
	/** Number of total events active in this event_base */
	int event_count_active;

	/** Set if we should terminate the loop once we're done processing
	 * events. */
	int event_gotterm;
	/** Set if we should terminate the loop immediately */
	int event_break;
	/** Set if we should start a new instance of the loop immediately. */
	int event_continue;

	/** The currently running priority of events */
	int event_running_priority;

	/** Set if we're running the event_base_loop function, to prevent
	 * reentrant invocation. */
	int running_loop;

	/* Active event management. */
	/** An array of nactivequeues queues for active events (ones that
	 * have triggered, and whose callbacks need to be called).  Low
	 * priority numbers are more important, and stall higher ones.
	 */
	struct event_list *activequeues;
	/** The length of the activequeues array */
	int nactivequeues;

	/* common timeout logic */

	/** An array of common_timeout_list* for all of the common timeout
	 * values we know. */
	struct common_timeout_list **common_timeout_queues;
	/** The number of entries used in common_timeout_queues */
	int n_common_timeouts;
	/** The total size of common_timeout_queues. */
	int n_common_timeouts_allocated;

	/** List of deferred_cb that are active.  We run these after the active
	 * events. */
	struct deferred_cb_queue defer_queue;

	/** Mapping from file descriptors to enabled (added) events */
	struct event_io_map io;

	/** Mapping from signal numbers to enabled (added) events. */
	struct event_signal_map sigmap;

	/** All events that have been enabled (added) in this event_base */
	struct event_list eventqueue;

	/** Stored timeval; used to detect when time is running backwards. */
	struct timeval event_tv;

	/** Priority queue of events with timeouts. */
	struct min_heap timeheap;

	/** Stored timeval: used to avoid calling gettimeofday/clock_gettime
	 * too often. */
	struct timeval tv_cache;

#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	/** Difference between internal time (maybe from clock_gettime) and
	 * gettimeofday. */
	struct timeval tv_clock_diff;
	/** Second in which we last updated tv_clock_diff, in monotonic time. */
	time_t last_updated_clock_diff;
#endif

#ifndef _EVENT_DISABLE_THREAD_SUPPORT
	/* threading support */
	/** The thread currently running the event_loop for this base */
	unsigned long th_owner_id;
	/** A lock to prevent conflicting accesses to this event_base */
	void *th_base_lock;
	/** The event whose callback is executing right now */
	struct event *current_event;
	/** A condition that gets signalled when we're done processing an
	 * event with waiters on it. */
	void *current_event_cond;
	/** Number of threads blocking on current_event_cond. */
	int current_event_waiters;
#endif

#ifdef WIN32
	/** IOCP support structure, if IOCP is enabled. */
	struct event_iocp_port *iocp;
#endif

	/** Flags that this base was configured with */
	enum event_base_config_flag flags;

	/* Notify main thread to wake up break, etc. */
	/** True if the base already has a pending notify, and we don't need
	 * to add any more. */
	int is_notify_pending;
	/** A socketpair used by some th_notify functions to wake up the main
	 * thread. */
	evutil_socket_t th_notify_fd[2];
	/** An event used by some th_notify functions to wake up the main
	 * thread. */
	struct event th_notify;
	/** A function used to wake up the main thread from another thread. */
	int (*th_notify_fn)(struct event_base *base);
};
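
/*
 * Illustrative sketch of the th_notify_fn contract (the real default
 * implementation lives in event.c): a thread other than the loop owner
 * wakes the loop by making th_notify_fd[0] readable, which fires th_notify.
 * "sketch_notify" is a hypothetical name; the Win32 path would use send()
 * instead of write().
 */
#if 0
static int
sketch_notify(struct event_base *base)
{
	char buf[1] = { 0 };
	/* One byte is enough; the read side just drains the fd. */
	if (write(base->th_notify_fd[1], buf, 1) < 0)
		return -1;
	return 0;
}
#endif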

struct event_config_entry {
	TAILQ_ENTRY(event_config_entry) next;

	const char *avoid_method;
};

/** Internal structure: describes the configuration we want for an event_base
 * that we're about to allocate. */
struct event_config {
	TAILQ_HEAD(event_configq, event_config_entry) entries;

	int n_cpus_hint;
	enum event_method_feature require_features;
	enum event_base_config_flag flags;
};

/* Internal use only: Functions that might be missing from <sys/queue.h> */
#if defined(_EVENT_HAVE_SYS_QUEUE_H) && !defined(_EVENT_HAVE_TAILQFOREACH)
#ifndef TAILQ_FIRST
#define	TAILQ_FIRST(head)		((head)->tqh_first)
#endif
#ifndef TAILQ_END
#define	TAILQ_END(head)			NULL
#endif
#ifndef TAILQ_NEXT
#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
#endif

#ifndef TAILQ_FOREACH
#define TAILQ_FOREACH(var, head, field)					\
	for ((var) = TAILQ_FIRST(head);					\
	     (var) != TAILQ_END(head);					\
	     (var) = TAILQ_NEXT(var, field))
#endif

#ifndef TAILQ_INSERT_BEFORE
#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
	(elm)->field.tqe_next = (listelm);				\
	*(listelm)->field.tqe_prev = (elm);				\
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
} while (0)
#endif
#endif /* TAILQ_FOREACH */

#define N_ACTIVE_CALLBACKS(base)					\
	((base)->event_count_active + (base)->defer_queue.active_count)

int _evsig_set_handler(struct event_base *base, int evsignal,
			  void (*fn)(int));
int _evsig_restore_handler(struct event_base *base, int evsignal);

void event_active_nolock(struct event *ev, int res, short count);

/* FIXME document. */
void event_base_add_virtual(struct event_base *base);
void event_base_del_virtual(struct event_base *base);

/** For debugging: unless assertions are disabled, verify the referential
    integrity of the internal data structures of 'base'.  This operation can
    be expensive.

    Returns on success; aborts on failure.
*/
void event_base_assert_ok(struct event_base *base);

#ifdef __cplusplus
}
#endif

#endif /* _EVENT_INTERNAL_H_ */