Thu, 22 Jan 2015 13:21:57 +0100
Incorporate requested changes from Mozilla in review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6
/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _EVENT_INTERNAL_H_
#define _EVENT_INTERNAL_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "event2/event-config.h"
#include <time.h>
#include <sys/queue.h>
#include "event2/event_struct.h"
#include "minheap-internal.h"
#include "evsignal-internal.h"
#include "mm-internal.h"
#include "defer-internal.h"

/* map union members back */

/* mutually exclusive */
#define ev_signal_next	_ev.ev_signal.ev_signal_next
#define ev_io_next	_ev.ev_io.ev_io_next
#define ev_io_timeout	_ev.ev_io.ev_timeout

/* used only by signals */
#define ev_ncalls	_ev.ev_signal.ev_ncalls
#define ev_pncalls	_ev.ev_signal.ev_pncalls

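/* Illustrative sketch of what the remapping buys us: internal code can use
 * the short field names directly on a struct event, and the macros above
 * expand them to the full union path. The helper below is hypothetical and
 * exists only to show the expansion. */
#if 0
static void
example_reset_signal_fields(struct event *ev)
{
	ev->ev_ncalls = 0;	/* expands to ev->_ev.ev_signal.ev_ncalls */
	ev->ev_pncalls = NULL;	/* expands to ev->_ev.ev_signal.ev_pncalls */
}
#endif
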
/* Possible values for ev_closure in struct event. */
#define EV_CLOSURE_NONE 0
#define EV_CLOSURE_SIGNAL 1
#define EV_CLOSURE_PERSIST 2

/** Structure to define the backend of a given event_base. */
struct eventop {
	/** The name of this backend. */
	const char *name;
	/** Function to set up an event_base to use this backend. It should
	 * create a new structure holding whatever information is needed to
	 * run the backend, and return it. The returned pointer will get
	 * stored by event_init into the event_base.evbase field. On failure,
	 * this function should return NULL. */
	void *(*init)(struct event_base *);
	/** Enable reading/writing on a given fd or signal. 'events' will be
	 * the events that we're trying to enable: one or more of EV_READ,
	 * EV_WRITE, EV_SIGNAL, and EV_ET. 'old' will be those events that
	 * were enabled on this fd previously. 'fdinfo' will be a structure
	 * associated with the fd by the evmap; its size is defined by the
	 * fdinfo field below. It will be set to 0 the first time the fd is
	 * added. The function should return 0 on success and -1 on error.
	 */
	int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** As "add", except 'events' contains the events we mean to disable. */
	int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** Function to implement the core of an event loop. It must see which
	    added events are ready, and cause event_active to be called for each
	    active event (usually via event_io_active or such). It should
	    return 0 on success and -1 on error.
	 */
	int (*dispatch)(struct event_base *, struct timeval *);
	/** Function to clean up and free our data from the event_base. */
	void (*dealloc)(struct event_base *);
	/** Flag: set if we need to reinitialize the event base after we fork.
	 */
	int need_reinit;
	/** Bit-array of supported event_method_features that this backend can
	 * provide. */
	enum event_method_feature features;
	/** Length of the extra information we should record for each fd that
	    has one or more active events. This information is recorded
	    as part of the evmap entry for each fd, and passed as an argument
	    to the add and del functions above.
	 */
	size_t fdinfo_len;
};

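/* Minimal backend skeleton (illustrative only; the example_* names are
 * hypothetical and not part of libevent). A real backend fills in each hook
 * in a static const struct eventop and is selected at event_base setup
 * time. */
#if 0
static void *
example_init(struct event_base *base)
{
	/* Allocate backend-private state; return NULL on failure. */
	return mm_calloc(1, sizeof(int));
}
static int
example_add(struct event_base *base, evutil_socket_t fd,
    short old, short events, void *fdinfo)
{
	/* Ask the kernel to watch 'fd' for 'events'; 0 on success. */
	return 0;
}
static int
example_del(struct event_base *base, evutil_socket_t fd,
    short old, short events, void *fdinfo)
{
	/* Stop watching 'fd' for 'events'; 0 on success. */
	return 0;
}
static int
example_dispatch(struct event_base *base, struct timeval *tv)
{
	/* Wait up to 'tv', then report readiness, e.g. with
	 * evmap_io_active(base, fd, EV_READ). */
	return 0;
}
static void
example_dealloc(struct event_base *base)
{
	/* Free whatever example_init allocated. */
}
static const struct eventop exampleops = {
	"example",
	example_init,
	example_add,
	example_del,
	example_dispatch,
	example_dealloc,
	1,	/* need_reinit */
	0,	/* features */
	0	/* fdinfo_len */
};
#endif
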
#ifdef WIN32
/* If we're on win32, then file descriptors are not nice, low, densely packed
   integers. Instead, they are pointer-like Windows handles, and we want to
   use a hashtable instead of an array to map fds to events.
*/
#define EVMAP_USE_HT
#endif

/* #define HT_CACHE_HASH_VALS */

#ifdef EVMAP_USE_HT
#include "ht-internal.h"
struct event_map_entry;
HT_HEAD(event_io_map, event_map_entry);
#else
#define event_io_map event_signal_map
#endif

/* Used to map signal numbers to a list of events. If EVMAP_USE_HT is not
   defined, this structure is also used as event_io_map, which maps fds to a
   list of events.
*/
struct event_signal_map {
	/* An array of evmap_io * or of evmap_signal *; empty entries are
	 * set to NULL. */
	void **entries;
	/* The number of entries available in entries */
	int nentries;
};

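/* Illustrative sketch (hypothetical helper): a lookup in this map is just a
 * bounds-checked array index, with NULL marking an empty slot. */
#if 0
static struct evmap_signal *
example_signal_map_get(const struct event_signal_map *map, int sig)
{
	if (sig < 0 || sig >= map->nentries)
		return NULL;
	/* Entries hold evmap_signal* here; evmap_io* in the io map. */
	return map->entries[sig];
}
#endif
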
/* A list of events waiting on a given 'common' timeout value. Ordinarily,
 * events waiting for a timeout wait on a minheap. Sometimes, however, a
 * queue can be faster.
 */
struct common_timeout_list {
	/* List of events currently waiting in the queue. */
	struct event_list events;
	/* 'magic' timeval used to indicate the duration of events in this
	 * queue. */
	struct timeval duration;
	/* Event that triggers whenever one of the events in the queue is
	 * ready to activate */
	struct event timeout_event;
	/* The event_base that this timeout list is part of */
	struct event_base *base;
};

/** Mask used to get the real tv_usec value from a common timeout. */
#define COMMON_TIMEOUT_MICROSECONDS_MASK 0x000fffff

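/* Illustrative sketch: a "common timeout" stores flag bits in the upper bits
 * of tv_usec (the exact layout lives in event.c); masking recovers the real
 * microsecond count. */
#if 0
static long
example_common_timeout_usec(const struct timeval *tv)
{
	return tv->tv_usec & COMMON_TIMEOUT_MICROSECONDS_MASK;
}
#endif
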
struct event_change;

/* List of 'changes' since the last call to eventop.dispatch. Only maintained
 * if the backend is using changesets. */
struct event_changelist {
	struct event_change *changes;
	int n_changes;
	int changes_size;
};

#ifndef _EVENT_DISABLE_DEBUG_MODE
/* Global internal flag: set to one if debug mode is on. */
extern int _event_debug_mode_on;
#define EVENT_DEBUG_MODE_IS_ON() (_event_debug_mode_on)
#else
#define EVENT_DEBUG_MODE_IS_ON() (0)
#endif

struct event_base {
	/** Function pointers and other data to describe this event_base's
	 * backend. */
	const struct eventop *evsel;
	/** Pointer to backend-specific data. */
	void *evbase;

	/** List of changes to tell backend about at next dispatch. Only used
	 * by the O(1) backends. */
	struct event_changelist changelist;

	/** Function pointers used to describe the backend that this event_base
	 * uses for signals */
	const struct eventop *evsigsel;
	/** Data to implement the common signal handler code. */
	struct evsig_info sig;

	/** Number of virtual events */
	int virtual_event_count;
	/** Number of total events added to this event_base */
	int event_count;
	/** Number of total events active in this event_base */
	int event_count_active;

	/** Set if we should terminate the loop once we're done processing
	 * events. */
	int event_gotterm;
	/** Set if we should terminate the loop immediately */
	int event_break;
	/** Set if we should start a new instance of the loop immediately. */
	int event_continue;

	/** The currently running priority of events */
	int event_running_priority;

	/** Set if we're running the event_base_loop function, to prevent
	 * reentrant invocation. */
	int running_loop;

	/* Active event management. */
	/** An array of nactivequeues queues for active events (ones that
	 * have triggered, and whose callbacks need to be called). Low
	 * priority numbers are more important, and stall higher ones.
	 */
	struct event_list *activequeues;
	/** The length of the activequeues array */
	int nactivequeues;

	/* common timeout logic */

	/** An array of common_timeout_list* for all of the common timeout
	 * values we know. */
	struct common_timeout_list **common_timeout_queues;
	/** The number of entries used in common_timeout_queues */
	int n_common_timeouts;
	/** The total size of common_timeout_queues. */
	int n_common_timeouts_allocated;

	/** List of deferred_cb that are active. We run these after the active
	 * events. */
	struct deferred_cb_queue defer_queue;

	/** Mapping from file descriptors to enabled (added) events */
	struct event_io_map io;

	/** Mapping from signal numbers to enabled (added) events. */
	struct event_signal_map sigmap;

	/** All events that have been enabled (added) in this event_base */
	struct event_list eventqueue;

	/** Stored timeval; used to detect when time is running backwards. */
	struct timeval event_tv;

	/** Priority queue of events with timeouts. */
	struct min_heap timeheap;

	/** Stored timeval: used to avoid calling gettimeofday/clock_gettime
	 * too often. */
	struct timeval tv_cache;

#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	/** Difference between internal time (maybe from clock_gettime) and
	 * gettimeofday. */
	struct timeval tv_clock_diff;
	/** Second in which we last updated tv_clock_diff, in monotonic time. */
	time_t last_updated_clock_diff;
#endif

#ifndef _EVENT_DISABLE_THREAD_SUPPORT
	/* threading support */
	/** The thread currently running the event_loop for this base */
	unsigned long th_owner_id;
	/** A lock to prevent conflicting accesses to this event_base */
	void *th_base_lock;
	/** The event whose callback is executing right now */
	struct event *current_event;
	/** A condition that gets signalled when we're done processing an
	 * event with waiters on it. */
	void *current_event_cond;
	/** Number of threads blocking on current_event_cond. */
	int current_event_waiters;
#endif

#ifdef WIN32
	/** IOCP support structure, if IOCP is enabled. */
	struct event_iocp_port *iocp;
#endif

	/** Flags that this base was configured with */
	enum event_base_config_flag flags;

	/* Notify the main thread to wake up, break the loop, etc. */
	/** True if the base already has a pending notify, and we don't need
	 * to add any more. */
	int is_notify_pending;
	/** A socketpair used by some th_notify functions to wake up the main
	 * thread. */
	evutil_socket_t th_notify_fd[2];
	/** An event used by some th_notify functions to wake up the main
	 * thread. */
	struct event th_notify;
	/** A function used to wake up the main thread from another thread. */
	int (*th_notify_fn)(struct event_base *base);
};

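/* Illustrative sketch (simplified; the real default lives in event.c and
 * runs with th_base_lock held): a th_notify_fn implementation wakes the loop
 * by writing one byte to the notify socketpair, which the th_notify event
 * then reads and discards. */
#if 0
static int
example_notify_base(struct event_base *base)
{
	char buf[1] = { 0 };
	if (base->is_notify_pending)
		return 0;
	base->is_notify_pending = 1;
	return send(base->th_notify_fd[1], buf, 1, 0) < 0 ? -1 : 0;
}
#endif
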
struct event_config_entry {
	TAILQ_ENTRY(event_config_entry) next;

	const char *avoid_method;
};

/** Internal structure: describes the configuration we want for an event_base
 * that we're about to allocate. */
struct event_config {
	TAILQ_HEAD(event_configq, event_config_entry) entries;

	int n_cpus_hint;
	enum event_method_feature require_features;
	enum event_base_config_flag flags;
};

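/* Illustrative sketch: the public configuration API fills in the fields
 * above. event_config_avoid_method() appends an event_config_entry to
 * 'entries'; event_config_require_features() sets 'require_features'. */
#if 0
#include "event2/event.h"

static struct event_base *
example_make_base(void)
{
	struct event_base *base;
	struct event_config *cfg = event_config_new();
	if (!cfg)
		return NULL;
	event_config_avoid_method(cfg, "select");
	event_config_require_features(cfg, EV_FEATURE_O1);
	base = event_base_new_with_config(cfg);
	event_config_free(cfg);
	return base;
}
#endif
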
/* Internal use only: Functions that might be missing from <sys/queue.h> */
#if defined(_EVENT_HAVE_SYS_QUEUE_H) && !defined(_EVENT_HAVE_TAILQFOREACH)
#ifndef TAILQ_FIRST
#define TAILQ_FIRST(head) ((head)->tqh_first)
#endif
#ifndef TAILQ_END
#define TAILQ_END(head) NULL
#endif
#ifndef TAILQ_NEXT
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#endif

#ifndef TAILQ_FOREACH
#define TAILQ_FOREACH(var, head, field) \
	for ((var) = TAILQ_FIRST(head); \
	     (var) != TAILQ_END(head); \
	     (var) = TAILQ_NEXT(var, field))
#endif

#ifndef TAILQ_INSERT_BEFORE
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
	(elm)->field.tqe_next = (listelm); \
	*(listelm)->field.tqe_prev = (elm); \
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (0)
#endif
#endif /* _EVENT_HAVE_SYS_QUEUE_H && !_EVENT_HAVE_TAILQFOREACH */

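/* Illustrative sketch: whether native or supplied by the fallback above,
 * TAILQ_FOREACH walks a tail queue such as the per-base eventqueue. */
#if 0
static void
example_walk(struct event_list *head)
{
	struct event *ev;
	TAILQ_FOREACH(ev, head, ev_next) {
		/* inspect 'ev' */
	}
}
#endif
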
#define N_ACTIVE_CALLBACKS(base) \
	((base)->event_count_active + (base)->defer_queue.active_count)

int _evsig_set_handler(struct event_base *base, int evsignal,
    void (*fn)(int));
int _evsig_restore_handler(struct event_base *base, int evsignal);

void event_active_nolock(struct event *ev, int res, short count);

/* FIXME document. */
void event_base_add_virtual(struct event_base *base);
void event_base_del_virtual(struct event_base *base);

/** For debugging: unless assertions are disabled, verify the referential
    integrity of the internal data structures of 'base'. This operation can
    be expensive.

    Returns on success; aborts on failure.
*/
void event_base_assert_ok(struct event_base *base);

#ifdef __cplusplus
}
#endif

#endif /* _EVENT_INTERNAL_H_ */