/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _EVENT_INTERNAL_H_
#define _EVENT_INTERNAL_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "event2/event-config.h"
#include <time.h>
#include <sys/queue.h>
#include "event2/event_struct.h"
#include "minheap-internal.h"
#include "evsignal-internal.h"
#include "mm-internal.h"
#include "defer-internal.h"

/* Map the members of the _ev union inside struct event (see
   event2/event_struct.h) back to their traditional names. */

/* mutually exclusive */
#define ev_signal_next	_ev.ev_signal.ev_signal_next
#define ev_io_next	_ev.ev_io.ev_io_next
#define ev_io_timeout	_ev.ev_io.ev_timeout

/* used only by signals */
#define ev_ncalls	_ev.ev_signal.ev_ncalls
#define ev_pncalls	_ev.ev_signal.ev_pncalls

/* Possible values for ev_closure in struct event. */
#define EV_CLOSURE_NONE 0
#define EV_CLOSURE_SIGNAL 1
#define EV_CLOSURE_PERSIST 2
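
/* Roughly how the loop uses these values: EV_CLOSURE_NONE runs the callback
 * once as-is; EV_CLOSURE_SIGNAL marks a signal event, whose callback is
 * invoked ev_ncalls times; EV_CLOSURE_PERSIST marks a persistent event,
 * which is rescheduled before its callback runs.  The dispatch logic that
 * acts on these lives in event.c. */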

/** Structure to define the backend of a given event_base. */
struct eventop {
	/** The name of this backend. */
	const char *name;
	/** Function to set up an event_base to use this backend.  It should
	 * create a new structure holding whatever information is needed to
	 * run the backend, and return it.  The returned pointer will get
	 * stored by event_init into the event_base.evbase field.  On failure,
	 * this function should return NULL. */
	void *(*init)(struct event_base *);
	/** Enable reading/writing on a given fd or signal.  'events' will be
	 * the events that we're trying to enable: one or more of EV_READ,
	 * EV_WRITE, EV_SIGNAL, and EV_ET.  'old' will be those events that
	 * were enabled on this fd previously.  'fdinfo' will be a structure
	 * associated with the fd by the evmap; its size is defined by the
	 * fdinfo_len field below.  It will be set to 0 the first time the fd
	 * is added.  The function should return 0 on success and -1 on error.
	 */
	int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** As "add", except 'events' contains the events we mean to disable. */
	int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** Function to implement the core of an event loop.  It must see which
	    added events are ready, and cause event_active to be called for each
	    active event (usually via event_io_active or such).  It should
	    return 0 on success and -1 on error.
	 */
	int (*dispatch)(struct event_base *, struct timeval *);
	/** Function to clean up and free our data from the event_base. */
	void (*dealloc)(struct event_base *);
	/** Flag: set if we need to reinitialize the event base after we fork.
	 */
	int need_reinit;
	/** Bit-array of supported event_method_features that this backend can
	 * provide. */
	enum event_method_feature features;
	/** Length of the extra information we should record for each fd that
	    has one or more active events.  This information is recorded
	    as part of the evmap entry for each fd, and passed as an argument
	    to the add and del functions above.
	 */
	size_t fdinfo_len;
};
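
/* For illustration only: a backend is normally defined as a const instance
 * of this structure, with functions matching the signatures above, and is
 * selected when the event_base is created.  A hypothetical backend might
 * look like:
 *
 *	static void *example_init(struct event_base *);
 *	static int example_add(struct event_base *, evutil_socket_t,
 *	    short old, short events, void *fdinfo);
 *	static int example_del(struct event_base *, evutil_socket_t,
 *	    short old, short events, void *fdinfo);
 *	static int example_dispatch(struct event_base *, struct timeval *);
 *	static void example_dealloc(struct event_base *);
 *
 *	const struct eventop exampleops = {
 *		"example",
 *		example_init,
 *		example_add,
 *		example_del,
 *		example_dispatch,
 *		example_dealloc,
 *		1,			// need_reinit after fork
 *		EV_FEATURE_FDS,		// features
 *		0			// fdinfo_len: no per-fd data
 *	};
 */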

#ifdef WIN32
/* If we're on win32, then file descriptors are not small, densely packed
   integers.  Instead, they are pointer-like Windows handles, and we want to
   use a hashtable instead of an array to map fds to events.
*/
#define EVMAP_USE_HT
#endif

/* #define HT_CACHE_HASH_VALS */

#ifdef EVMAP_USE_HT
#include "ht-internal.h"
struct event_map_entry;
HT_HEAD(event_io_map, event_map_entry);
#else
#define event_io_map event_signal_map
#endif

/* Used to map signal numbers to a list of events.  If EVMAP_USE_HT is not
   defined, this structure is also used as event_io_map, which maps fds to a
   list of events.
*/
struct event_signal_map {
	/* An array of evmap_io * or of evmap_signal *; empty entries are
	 * set to NULL. */
	void **entries;
	/* The number of entries available in entries */
	int nentries;
};
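
/* Illustrative use of the array variant: the signal number (or the fd, when
 * this type is doing duty as event_io_map) indexes entries[] directly, so a
 * lookup is roughly
 *
 *	if (signo < map->nentries && map->entries[signo] != NULL)
 *		... something is registered for signo ...
 *
 * and a NULL or out-of-range entry means nothing is registered. */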

/* A list of events waiting on a given 'common' timeout value.  Ordinarily,
 * events waiting for a timeout wait on a minheap.  Sometimes, however, a
 * queue can be faster.
 */
struct common_timeout_list {
	/* List of events currently waiting in the queue. */
	struct event_list events;
	/* 'magic' timeval used to indicate the duration of events in this
	 * queue. */
	struct timeval duration;
	/* Event that triggers whenever one of the events in the queue is
	 * ready to activate */
	struct event timeout_event;
	/* The event_base that this timeout list is part of */
	struct event_base *base;
};

/** Mask used to get the real tv_usec value from a common timeout. */
#define COMMON_TIMEOUT_MICROSECONDS_MASK	0x000fffff
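
/* Illustrative: a timeval that encodes a common timeout keeps the real
 * microsecond count in its low 20 bits, so
 *
 *	usec = tv->tv_usec & COMMON_TIMEOUT_MICROSECONDS_MASK;
 *
 * recovers it, while the remaining high bits of tv_usec tag the value as a
 * common timeout and identify which common_timeout_queues entry it belongs
 * to.  The constants for those high bits live in the implementation, not in
 * this header. */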

struct event_change;

/* List of 'changes' since the last call to eventop.dispatch.  Only maintained
 * if the backend is using changesets. */
struct event_changelist {
	struct event_change *changes;
	int n_changes;
	int changes_size;
};
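
/* Roughly: a backend that uses changesets records one event_change per
 * add/del request here instead of acting on it at once, and the pending
 * batch is applied in a single pass around the next dispatch.  n_changes is
 * the number of entries in use; changes_size is the allocated capacity of
 * the changes array. */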

#ifndef _EVENT_DISABLE_DEBUG_MODE
/* Global internal flag: set to one if debug mode is on. */
extern int _event_debug_mode_on;
#define EVENT_DEBUG_MODE_IS_ON() (_event_debug_mode_on)
#else
#define EVENT_DEBUG_MODE_IS_ON() (0)
#endif

struct event_base {
	/** Function pointers and other data to describe this event_base's
	 * backend. */
	const struct eventop *evsel;
	/** Pointer to backend-specific data. */
	void *evbase;

	/** List of changes to tell backend about at next dispatch.  Only used
	 * by the O(1) backends. */
	struct event_changelist changelist;

	/** Function pointers used to describe the backend that this event_base
	 * uses for signals */
	const struct eventop *evsigsel;
	/** Data to implement the common signal handler code. */
	struct evsig_info sig;

	/** Number of virtual events */
	int virtual_event_count;
	/** Number of total events added to this event_base */
	int event_count;
	/** Number of total events active in this event_base */
	int event_count_active;

	/** Set if we should terminate the loop once we're done processing
	 * events. */
	int event_gotterm;
	/** Set if we should terminate the loop immediately */
	int event_break;
	/** Set if we should start a new instance of the loop immediately. */
	int event_continue;

	/** The currently running priority of events */
	int event_running_priority;

	/** Set if we're running the event_base_loop function, to prevent
	 * reentrant invocation. */
	int running_loop;

	/* Active event management. */
	/** An array of nactivequeues queues for active events (ones that
	 * have triggered, and whose callbacks need to be called).  Low
	 * priority numbers are more important, and stall higher ones.
	 */
	struct event_list *activequeues;
	/** The length of the activequeues array */
	int nactivequeues;

	/* common timeout logic */

	/** An array of common_timeout_list* for all of the common timeout
	 * values we know. */
	struct common_timeout_list **common_timeout_queues;
	/** The number of entries used in common_timeout_queues */
	int n_common_timeouts;
	/** The total size of common_timeout_queues. */
	int n_common_timeouts_allocated;

	/** List of deferred_cb that are active.  We run these after the active
	 * events. */
	struct deferred_cb_queue defer_queue;

	/** Mapping from file descriptors to enabled (added) events */
	struct event_io_map io;

	/** Mapping from signal numbers to enabled (added) events. */
	struct event_signal_map sigmap;

	/** All events that have been enabled (added) in this event_base */
	struct event_list eventqueue;

	/** Stored timeval; used to detect when time is running backwards. */
	struct timeval event_tv;

	/** Priority queue of events with timeouts. */
	struct min_heap timeheap;

	/** Stored timeval: used to avoid calling gettimeofday/clock_gettime
	 * too often. */
	struct timeval tv_cache;

#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	/** Difference between internal time (maybe from clock_gettime) and
	 * gettimeofday. */
	struct timeval tv_clock_diff;
	/** Second in which we last updated tv_clock_diff, in monotonic time. */
	time_t last_updated_clock_diff;
#endif

#ifndef _EVENT_DISABLE_THREAD_SUPPORT
	/* threading support */
	/** The thread currently running the event_loop for this base */
	unsigned long th_owner_id;
	/** A lock to prevent conflicting accesses to this event_base */
	void *th_base_lock;
	/** The event whose callback is executing right now */
	struct event *current_event;
	/** A condition that gets signalled when we're done processing an
	 * event with waiters on it. */
	void *current_event_cond;
	/** Number of threads blocking on current_event_cond. */
	int current_event_waiters;
#endif

#ifdef WIN32
	/** IOCP support structure, if IOCP is enabled. */
	struct event_iocp_port *iocp;
#endif

	/** Flags that this base was configured with */
	enum event_base_config_flag flags;

	/* Notify the main thread to wake up, break out of the loop, etc. */
	/** True if the base already has a pending notify, and we don't need
	 * to add any more. */
	int is_notify_pending;
	/** A socketpair used by some th_notify functions to wake up the main
	 * thread. */
	evutil_socket_t th_notify_fd[2];
	/** An event used by some th_notify functions to wake up the main
	 * thread. */
	struct event th_notify;
	/** A function used to wake up the main thread from another thread. */
	int (*th_notify_fn)(struct event_base *base);
};
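
/* Roughly how the notification fields above cooperate: when a thread other
 * than th_owner_id modifies the base (adding, deleting, or activating an
 * event), it does so while holding th_base_lock and then calls
 * th_notify_fn(base), which typically writes to th_notify_fd so that the
 * th_notify event fires and the running loop wakes up to see the change. */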

/** Internal structure: one entry in an event_config's list of backend
 * methods to avoid. */
struct event_config_entry {
	TAILQ_ENTRY(event_config_entry) next;

	const char *avoid_method;
};

/** Internal structure: describes the configuration we want for an event_base
 * that we're about to allocate. */
struct event_config {
	TAILQ_HEAD(event_configq, event_config_entry) entries;

	int n_cpus_hint;
	enum event_method_feature require_features;
	enum event_base_config_flag flags;
};
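
/* For reference, these fields mirror the public configuration calls in
 * event2/event.h: entries collects the backend names passed to
 * event_config_avoid_method(), require_features comes from
 * event_config_require_features(), flags from event_config_set_flag(), and
 * n_cpus_hint from event_config_set_num_cpus_hint(). */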

/* Internal use only: Macros that might be missing from <sys/queue.h> */
#if defined(_EVENT_HAVE_SYS_QUEUE_H) && !defined(_EVENT_HAVE_TAILQFOREACH)
#ifndef TAILQ_FIRST
#define TAILQ_FIRST(head)		((head)->tqh_first)
#endif
#ifndef TAILQ_END
#define TAILQ_END(head)			NULL
#endif
#ifndef TAILQ_NEXT
#define TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
#endif

#ifndef TAILQ_FOREACH
#define TAILQ_FOREACH(var, head, field)					\
	for ((var) = TAILQ_FIRST(head);					\
	     (var) != TAILQ_END(head);					\
	     (var) = TAILQ_NEXT(var, field))
#endif

#ifndef TAILQ_INSERT_BEFORE
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
	(elm)->field.tqe_next = (listelm);				\
	*(listelm)->field.tqe_prev = (elm);				\
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
} while (0)
#endif
#endif /* _EVENT_HAVE_SYS_QUEUE_H && !_EVENT_HAVE_TAILQFOREACH */
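
/* These fallbacks behave like the standard sys/queue.h macros.  For example,
 * walking a tail queue such as an event_base's eventqueue looks roughly like:
 *
 *	struct event *ev;
 *	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
 *		... inspect ev ...
 *	}
 *
 * where ev_next is the TAILQ_ENTRY field that links events into the queue
 * (declared in event2/event_struct.h). */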

/** Total number of callbacks that are pending: active events plus deferred
 * callbacks that have not run yet. */
#define N_ACTIVE_CALLBACKS(base)					\
	((base)->event_count_active + (base)->defer_queue.active_count)

/** Helper: install 'fn' as the handler for signal 'evsignal', remembering
 * the previous handler so that it can be restored later. */
int _evsig_set_handler(struct event_base *base, int evsignal,
    void (*fn)(int));
/** Helper: restore the handler for 'evsignal' that was in place before
 * _evsig_set_handler() replaced it. */
int _evsig_restore_handler(struct event_base *base, int evsignal);

/** As event_active(), but the caller must already hold the lock on ev's
 * event_base. */
void event_active_nolock(struct event *ev, int res, short count);

/* FIXME document. */
void event_base_add_virtual(struct event_base *base);
void event_base_del_virtual(struct event_base *base);

/** For debugging: unless assertions are disabled, verify the referential
    integrity of the internal data structures of 'base'.  This operation can
    be expensive.

    Returns on success; aborts on failure.
*/
void event_base_assert_ok(struct event_base *base);

#ifdef __cplusplus
}
#endif

#endif /* _EVENT_INTERNAL_H_ */