ipc/chromium/src/third_party/libevent/event-internal.h

changeset 0:6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/ipc/chromium/src/third_party/libevent/event-internal.h	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,368 @@
     1.4 +/*
     1.5 + * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
     1.6 + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
     1.7 + *
     1.8 + * Redistribution and use in source and binary forms, with or without
     1.9 + * modification, are permitted provided that the following conditions
    1.10 + * are met:
    1.11 + * 1. Redistributions of source code must retain the above copyright
    1.12 + *    notice, this list of conditions and the following disclaimer.
    1.13 + * 2. Redistributions in binary form must reproduce the above copyright
    1.14 + *    notice, this list of conditions and the following disclaimer in the
    1.15 + *    documentation and/or other materials provided with the distribution.
    1.16 + * 3. The name of the author may not be used to endorse or promote products
    1.17 + *    derived from this software without specific prior written permission.
    1.18 + *
    1.19 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
    1.20 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
    1.21 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
    1.22 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
    1.23 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
    1.24 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    1.25 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    1.26 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    1.27 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
    1.28 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    1.29 + */
    1.30 +#ifndef _EVENT_INTERNAL_H_
    1.31 +#define _EVENT_INTERNAL_H_
    1.32 +
    1.33 +#ifdef __cplusplus
    1.34 +extern "C" {
    1.35 +#endif
    1.36 +
    1.37 +#include "event2/event-config.h"
    1.38 +#include <time.h>
    1.39 +#include <sys/queue.h>
    1.40 +#include "event2/event_struct.h"
    1.41 +#include "minheap-internal.h"
    1.42 +#include "evsignal-internal.h"
    1.43 +#include "mm-internal.h"
    1.44 +#include "defer-internal.h"
    1.45 +
    1.46 +/* map union members back */
    1.47 +
    1.48 +/* mutually exclusive */
    1.49 +#define ev_signal_next	_ev.ev_signal.ev_signal_next
    1.50 +#define ev_io_next	_ev.ev_io.ev_io_next
    1.51 +#define ev_io_timeout	_ev.ev_io.ev_timeout
    1.52 +
    1.53 +/* used only by signals */
    1.54 +#define ev_ncalls	_ev.ev_signal.ev_ncalls
    1.55 +#define ev_pncalls	_ev.ev_signal.ev_pncalls
    1.56 +
    1.57 +/* Possible values for ev_closure in struct event. */
    1.58 +#define EV_CLOSURE_NONE 0
    1.59 +#define EV_CLOSURE_SIGNAL 1
    1.60 +#define EV_CLOSURE_PERSIST 2
    1.61 +
    1.62 +/** Structure to define the backend of a given event_base. */
    1.63 +struct eventop {
    1.64 +	/** The name of this backend. */
    1.65 +	const char *name;
    1.66 +	/** Function to set up an event_base to use this backend.  It should
    1.67 +	 * create a new structure holding whatever information is needed to
    1.68 +	 * run the backend, and return it.  The returned pointer will get
    1.69 +	 * stored by event_init into the event_base.evbase field.  On failure,
    1.70 +	 * this function should return NULL. */
    1.71 +	void *(*init)(struct event_base *);
    1.72 +	/** Enable reading/writing on a given fd or signal.  'events' will be
    1.73 +	 * the events that we're trying to enable: one or more of EV_READ,
    1.74 +	 * EV_WRITE, EV_SIGNAL, and EV_ET.  'old' will be those events that
    1.75 +	 * were enabled on this fd previously.  'fdinfo' will be a structure
    1.76 +	 * associated with the fd by the evmap; its size is defined by the
    1.77 +	 * fdinfo field below.  It will be set to 0 the first time the fd is
    1.78 +	 * added.  The function should return 0 on success and -1 on error.
    1.79 +	 */
    1.80 +	int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
    1.81 +	/** As "add", except 'events' contains the events we mean to disable. */
    1.82 +	int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
    1.83 +	/** Function to implement the core of an event loop.  It must see which
    1.84 +	    added events are ready, and cause event_active to be called for each
    1.85 +	    active event (usually via event_io_active or such).  It should
    1.86 +	    return 0 on success and -1 on error.
    1.87 +	 */
    1.88 +	int (*dispatch)(struct event_base *, struct timeval *);
    1.89 +	/** Function to clean up and free our data from the event_base. */
    1.90 +	void (*dealloc)(struct event_base *);
    1.91 +	/** Flag: set if we need to reinitialize the event base after we fork.
    1.92 +	 */
    1.93 +	int need_reinit;
    1.94 +	/** Bit-array of supported event_method_features that this backend can
    1.95 +	 * provide. */
    1.96 +	enum event_method_feature features;
    1.97 +	/** Length of the extra information we should record for each fd that
    1.98 +	    has one or more active events.  This information is recorded
    1.99 +	    as part of the evmap entry for each fd, and passed as an argument
   1.100 +	    to the add and del functions above.
   1.101 +	 */
   1.102 +	size_t fdinfo_len;
   1.103 +};
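/*
 * Editorial sketch, not part of the original file: a hypothetical backend
 * wired into the eventop interface above, to show how the fields fit
 * together.  The "example_*" names and struct are invented for illustration;
 * the real backends live in select.c, poll.c, epoll.c, and so on, and differ
 * in detail.
 */
struct exampleop {
	int dummy_state;	/* whatever bookkeeping the backend needs */
};

static void *
example_init(struct event_base *base)
{
	/* Allocate backend state; the core stores the returned pointer in
	 * base->evbase.  Returning NULL reports failure. */
	return mm_calloc(1, sizeof(struct exampleop));
}

static int
example_add(struct event_base *base, evutil_socket_t fd, short old,
    short events, void *fdinfo)
{
	/* Start watching 'fd' for (old | events); 0 on success, -1 on error. */
	return 0;
}

static int
example_del(struct event_base *base, evutil_socket_t fd, short old,
    short events, void *fdinfo)
{
	/* Stop watching the bits named in 'events'. */
	return 0;
}

static int
example_dispatch(struct event_base *base, struct timeval *tv)
{
	/* Block for at most *tv, then report each ready fd back to the evmap
	 * so that event_active() ends up being called for it. */
	return 0;
}

static void
example_dealloc(struct event_base *base)
{
	mm_free(base->evbase);
}

static const struct eventop exampleops = {
	"example",
	example_init,
	example_add,
	example_del,
	example_dispatch,
	example_dealloc,
	1,		/* need_reinit after fork */
	0,		/* no special event_method_feature flags */
	0		/* fdinfo_len: no per-fd state needed */
};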
   1.104 +
   1.105 +#ifdef WIN32
   1.106 +/* If we're on win32, file descriptors are not small, densely packed
   1.107 +   integers.  Instead, they are pointer-like Windows HANDLEs, and we want to
   1.108 +   use a hashtable instead of an array to map fds to events.
   1.109 +*/
   1.110 +#define EVMAP_USE_HT
   1.111 +#endif
   1.112 +
   1.113 +/* #define HT_CACHE_HASH_VALS */
   1.114 +
   1.115 +#ifdef EVMAP_USE_HT
   1.116 +#include "ht-internal.h"
   1.117 +struct event_map_entry;
   1.118 +HT_HEAD(event_io_map, event_map_entry);
   1.119 +#else
   1.120 +#define event_io_map event_signal_map
   1.121 +#endif
   1.122 +
   1.123 +/* Used to map signal numbers to a list of events.  If EVMAP_USE_HT is not
   1.124 +   defined, this structure is also used as event_io_map, which maps fds to a
   1.125 +   list of events.
   1.126 +*/
   1.127 +struct event_signal_map {
   1.128 +	/* An array of evmap_io * or of evmap_signal *; empty entries are
   1.129 +	 * set to NULL. */
   1.130 +	void **entries;
   1.131 +	/* The number of entries available in entries */
   1.132 +	int nentries;
   1.133 +};
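/*
 * Editorial note, not part of the original file: 'entries' is indexed
 * directly by signal number (or by fd, when this structure doubles as the
 * event_io_map).  A hypothetical lookup helper, just to illustrate the
 * indexing; the real accessors live in evmap.c:
 */
static inline void *
example_signal_map_get(struct event_signal_map *map, int signum)
{
	/* NULL means "no events added for this slot yet". */
	if (signum < 0 || signum >= map->nentries)
		return NULL;
	return map->entries[signum];
}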
   1.134 +
   1.135 +/* A list of events waiting on a given 'common' timeout value.  Ordinarily,
   1.136 + * events waiting for a timeout wait on a minheap.  Sometimes, however, a
   1.137 + * queue can be faster.
   1.138 + **/
   1.139 +struct common_timeout_list {
   1.140 +	/* List of events currently waiting in the queue. */
   1.141 +	struct event_list events;
   1.142 +	/* 'magic' timeval used to indicate the duration of events in this
   1.143 +	 * queue. */
   1.144 +	struct timeval duration;
   1.145 +	/* Event that triggers whenever one of the events in the queue is
   1.146 +	 * ready to activate */
   1.147 +	struct event timeout_event;
   1.148 +	/* The event_base that this timeout list is part of */
   1.149 +	struct event_base *base;
   1.150 +};
   1.151 +
   1.152 +/** Mask used to get the real tv_usec value from a common timeout. */
   1.153 +#define COMMON_TIMEOUT_MICROSECONDS_MASK       0x000fffff
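/*
 * Editorial note, not part of the original file: for events on a common
 * timeout queue, the upper bits of tv_usec carry a magic value and a queue
 * index (that layout is defined in event.c, not in this header); only the
 * bits covered by the mask above are real microseconds.  A sketch of
 * recovering the true microsecond value:
 */
static inline long
example_common_timeout_usec(const struct timeval *tv)
{
	return tv->tv_usec & COMMON_TIMEOUT_MICROSECONDS_MASK;
}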
   1.154 +
   1.155 +struct event_change;
   1.156 +
   1.157 +/* List of 'changes' since the last call to eventop.dispatch.  Only maintained
   1.158 + * if the backend is using changesets. */
   1.159 +struct event_changelist {
   1.160 +	struct event_change *changes;
   1.161 +	int n_changes;
   1.162 +	int changes_size;
   1.163 +};
   1.164 +
   1.165 +#ifndef _EVENT_DISABLE_DEBUG_MODE
   1.166 +/* Global internal flag: set to one if debug mode is on. */
   1.167 +extern int _event_debug_mode_on;
   1.168 +#define EVENT_DEBUG_MODE_IS_ON() (_event_debug_mode_on)
   1.169 +#else
   1.170 +#define EVENT_DEBUG_MODE_IS_ON() (0)
   1.171 +#endif
   1.172 +
   1.173 +struct event_base {
   1.174 +	/** Function pointers and other data to describe this event_base's
   1.175 +	 * backend. */
   1.176 +	const struct eventop *evsel;
   1.177 +	/** Pointer to backend-specific data. */
   1.178 +	void *evbase;
   1.179 +
   1.180 +	/** List of changes to tell backend about at next dispatch.  Only used
   1.181 +	 * by the O(1) backends. */
   1.182 +	struct event_changelist changelist;
   1.183 +
   1.184 +	/** Function pointers used to describe the backend that this event_base
   1.185 +	 * uses for signals */
   1.186 +	const struct eventop *evsigsel;
   1.187 +	/** Data to implement the common signal handler code. */
   1.188 +	struct evsig_info sig;
   1.189 +
   1.190 +	/** Number of virtual events */
   1.191 +	int virtual_event_count;
   1.192 +	/** Number of total events added to this event_base */
   1.193 +	int event_count;
   1.194 +	/** Number of total events active in this event_base */
   1.195 +	int event_count_active;
   1.196 +
   1.197 +	/** Set if we should terminate the loop once we're done processing
   1.198 +	 * events. */
   1.199 +	int event_gotterm;
   1.200 +	/** Set if we should terminate the loop immediately */
   1.201 +	int event_break;
   1.202 +	/** Set if we should start a new instance of the loop immediately. */
   1.203 +	int event_continue;
   1.204 +
   1.205 +	/** The currently running priority of events */
   1.206 +	int event_running_priority;
   1.207 +
   1.208 +	/** Set if we're running the event_base_loop function, to prevent
   1.209 +	 * reentrant invocation. */
   1.210 +	int running_loop;
   1.211 +
   1.212 +	/* Active event management. */
   1.213 +	/** An array of nactivequeues queues for active events (ones that
   1.214 +	 * have triggered, and whose callbacks need to be called).  Low
   1.215 +	 * priority numbers are more important, and stall higher ones.
   1.216 +	 */
   1.217 +	struct event_list *activequeues;
   1.218 +	/** The length of the activequeues array */
   1.219 +	int nactivequeues;
   1.220 +
   1.221 +	/* common timeout logic */
   1.222 +
   1.223 +	/** An array of common_timeout_list* for all of the common timeout
   1.224 +	 * values we know. */
   1.225 +	struct common_timeout_list **common_timeout_queues;
   1.226 +	/** The number of entries used in common_timeout_queues */
   1.227 +	int n_common_timeouts;
   1.228 +	/** The total size of common_timeout_queues. */
   1.229 +	int n_common_timeouts_allocated;
   1.230 +
   1.231 +	/** List of deferred_cb that are active.  We run these after the active
   1.232 +	 * events. */
   1.233 +	struct deferred_cb_queue defer_queue;
   1.234 +
   1.235 +	/** Mapping from file descriptors to enabled (added) events */
   1.236 +	struct event_io_map io;
   1.237 +
   1.238 +	/** Mapping from signal numbers to enabled (added) events. */
   1.239 +	struct event_signal_map sigmap;
   1.240 +
   1.241 +	/** All events that have been enabled (added) in this event_base */
   1.242 +	struct event_list eventqueue;
   1.243 +
   1.244 +	/** Stored timeval; used to detect when time is running backwards. */
   1.245 +	struct timeval event_tv;
   1.246 +
   1.247 +	/** Priority queue of events with timeouts. */
   1.248 +	struct min_heap timeheap;
   1.249 +
   1.250 +	/** Stored timeval: used to avoid calling gettimeofday/clock_gettime
   1.251 +	 * too often. */
   1.252 +	struct timeval tv_cache;
   1.253 +
   1.254 +#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
   1.255 +	/** Difference between internal time (maybe from clock_gettime) and
   1.256 +	 * gettimeofday. */
   1.257 +	struct timeval tv_clock_diff;
   1.258 +	/** Second in which we last updated tv_clock_diff, in monotonic time. */
   1.259 +	time_t last_updated_clock_diff;
   1.260 +#endif
   1.261 +
   1.262 +#ifndef _EVENT_DISABLE_THREAD_SUPPORT
   1.263 +	/* threading support */
   1.264 +	/** The thread currently running the event_loop for this base */
   1.265 +	unsigned long th_owner_id;
   1.266 +	/** A lock to prevent conflicting accesses to this event_base */
   1.267 +	void *th_base_lock;
   1.268 +	/** The event whose callback is executing right now */
   1.269 +	struct event *current_event;
   1.270 +	/** A condition that gets signalled when we're done processing an
   1.271 +	 * event with waiters on it. */
   1.272 +	void *current_event_cond;
   1.273 +	/** Number of threads blocking on current_event_cond. */
   1.274 +	int current_event_waiters;
   1.275 +#endif
   1.276 +
   1.277 +#ifdef WIN32
   1.278 +	/** IOCP support structure, if IOCP is enabled. */
   1.279 +	struct event_iocp_port *iocp;
   1.280 +#endif
   1.281 +
   1.282 +	/** Flags that this base was configured with */
   1.283 +	enum event_base_config_flag flags;
   1.284 +
   1.285 +	/* Notify the main thread to wake up, break out of the loop, etc. */
   1.286 +	/** True if the base already has a pending notify, and we don't need
   1.287 +	 * to add any more. */
   1.288 +	int is_notify_pending;
   1.289 +	/** A socketpair used by some th_notify functions to wake up the main
   1.290 +	 * thread. */
   1.291 +	evutil_socket_t th_notify_fd[2];
   1.292 +	/** An event used by some th_notify functions to wake up the main
   1.293 +	 * thread. */
   1.294 +	struct event th_notify;
   1.295 +	/** A function used to wake up the main thread from another thread. */
   1.296 +	int (*th_notify_fn)(struct event_base *base);
   1.297 +};
   1.298 +
   1.299 +struct event_config_entry {
   1.300 +	TAILQ_ENTRY(event_config_entry) next;
   1.301 +
   1.302 +	const char *avoid_method;
   1.303 +};
   1.304 +
   1.305 +/** Internal structure: describes the configuration we want for an event_base
   1.306 + * that we're about to allocate. */
   1.307 +struct event_config {
   1.308 +	TAILQ_HEAD(event_configq, event_config_entry) entries;
   1.309 +
   1.310 +	int n_cpus_hint;
   1.311 +	enum event_method_feature require_features;
   1.312 +	enum event_base_config_flag flags;
   1.313 +};
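/*
 * Editorial example, not part of the original file: this structure is filled
 * in through the public configuration API declared in <event2/event.h>, so
 * code like the following would live in application code, not in this
 * internal header.  Each call maps onto one of the fields above (an
 * avoid_method entry, require_features, and flags respectively); the helper
 * name is invented.
 */
static struct event_base *
example_make_configured_base(void)
{
	struct event_base *base;
	struct event_config *cfg = event_config_new();
	if (!cfg)
		return NULL;
	event_config_avoid_method(cfg, "select");		/* entries */
	event_config_require_features(cfg, EV_FEATURE_O1);	/* require_features */
	event_config_set_flag(cfg, EVENT_BASE_FLAG_NOLOCK);	/* flags */
	base = event_base_new_with_config(cfg);
	event_config_free(cfg);
	return base;
}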
   1.314 +
   1.315 +/* Internal use only: Functions that might be missing from <sys/queue.h> */
   1.316 +#if defined(_EVENT_HAVE_SYS_QUEUE_H) && !defined(_EVENT_HAVE_TAILQFOREACH)
   1.317 +#ifndef TAILQ_FIRST
   1.318 +#define	TAILQ_FIRST(head)		((head)->tqh_first)
   1.319 +#endif
   1.320 +#ifndef TAILQ_END
   1.321 +#define	TAILQ_END(head)			NULL
   1.322 +#endif
   1.323 +#ifndef TAILQ_NEXT
   1.324 +#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
   1.325 +#endif
   1.326 +
   1.327 +#ifndef TAILQ_FOREACH
   1.328 +#define TAILQ_FOREACH(var, head, field)					\
   1.329 +	for ((var) = TAILQ_FIRST(head);					\
   1.330 +	     (var) != TAILQ_END(head);					\
   1.331 +	     (var) = TAILQ_NEXT(var, field))
   1.332 +#endif
   1.333 +
   1.334 +#ifndef TAILQ_INSERT_BEFORE
   1.335 +#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
   1.336 +	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
   1.337 +	(elm)->field.tqe_next = (listelm);				\
   1.338 +	*(listelm)->field.tqe_prev = (elm);				\
   1.339 +	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
   1.340 +} while (0)
   1.341 +#endif
   1.342 +#endif /* TAILQ_FOREACH */
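/*
 * Editorial example, not part of the original file: how these fallback
 * macros get used internally, e.g. walking every added event on a base.
 * The 'ev_next' link is the TAILQ_ENTRY in struct event (see
 * event2/event_struct.h); the helper name is invented.
 */
static inline int
example_count_added_events(struct event_base *base)
{
	struct event *ev;
	int n = 0;
	TAILQ_FOREACH(ev, &base->eventqueue, ev_next)
		++n;
	return n;
}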
   1.343 +
   1.344 +#define N_ACTIVE_CALLBACKS(base)					\
   1.345 +	((base)->event_count_active + (base)->defer_queue.active_count)
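/*
 * Editorial note, not part of the original file: a hypothetical use of the
 * macro above -- checking whether any callbacks (active events or deferred
 * callbacks) are still waiting to run on a base:
 */
static inline int
example_base_has_pending_callbacks(struct event_base *base)
{
	return N_ACTIVE_CALLBACKS(base) != 0;
}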
   1.346 +
   1.347 +int _evsig_set_handler(struct event_base *base, int evsignal,
   1.348 +			  void (*fn)(int));
   1.349 +int _evsig_restore_handler(struct event_base *base, int evsignal);
   1.350 +
   1.351 +
   1.352 +void event_active_nolock(struct event *ev, int res, short count);
   1.353 +
   1.354 +/* FIXME document. */
   1.355 +void event_base_add_virtual(struct event_base *base);
   1.356 +void event_base_del_virtual(struct event_base *base);
   1.357 +
   1.358 +/** For debugging: unless assertions are disabled, verify the referential
   1.359 +    integrity of the internal data structures of 'base'.  This operation can
   1.360 +    be expensive.
   1.361 +
   1.362 +    Returns on success; aborts on failure.
   1.363 +*/
   1.364 +void event_base_assert_ok(struct event_base *base);
   1.365 +
   1.366 +#ifdef __cplusplus
   1.367 +}
   1.368 +#endif
   1.369 +
   1.370 +#endif /* _EVENT_INTERNAL_H_ */
   1.371 +
