Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
michael@0 | 1 | /* |
michael@0 | 2 | * Copyright (c) 2008-2012 Niels Provos and Nick Mathewson |
michael@0 | 3 | * |
michael@0 | 4 | * Redistribution and use in source and binary forms, with or without |
michael@0 | 5 | * modification, are permitted provided that the following conditions |
michael@0 | 6 | * are met: |
michael@0 | 7 | * 1. Redistributions of source code must retain the above copyright |
michael@0 | 8 | * notice, this list of conditions and the following disclaimer. |
michael@0 | 9 | * 2. Redistributions in binary form must reproduce the above copyright |
michael@0 | 10 | * notice, this list of conditions and the following disclaimer in the |
michael@0 | 11 | * documentation and/or other materials provided with the distribution. |
michael@0 | 12 | * 3. The name of the author may not be used to endorse or promote products |
michael@0 | 13 | * derived from this software without specific prior written permission. |
michael@0 | 14 | * |
michael@0 | 15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
michael@0 | 16 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
michael@0 | 17 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
michael@0 | 18 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
michael@0 | 19 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
michael@0 | 20 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
michael@0 | 21 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
michael@0 | 22 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
michael@0 | 23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
michael@0 | 24 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
michael@0 | 25 | */ |
michael@0 | 26 | #ifndef _BUFFEREVENT_INTERNAL_H_ |
michael@0 | 27 | #define _BUFFEREVENT_INTERNAL_H_ |
michael@0 | 28 | |
michael@0 | 29 | #ifdef __cplusplus |
michael@0 | 30 | extern "C" { |
michael@0 | 31 | #endif |
michael@0 | 32 | |
michael@0 | 33 | #include "event2/event-config.h" |
michael@0 | 34 | #include "event2/util.h" |
michael@0 | 35 | #include "defer-internal.h" |
michael@0 | 36 | #include "evthread-internal.h" |
michael@0 | 37 | #include "event2/thread.h" |
michael@0 | 38 | #include "ratelim-internal.h" |
michael@0 | 39 | #include "event2/bufferevent_struct.h" |
michael@0 | 40 | |
/* These flags are reasons that we might be declining to actually enable
   reading or writing on a bufferevent.
 */

/* On all bufferevents, for reading: used when we have read up to the
   watermark value.

   On a filtering bufferevent, for writing: used when the underlying
   bufferevent's write buffer has been filled up to its watermark
   value.
*/
#define BEV_SUSPEND_WM 0x01
/* On a base bufferevent: when we have emptied a bandwidth bucket */
#define BEV_SUSPEND_BW 0x02
/* On a base bufferevent: when we have emptied the group's bandwidth bucket. */
#define BEV_SUSPEND_BW_GROUP 0x04
/* On a socket bufferevent: can't do any operations while we're waiting for
 * name lookup to finish. */
#define BEV_SUSPEND_LOOKUP 0x08
/* On a base bufferevent, for reading: used when a filter has choked this
 * (underlying) bufferevent because it has stopped reading from it. */
#define BEV_SUSPEND_FILT_READ 0x10

/* Bitfield of BEV_SUSPEND_* conditions. */
typedef ev_uint16_t bufferevent_suspend_flags;
michael@0 | 65 | |
struct bufferevent_rate_limit_group {
	/** List of all members in the group */
	TAILQ_HEAD(rlim_group_member_list, bufferevent_private) members;
	/** Current limits for the group. */
	struct ev_token_bucket rate_limit;
	struct ev_token_bucket_cfg rate_limit_cfg;

	/** True iff we don't want to read from any member of the group until
	 * the token bucket refills. */
	unsigned read_suspended : 1;
	/** True iff we don't want to write from any member of the group until
	 * the token bucket refills. */
	unsigned write_suspended : 1;
	/** True iff we were unable to suspend one of the bufferevents in the
	 * group for reading the last time we tried, and we should try
	 * again. */
	unsigned pending_unsuspend_read : 1;
	/** True iff we were unable to suspend one of the bufferevents in the
	 * group for writing the last time we tried, and we should try
	 * again. */
	unsigned pending_unsuspend_write : 1;

	/*@{*/
	/** Total number of bytes read or written in this group since last
	 * reset. */
	ev_uint64_t total_read;
	ev_uint64_t total_written;
	/*@}*/

	/** The number of bufferevents in the group. */
	int n_members;

	/** The smallest number of bytes that any member of the group should
	 * be limited to read or write at a time. */
	ev_ssize_t min_share;
	ev_ssize_t configured_min_share;

	/** Timeout event that goes off once a tick, when the bucket is ready
	 * to refill. */
	struct event master_refill_event;
	/** Lock to protect the members of this group.  This lock should nest
	 * within every bufferevent lock: if you are holding this lock, do
	 * not assume you can lock another bufferevent. */
	void *lock;
};
michael@0 | 111 | |
/** Fields for rate-limiting a single bufferevent. */
struct bufferevent_rate_limit {
	/* Linked-list elements for storing this bufferevent_private in a
	 * group.
	 *
	 * Note that this field is supposed to be protected by the group
	 * lock */
	TAILQ_ENTRY(bufferevent_private) next_in_group;
	/** The rate-limiting group for this bufferevent, or NULL if it is
	 * only rate-limited on its own. */
	struct bufferevent_rate_limit_group *group;

	/* This bufferevent's current limits. */
	struct ev_token_bucket limit;
	/* Pointer to the rate-limit configuration for this bufferevent.
	 * Can be shared.  XXX reference-count this? */
	struct ev_token_bucket_cfg *cfg;

	/* Timeout event used when one of this bufferevent's buckets is
	 * empty. */
	struct event refill_bucket_event;
};
michael@0 | 134 | |
/** Parts of the bufferevent structure that are shared among all bufferevent
 * types, but not exposed in bufferevent_struct.h. */
struct bufferevent_private {
	/** The underlying bufferevent structure. */
	struct bufferevent bev;

	/** Evbuffer callback to enforce watermarks on input. */
	struct evbuffer_cb_entry *read_watermarks_cb;

	/** If set, we should free the lock when we free the bufferevent. */
	unsigned own_lock : 1;

	/** Flag: set if we have deferred callbacks and a read callback is
	 * pending. */
	unsigned readcb_pending : 1;
	/** Flag: set if we have deferred callbacks and a write callback is
	 * pending. */
	unsigned writecb_pending : 1;
	/** Flag: set if we are currently busy connecting. */
	unsigned connecting : 1;
	/** Flag: set if a connect failed prematurely; this is a hack for
	 * getting around the bufferevent abstraction. */
	unsigned connection_refused : 1;
	/** Set to the events pending if we have deferred callbacks and
	 * an events callback is pending. */
	short eventcb_pending;

	/** If set, read is suspended until one or more conditions are over.
	 * The actual value here is a bitfield of those conditions; see the
	 * BEV_SUSPEND_* flags above. */
	bufferevent_suspend_flags read_suspended;

	/** If set, writing is suspended until one or more conditions are over.
	 * The actual value here is a bitfield of those conditions; see the
	 * BEV_SUSPEND_* flags above. */
	bufferevent_suspend_flags write_suspended;

	/** Set to the current socket errno if we have deferred callbacks and
	 * an events callback is pending. */
	int errno_pending;

	/** The DNS error code for bufferevent_socket_connect_hostname */
	int dns_error;

	/** Used to implement deferred callbacks */
	struct deferred_cb deferred;

	/** The options this bufferevent was constructed with */
	enum bufferevent_options options;

	/** Current reference count for this bufferevent. */
	int refcnt;

	/** Lock for this bufferevent.  Shared by the inbuf and the outbuf.
	 * If NULL, locking is disabled. */
	void *lock;

	/** Rate-limiting information for this bufferevent */
	struct bufferevent_rate_limit *rate_limiting;
};
michael@0 | 195 | |
/** Possible operations for a control callback. */
enum bufferevent_ctrl_op {
	BEV_CTRL_SET_FD,
	BEV_CTRL_GET_FD,
	BEV_CTRL_GET_UNDERLYING,
	BEV_CTRL_CANCEL_ALL
};

/** Possible data types for a control callback */
union bufferevent_ctrl_data {
	void *ptr;
	evutil_socket_t fd;
};
michael@0 | 209 | |
/**
   Implementation table for a bufferevent: holds function pointers and other
   information to make the various bufferevent types work.
*/
struct bufferevent_ops {
	/** The name of the bufferevent's type. */
	const char *type;
	/** At what offset into the implementation type will we find a
	    bufferevent structure?

	    Example: if the type is implemented as
	    struct bufferevent_x {
	       int extra_data;
	       struct bufferevent bev;
	    }
	    then mem_offset should be offsetof(struct bufferevent_x, bev)
	*/
	off_t mem_offset;

	/** Enables one or more of EV_READ|EV_WRITE on a bufferevent.  Does
	    not need to adjust the 'enabled' field.  Returns 0 on success, -1
	    on failure.
	 */
	int (*enable)(struct bufferevent *, short);

	/** Disables one or more of EV_READ|EV_WRITE on a bufferevent.  Does
	    not need to adjust the 'enabled' field.  Returns 0 on success, -1
	    on failure.
	 */
	int (*disable)(struct bufferevent *, short);

	/** Free any storage and deallocate any extra data or structures used
	    in this implementation.
	 */
	void (*destruct)(struct bufferevent *);

	/** Called when the timeouts on the bufferevent have changed.*/
	int (*adj_timeouts)(struct bufferevent *);

	/** Called to flush data. */
	int (*flush)(struct bufferevent *, short, enum bufferevent_flush_mode);

	/** Called to access miscellaneous fields. */
	int (*ctrl)(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);

};
michael@0 | 256 | |
extern const struct bufferevent_ops bufferevent_ops_socket;
extern const struct bufferevent_ops bufferevent_ops_filter;
extern const struct bufferevent_ops bufferevent_ops_pair;

/* Type-test macros: true iff bevp was constructed by the named backend. */
#define BEV_IS_SOCKET(bevp) ((bevp)->be_ops == &bufferevent_ops_socket)
#define BEV_IS_FILTER(bevp) ((bevp)->be_ops == &bufferevent_ops_filter)
#define BEV_IS_PAIR(bevp) ((bevp)->be_ops == &bufferevent_ops_pair)

#ifdef WIN32
extern const struct bufferevent_ops bufferevent_ops_async;
#define BEV_IS_ASYNC(bevp) ((bevp)->be_ops == &bufferevent_ops_async)
#else
#define BEV_IS_ASYNC(bevp) 0
#endif
michael@0 | 271 | |
/** Initialize the shared parts of a bufferevent. */
int bufferevent_init_common(struct bufferevent_private *, struct event_base *, const struct bufferevent_ops *, enum bufferevent_options options);

/** For internal use: temporarily stop all reads on bufev, until the conditions
 * in 'what' are over. */
void bufferevent_suspend_read(struct bufferevent *bufev, bufferevent_suspend_flags what);
/** For internal use: clear the conditions 'what' on bufev, and re-enable
 * reading if there are no conditions left. */
void bufferevent_unsuspend_read(struct bufferevent *bufev, bufferevent_suspend_flags what);

/** For internal use: temporarily stop all writes on bufev, until the conditions
 * in 'what' are over. */
void bufferevent_suspend_write(struct bufferevent *bufev, bufferevent_suspend_flags what);
/** For internal use: clear the conditions 'what' on bufev, and re-enable
 * writing if there are no conditions left. */
void bufferevent_unsuspend_write(struct bufferevent *bufev, bufferevent_suspend_flags what);

/* Shorthands for suspending/unsuspending reads for the watermark condition. */
#define bufferevent_wm_suspend_read(b) \
	bufferevent_suspend_read((b), BEV_SUSPEND_WM)
#define bufferevent_wm_unsuspend_read(b) \
	bufferevent_unsuspend_read((b), BEV_SUSPEND_WM)

/*
  Disable a bufferevent.  Equivalent to bufferevent_disable(), but
  first resets 'connecting' flag to force EV_WRITE down for sure.

  XXXX this method will go away in the future; try not to add new users.
      See comment in evhttp_connection_reset() for discussion.

  @param bufev the bufferevent to be disabled
  @param event any combination of EV_READ | EV_WRITE.
  @return 0 if successful, or -1 if an error occurred
  @see bufferevent_disable()
 */
int bufferevent_disable_hard(struct bufferevent *bufev, short event);

/** Internal: Set up locking on a bufferevent.  If lock is set, use it.
 * Otherwise, use a new lock. */
int bufferevent_enable_locking(struct bufferevent *bufev, void *lock);
/** Internal: Increment the reference count on bufev. */
void bufferevent_incref(struct bufferevent *bufev);
/** Internal: Lock bufev and increase its reference count. */
void _bufferevent_incref_and_lock(struct bufferevent *bufev);
/** Internal: Decrement the reference count on bufev.  Returns 1 if it freed
 * the bufferevent.*/
int bufferevent_decref(struct bufferevent *bufev);
/** Internal: Drop the reference count on bufev, freeing as necessary, and
 * unlocking it otherwise.  Returns 1 if it freed the bufferevent. */
int _bufferevent_decref_and_unlock(struct bufferevent *bufev);

/** Internal: If callbacks are deferred and we have a read callback, schedule
 * a readcb.  Otherwise just run the readcb. */
void _bufferevent_run_readcb(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have a write callback, schedule
 * a writecb.  Otherwise just run the writecb. */
void _bufferevent_run_writecb(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have an eventcb, schedule
 * it to run with events "what".  Otherwise just run the eventcb. */
void _bufferevent_run_eventcb(struct bufferevent *bufev, short what);

/** Internal: Add the event 'ev' with timeout tv, unless tv is set to 0, in
 * which case add ev with no timeout. */
int _bufferevent_add_event(struct event *ev, const struct timeval *tv);
michael@0 | 336 | |
/* =========
 * These next functions implement timeouts for bufferevents that aren't doing
 * anything else with ev_read and ev_write, to handle timeouts.
 * ========= */
/** Internal use: Set up the ev_read and ev_write callbacks so that
 * the other "generic_timeout" functions will work on it.  Call this from
 * the constructor function. */
void _bufferevent_init_generic_timeout_cbs(struct bufferevent *bev);
/** Internal use: Delete the ev_read and ev_write callbacks if they're pending.
 * Call this from the destructor function. */
int _bufferevent_del_generic_timeout_cbs(struct bufferevent *bev);
/** Internal use: Add or delete the generic timeout events as appropriate.
 * (If an event is enabled and a timeout is set, we add the event.  Otherwise
 * we delete it.)  Call this from anything that changes the timeout values,
 * that enables EV_READ or EV_WRITE, or that disables EV_READ or EV_WRITE. */
int _bufferevent_generic_adj_timeouts(struct bufferevent *bev);

/** Internal use: We have just successfully read data into an inbuf, so
 * reset the read timeout (if any). */
#define BEV_RESET_GENERIC_READ_TIMEOUT(bev)				\
	do {								\
		if (evutil_timerisset(&(bev)->timeout_read))		\
			event_add(&(bev)->ev_read, &(bev)->timeout_read); \
	} while (0)
/** Internal use: We have just successfully written data from an outbuf, so
 * reset the write timeout (if any). */
#define BEV_RESET_GENERIC_WRITE_TIMEOUT(bev)				\
	do {								\
		if (evutil_timerisset(&(bev)->timeout_write))		\
			event_add(&(bev)->ev_write, &(bev)->timeout_write); \
	} while (0)
#define BEV_DEL_GENERIC_READ_TIMEOUT(bev)	\
	event_del(&(bev)->ev_read)
#define BEV_DEL_GENERIC_WRITE_TIMEOUT(bev)	\
	event_del(&(bev)->ev_write)
michael@0 | 372 | |

/** Internal: Given a bufferevent, return its corresponding
 * bufferevent_private. */
#define BEV_UPCAST(b) EVUTIL_UPCAST((b), struct bufferevent_private, bev)

#ifdef _EVENT_DISABLE_THREAD_SUPPORT
/* With thread support compiled out, locking is a no-op. */
#define BEV_LOCK(b) _EVUTIL_NIL_STMT
#define BEV_UNLOCK(b) _EVUTIL_NIL_STMT
#else
/** Internal: Grab the lock (if any) on a bufferevent */
#define BEV_LOCK(b) do {						\
		struct bufferevent_private *locking =  BEV_UPCAST(b);	\
		EVLOCK_LOCK(locking->lock, 0);				\
	} while (0)

/** Internal: Release the lock (if any) on a bufferevent */
#define BEV_UNLOCK(b) do {						\
		struct bufferevent_private *locking =  BEV_UPCAST(b);	\
		EVLOCK_UNLOCK(locking->lock, 0);			\
	} while (0)
#endif
michael@0 | 394 | |
michael@0 | 395 | |
/* ==== For rate-limiting. */

/* Internal: Deduct 'bytes' from the bufferevent's write/read token bucket
 * (and its group's, if any), suspending as needed when a bucket empties. */
int _bufferevent_decrement_write_buckets(struct bufferevent_private *bev,
    ev_ssize_t bytes);
int _bufferevent_decrement_read_buckets(struct bufferevent_private *bev,
    ev_ssize_t bytes);
/* Internal: Return the maximum number of bytes this bufferevent may
 * currently read/write under its rate limits. */
ev_ssize_t _bufferevent_get_read_max(struct bufferevent_private *bev);
ev_ssize_t _bufferevent_get_write_max(struct bufferevent_private *bev);
michael@0 | 404 | |
michael@0 | 405 | #ifdef __cplusplus |
michael@0 | 406 | } |
michael@0 | 407 | #endif |
michael@0 | 408 | |
michael@0 | 409 | |
michael@0 | 410 | #endif /* _BUFFEREVENT_INTERNAL_H_ */ |