ipc/chromium/src/third_party/libevent/bufferevent.c

Wed, 31 Dec 2014 06:09:35 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Wed, 31 Dec 2014 06:09:35 +0100
changeset 0
6474c204b198
permissions
-rw-r--r--

Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for experimentation purposes.

michael@0 1 /*
michael@0 2 * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
michael@0 3 * Copyright (c) 2007-2012 Niels Provos, Nick Mathewson
michael@0 4 *
michael@0 5 * Redistribution and use in source and binary forms, with or without
michael@0 6 * modification, are permitted provided that the following conditions
michael@0 7 * are met:
michael@0 8 * 1. Redistributions of source code must retain the above copyright
michael@0 9 * notice, this list of conditions and the following disclaimer.
michael@0 10 * 2. Redistributions in binary form must reproduce the above copyright
michael@0 11 * notice, this list of conditions and the following disclaimer in the
michael@0 12 * documentation and/or other materials provided with the distribution.
michael@0 13 * 3. The name of the author may not be used to endorse or promote products
michael@0 14 * derived from this software without specific prior written permission.
michael@0 15 *
michael@0 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
michael@0 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
michael@0 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
michael@0 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
michael@0 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
michael@0 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
michael@0 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
michael@0 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
michael@0 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
michael@0 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
michael@0 26 */
michael@0 27
michael@0 28 #include <sys/types.h>
michael@0 29
michael@0 30 #include "event2/event-config.h"
michael@0 31
michael@0 32 #ifdef _EVENT_HAVE_SYS_TIME_H
michael@0 33 #include <sys/time.h>
michael@0 34 #endif
michael@0 35
michael@0 36 #include <errno.h>
michael@0 37 #include <stdio.h>
michael@0 38 #include <stdlib.h>
michael@0 39 #include <string.h>
michael@0 40 #ifdef _EVENT_HAVE_STDARG_H
michael@0 41 #include <stdarg.h>
michael@0 42 #endif
michael@0 43
michael@0 44 #ifdef WIN32
michael@0 45 #include <winsock2.h>
michael@0 46 #endif
michael@0 47 #include <errno.h>
michael@0 48
michael@0 49 #include "event2/util.h"
michael@0 50 #include "event2/buffer.h"
michael@0 51 #include "event2/buffer_compat.h"
michael@0 52 #include "event2/bufferevent.h"
michael@0 53 #include "event2/bufferevent_struct.h"
michael@0 54 #include "event2/bufferevent_compat.h"
michael@0 55 #include "event2/event.h"
michael@0 56 #include "log-internal.h"
michael@0 57 #include "mm-internal.h"
michael@0 58 #include "bufferevent-internal.h"
michael@0 59 #include "evbuffer-internal.h"
michael@0 60 #include "util-internal.h"
michael@0 61
michael@0 62 static void _bufferevent_cancel_all(struct bufferevent *bev);
michael@0 63
michael@0 64
michael@0 65 void
michael@0 66 bufferevent_suspend_read(struct bufferevent *bufev, bufferevent_suspend_flags what)
michael@0 67 {
michael@0 68 struct bufferevent_private *bufev_private =
michael@0 69 EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
michael@0 70 BEV_LOCK(bufev);
michael@0 71 if (!bufev_private->read_suspended)
michael@0 72 bufev->be_ops->disable(bufev, EV_READ);
michael@0 73 bufev_private->read_suspended |= what;
michael@0 74 BEV_UNLOCK(bufev);
michael@0 75 }
michael@0 76
michael@0 77 void
michael@0 78 bufferevent_unsuspend_read(struct bufferevent *bufev, bufferevent_suspend_flags what)
michael@0 79 {
michael@0 80 struct bufferevent_private *bufev_private =
michael@0 81 EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
michael@0 82 BEV_LOCK(bufev);
michael@0 83 bufev_private->read_suspended &= ~what;
michael@0 84 if (!bufev_private->read_suspended && (bufev->enabled & EV_READ))
michael@0 85 bufev->be_ops->enable(bufev, EV_READ);
michael@0 86 BEV_UNLOCK(bufev);
michael@0 87 }
michael@0 88
michael@0 89 void
michael@0 90 bufferevent_suspend_write(struct bufferevent *bufev, bufferevent_suspend_flags what)
michael@0 91 {
michael@0 92 struct bufferevent_private *bufev_private =
michael@0 93 EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
michael@0 94 BEV_LOCK(bufev);
michael@0 95 if (!bufev_private->write_suspended)
michael@0 96 bufev->be_ops->disable(bufev, EV_WRITE);
michael@0 97 bufev_private->write_suspended |= what;
michael@0 98 BEV_UNLOCK(bufev);
michael@0 99 }
michael@0 100
michael@0 101 void
michael@0 102 bufferevent_unsuspend_write(struct bufferevent *bufev, bufferevent_suspend_flags what)
michael@0 103 {
michael@0 104 struct bufferevent_private *bufev_private =
michael@0 105 EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
michael@0 106 BEV_LOCK(bufev);
michael@0 107 bufev_private->write_suspended &= ~what;
michael@0 108 if (!bufev_private->write_suspended && (bufev->enabled & EV_WRITE))
michael@0 109 bufev->be_ops->enable(bufev, EV_WRITE);
michael@0 110 BEV_UNLOCK(bufev);
michael@0 111 }
michael@0 112
michael@0 113
michael@0 114 /* Callback to implement watermarks on the input buffer. Only enabled
michael@0 115 * if the watermark is set. */
michael@0 116 static void
michael@0 117 bufferevent_inbuf_wm_cb(struct evbuffer *buf,
michael@0 118 const struct evbuffer_cb_info *cbinfo,
michael@0 119 void *arg)
michael@0 120 {
michael@0 121 struct bufferevent *bufev = arg;
michael@0 122 size_t size;
michael@0 123
michael@0 124 size = evbuffer_get_length(buf);
michael@0 125
michael@0 126 if (size >= bufev->wm_read.high)
michael@0 127 bufferevent_wm_suspend_read(bufev);
michael@0 128 else
michael@0 129 bufferevent_wm_unsuspend_read(bufev);
michael@0 130 }
michael@0 131
/* Deferred-callback handler used when BEV_OPT_UNLOCK_CALLBACKS is not set:
 * the bufferevent lock is held across every user callback.  Runs all
 * pending read/write/event callbacks, then drops the reference taken in
 * SCHEDULE_DEFERRED. */
static void
bufferevent_run_deferred_callbacks_locked(struct deferred_cb *_, void *arg)
{
	struct bufferevent_private *bufev_private = arg;
	struct bufferevent *bufev = &bufev_private->bev;

	BEV_LOCK(bufev);
	if ((bufev_private->eventcb_pending & BEV_EVENT_CONNECTED) &&
	    bufev->errorcb) {
		/* The "connected" happened before any reads or writes, so
		   send it first. */
		bufev_private->eventcb_pending &= ~BEV_EVENT_CONNECTED;
		bufev->errorcb(bufev, BEV_EVENT_CONNECTED, bufev->cbarg);
	}
	if (bufev_private->readcb_pending && bufev->readcb) {
		/* Clear the pending flag before invoking the callback, so a
		 * callback that re-queues work is not lost. */
		bufev_private->readcb_pending = 0;
		bufev->readcb(bufev, bufev->cbarg);
	}
	if (bufev_private->writecb_pending && bufev->writecb) {
		bufev_private->writecb_pending = 0;
		bufev->writecb(bufev, bufev->cbarg);
	}
	if (bufev_private->eventcb_pending && bufev->errorcb) {
		short what = bufev_private->eventcb_pending;
		int err = bufev_private->errno_pending;
		bufev_private->eventcb_pending = 0;
		bufev_private->errno_pending = 0;
		/* Restore the socket error that was current when the event
		 * was queued, so the callback can inspect it. */
		EVUTIL_SET_SOCKET_ERROR(err);
		bufev->errorcb(bufev, what, bufev->cbarg);
	}
	/* Releases the SCHEDULE_DEFERRED reference and the lock. */
	_bufferevent_decref_and_unlock(bufev);
}
michael@0 164
/* Deferred-callback handler used with BEV_OPT_UNLOCK_CALLBACKS: the lock
 * is dropped around each user callback (via the UNLOCKED macro), so each
 * callback pointer and its argument are copied into locals while the lock
 * is still held.  Finishes by dropping the SCHEDULE_DEFERRED reference. */
static void
bufferevent_run_deferred_callbacks_unlocked(struct deferred_cb *_, void *arg)
{
	struct bufferevent_private *bufev_private = arg;
	struct bufferevent *bufev = &bufev_private->bev;

	BEV_LOCK(bufev);
/* Run 'stmt' with the bufferevent lock released, then re-acquire it. */
#define UNLOCKED(stmt) \
	do { BEV_UNLOCK(bufev); stmt; BEV_LOCK(bufev); } while(0)

	if ((bufev_private->eventcb_pending & BEV_EVENT_CONNECTED) &&
	    bufev->errorcb) {
		/* The "connected" happened before any reads or writes, so
		   send it first. */
		bufferevent_event_cb errorcb = bufev->errorcb;
		void *cbarg = bufev->cbarg;
		bufev_private->eventcb_pending &= ~BEV_EVENT_CONNECTED;
		UNLOCKED(errorcb(bufev, BEV_EVENT_CONNECTED, cbarg));
	}
	if (bufev_private->readcb_pending && bufev->readcb) {
		bufferevent_data_cb readcb = bufev->readcb;
		void *cbarg = bufev->cbarg;
		/* Clear the pending flag before the unlocked call so work
		 * queued by the callback itself is not lost. */
		bufev_private->readcb_pending = 0;
		UNLOCKED(readcb(bufev, cbarg));
	}
	if (bufev_private->writecb_pending && bufev->writecb) {
		bufferevent_data_cb writecb = bufev->writecb;
		void *cbarg = bufev->cbarg;
		bufev_private->writecb_pending = 0;
		UNLOCKED(writecb(bufev, cbarg));
	}
	if (bufev_private->eventcb_pending && bufev->errorcb) {
		bufferevent_event_cb errorcb = bufev->errorcb;
		void *cbarg = bufev->cbarg;
		short what = bufev_private->eventcb_pending;
		int err = bufev_private->errno_pending;
		bufev_private->eventcb_pending = 0;
		bufev_private->errno_pending = 0;
		/* Restore the socket error recorded when the event was
		 * queued, so the callback can inspect it. */
		EVUTIL_SET_SOCKET_ERROR(err);
		UNLOCKED(errorcb(bufev,what,cbarg));
	}
	/* Releases the SCHEDULE_DEFERRED reference and the lock. */
	_bufferevent_decref_and_unlock(bufev);
#undef UNLOCKED
}
michael@0 209
/* Queue 'bevp''s deferred callback on its event_base's deferred-cb queue,
 * taking a reference that the bufferevent_run_deferred_callbacks_*
 * handlers later release via _bufferevent_decref_and_unlock(). */
#define SCHEDULE_DEFERRED(bevp) \
	do { \
		bufferevent_incref(&(bevp)->bev); \
		event_deferred_cb_schedule( \
			event_base_get_deferred_cb_queue((bevp)->bev.ev_base), \
			&(bevp)->deferred); \
	} while (0)
michael@0 217
michael@0 218
michael@0 219 void
michael@0 220 _bufferevent_run_readcb(struct bufferevent *bufev)
michael@0 221 {
michael@0 222 /* Requires that we hold the lock and a reference */
michael@0 223 struct bufferevent_private *p =
michael@0 224 EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
michael@0 225 if (bufev->readcb == NULL)
michael@0 226 return;
michael@0 227 if (p->options & BEV_OPT_DEFER_CALLBACKS) {
michael@0 228 p->readcb_pending = 1;
michael@0 229 if (!p->deferred.queued)
michael@0 230 SCHEDULE_DEFERRED(p);
michael@0 231 } else {
michael@0 232 bufev->readcb(bufev, bufev->cbarg);
michael@0 233 }
michael@0 234 }
michael@0 235
michael@0 236 void
michael@0 237 _bufferevent_run_writecb(struct bufferevent *bufev)
michael@0 238 {
michael@0 239 /* Requires that we hold the lock and a reference */
michael@0 240 struct bufferevent_private *p =
michael@0 241 EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
michael@0 242 if (bufev->writecb == NULL)
michael@0 243 return;
michael@0 244 if (p->options & BEV_OPT_DEFER_CALLBACKS) {
michael@0 245 p->writecb_pending = 1;
michael@0 246 if (!p->deferred.queued)
michael@0 247 SCHEDULE_DEFERRED(p);
michael@0 248 } else {
michael@0 249 bufev->writecb(bufev, bufev->cbarg);
michael@0 250 }
michael@0 251 }
michael@0 252
michael@0 253 void
michael@0 254 _bufferevent_run_eventcb(struct bufferevent *bufev, short what)
michael@0 255 {
michael@0 256 /* Requires that we hold the lock and a reference */
michael@0 257 struct bufferevent_private *p =
michael@0 258 EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
michael@0 259 if (bufev->errorcb == NULL)
michael@0 260 return;
michael@0 261 if (p->options & BEV_OPT_DEFER_CALLBACKS) {
michael@0 262 p->eventcb_pending |= what;
michael@0 263 p->errno_pending = EVUTIL_SOCKET_ERROR();
michael@0 264 if (!p->deferred.queued)
michael@0 265 SCHEDULE_DEFERRED(p);
michael@0 266 } else {
michael@0 267 bufev->errorcb(bufev, what, bufev->cbarg);
michael@0 268 }
michael@0 269 }
michael@0 270
michael@0 271 int
michael@0 272 bufferevent_init_common(struct bufferevent_private *bufev_private,
michael@0 273 struct event_base *base,
michael@0 274 const struct bufferevent_ops *ops,
michael@0 275 enum bufferevent_options options)
michael@0 276 {
michael@0 277 struct bufferevent *bufev = &bufev_private->bev;
michael@0 278
michael@0 279 if (!bufev->input) {
michael@0 280 if ((bufev->input = evbuffer_new()) == NULL)
michael@0 281 return -1;
michael@0 282 }
michael@0 283
michael@0 284 if (!bufev->output) {
michael@0 285 if ((bufev->output = evbuffer_new()) == NULL) {
michael@0 286 evbuffer_free(bufev->input);
michael@0 287 return -1;
michael@0 288 }
michael@0 289 }
michael@0 290
michael@0 291 bufev_private->refcnt = 1;
michael@0 292 bufev->ev_base = base;
michael@0 293
michael@0 294 /* Disable timeouts. */
michael@0 295 evutil_timerclear(&bufev->timeout_read);
michael@0 296 evutil_timerclear(&bufev->timeout_write);
michael@0 297
michael@0 298 bufev->be_ops = ops;
michael@0 299
michael@0 300 /*
michael@0 301 * Set to EV_WRITE so that using bufferevent_write is going to
michael@0 302 * trigger a callback. Reading needs to be explicitly enabled
michael@0 303 * because otherwise no data will be available.
michael@0 304 */
michael@0 305 bufev->enabled = EV_WRITE;
michael@0 306
michael@0 307 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
michael@0 308 if (options & BEV_OPT_THREADSAFE) {
michael@0 309 if (bufferevent_enable_locking(bufev, NULL) < 0) {
michael@0 310 /* cleanup */
michael@0 311 evbuffer_free(bufev->input);
michael@0 312 evbuffer_free(bufev->output);
michael@0 313 bufev->input = NULL;
michael@0 314 bufev->output = NULL;
michael@0 315 return -1;
michael@0 316 }
michael@0 317 }
michael@0 318 #endif
michael@0 319 if ((options & (BEV_OPT_DEFER_CALLBACKS|BEV_OPT_UNLOCK_CALLBACKS))
michael@0 320 == BEV_OPT_UNLOCK_CALLBACKS) {
michael@0 321 event_warnx("UNLOCK_CALLBACKS requires DEFER_CALLBACKS");
michael@0 322 return -1;
michael@0 323 }
michael@0 324 if (options & BEV_OPT_DEFER_CALLBACKS) {
michael@0 325 if (options & BEV_OPT_UNLOCK_CALLBACKS)
michael@0 326 event_deferred_cb_init(&bufev_private->deferred,
michael@0 327 bufferevent_run_deferred_callbacks_unlocked,
michael@0 328 bufev_private);
michael@0 329 else
michael@0 330 event_deferred_cb_init(&bufev_private->deferred,
michael@0 331 bufferevent_run_deferred_callbacks_locked,
michael@0 332 bufev_private);
michael@0 333 }
michael@0 334
michael@0 335 bufev_private->options = options;
michael@0 336
michael@0 337 evbuffer_set_parent(bufev->input, bufev);
michael@0 338 evbuffer_set_parent(bufev->output, bufev);
michael@0 339
michael@0 340 return 0;
michael@0 341 }
michael@0 342
michael@0 343 void
michael@0 344 bufferevent_setcb(struct bufferevent *bufev,
michael@0 345 bufferevent_data_cb readcb, bufferevent_data_cb writecb,
michael@0 346 bufferevent_event_cb eventcb, void *cbarg)
michael@0 347 {
michael@0 348 BEV_LOCK(bufev);
michael@0 349
michael@0 350 bufev->readcb = readcb;
michael@0 351 bufev->writecb = writecb;
michael@0 352 bufev->errorcb = eventcb;
michael@0 353
michael@0 354 bufev->cbarg = cbarg;
michael@0 355 BEV_UNLOCK(bufev);
michael@0 356 }
michael@0 357
michael@0 358 struct evbuffer *
michael@0 359 bufferevent_get_input(struct bufferevent *bufev)
michael@0 360 {
michael@0 361 return bufev->input;
michael@0 362 }
michael@0 363
michael@0 364 struct evbuffer *
michael@0 365 bufferevent_get_output(struct bufferevent *bufev)
michael@0 366 {
michael@0 367 return bufev->output;
michael@0 368 }
michael@0 369
michael@0 370 struct event_base *
michael@0 371 bufferevent_get_base(struct bufferevent *bufev)
michael@0 372 {
michael@0 373 return bufev->ev_base;
michael@0 374 }
michael@0 375
michael@0 376 int
michael@0 377 bufferevent_write(struct bufferevent *bufev, const void *data, size_t size)
michael@0 378 {
michael@0 379 if (evbuffer_add(bufev->output, data, size) == -1)
michael@0 380 return (-1);
michael@0 381
michael@0 382 return 0;
michael@0 383 }
michael@0 384
michael@0 385 int
michael@0 386 bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf)
michael@0 387 {
michael@0 388 if (evbuffer_add_buffer(bufev->output, buf) == -1)
michael@0 389 return (-1);
michael@0 390
michael@0 391 return 0;
michael@0 392 }
michael@0 393
/* Drain up to 'size' bytes from the input buffer into 'data'; returns the
 * number of bytes removed.
 * NOTE(review): evbuffer_remove() returns int; a -1 error result would be
 * converted to a huge size_t here.  Longstanding upstream behavior —
 * confirm before changing. */
size_t
bufferevent_read(struct bufferevent *bufev, void *data, size_t size)
{
	return (evbuffer_remove(bufev->input, data, size));
}
michael@0 399
michael@0 400 int
michael@0 401 bufferevent_read_buffer(struct bufferevent *bufev, struct evbuffer *buf)
michael@0 402 {
michael@0 403 return (evbuffer_add_buffer(buf, bufev->input));
michael@0 404 }
michael@0 405
michael@0 406 int
michael@0 407 bufferevent_enable(struct bufferevent *bufev, short event)
michael@0 408 {
michael@0 409 struct bufferevent_private *bufev_private =
michael@0 410 EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
michael@0 411 short impl_events = event;
michael@0 412 int r = 0;
michael@0 413
michael@0 414 _bufferevent_incref_and_lock(bufev);
michael@0 415 if (bufev_private->read_suspended)
michael@0 416 impl_events &= ~EV_READ;
michael@0 417 if (bufev_private->write_suspended)
michael@0 418 impl_events &= ~EV_WRITE;
michael@0 419
michael@0 420 bufev->enabled |= event;
michael@0 421
michael@0 422 if (impl_events && bufev->be_ops->enable(bufev, impl_events) < 0)
michael@0 423 r = -1;
michael@0 424
michael@0 425 _bufferevent_decref_and_unlock(bufev);
michael@0 426 return r;
michael@0 427 }
michael@0 428
michael@0 429 int
michael@0 430 bufferevent_set_timeouts(struct bufferevent *bufev,
michael@0 431 const struct timeval *tv_read,
michael@0 432 const struct timeval *tv_write)
michael@0 433 {
michael@0 434 int r = 0;
michael@0 435 BEV_LOCK(bufev);
michael@0 436 if (tv_read) {
michael@0 437 bufev->timeout_read = *tv_read;
michael@0 438 } else {
michael@0 439 evutil_timerclear(&bufev->timeout_read);
michael@0 440 }
michael@0 441 if (tv_write) {
michael@0 442 bufev->timeout_write = *tv_write;
michael@0 443 } else {
michael@0 444 evutil_timerclear(&bufev->timeout_write);
michael@0 445 }
michael@0 446
michael@0 447 if (bufev->be_ops->adj_timeouts)
michael@0 448 r = bufev->be_ops->adj_timeouts(bufev);
michael@0 449 BEV_UNLOCK(bufev);
michael@0 450
michael@0 451 return r;
michael@0 452 }
michael@0 453
michael@0 454
michael@0 455 /* Obsolete; use bufferevent_set_timeouts */
michael@0 456 void
michael@0 457 bufferevent_settimeout(struct bufferevent *bufev,
michael@0 458 int timeout_read, int timeout_write)
michael@0 459 {
michael@0 460 struct timeval tv_read, tv_write;
michael@0 461 struct timeval *ptv_read = NULL, *ptv_write = NULL;
michael@0 462
michael@0 463 memset(&tv_read, 0, sizeof(tv_read));
michael@0 464 memset(&tv_write, 0, sizeof(tv_write));
michael@0 465
michael@0 466 if (timeout_read) {
michael@0 467 tv_read.tv_sec = timeout_read;
michael@0 468 ptv_read = &tv_read;
michael@0 469 }
michael@0 470 if (timeout_write) {
michael@0 471 tv_write.tv_sec = timeout_write;
michael@0 472 ptv_write = &tv_write;
michael@0 473 }
michael@0 474
michael@0 475 bufferevent_set_timeouts(bufev, ptv_read, ptv_write);
michael@0 476 }
michael@0 477
michael@0 478
michael@0 479 int
michael@0 480 bufferevent_disable_hard(struct bufferevent *bufev, short event)
michael@0 481 {
michael@0 482 int r = 0;
michael@0 483 struct bufferevent_private *bufev_private =
michael@0 484 EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
michael@0 485
michael@0 486 BEV_LOCK(bufev);
michael@0 487 bufev->enabled &= ~event;
michael@0 488
michael@0 489 bufev_private->connecting = 0;
michael@0 490 if (bufev->be_ops->disable(bufev, event) < 0)
michael@0 491 r = -1;
michael@0 492
michael@0 493 BEV_UNLOCK(bufev);
michael@0 494 return r;
michael@0 495 }
michael@0 496
michael@0 497 int
michael@0 498 bufferevent_disable(struct bufferevent *bufev, short event)
michael@0 499 {
michael@0 500 int r = 0;
michael@0 501
michael@0 502 BEV_LOCK(bufev);
michael@0 503 bufev->enabled &= ~event;
michael@0 504
michael@0 505 if (bufev->be_ops->disable(bufev, event) < 0)
michael@0 506 r = -1;
michael@0 507
michael@0 508 BEV_UNLOCK(bufev);
michael@0 509 return r;
michael@0 510 }
michael@0 511
/*
 * Sets the water marks
 */

/* Set low/high watermarks for the directions in 'events'.  For reads, a
 * nonzero high watermark installs (or re-enables) an evbuffer callback
 * that suspends reading when the input buffer fills past the mark. */
void
bufferevent_setwatermark(struct bufferevent *bufev, short events,
    size_t lowmark, size_t highmark)
{
	struct bufferevent_private *bufev_private =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);

	BEV_LOCK(bufev);
	if (events & EV_WRITE) {
		bufev->wm_write.low = lowmark;
		bufev->wm_write.high = highmark;
	}

	if (events & EV_READ) {
		bufev->wm_read.low = lowmark;
		bufev->wm_read.high = highmark;

		if (highmark) {
			/* There is now a new high-water mark for read.
			   enable the callback if needed, and see if we should
			   suspend/bufferevent_wm_unsuspend. */

			if (bufev_private->read_watermarks_cb == NULL) {
				bufev_private->read_watermarks_cb =
				    evbuffer_add_cb(bufev->input,
				    bufferevent_inbuf_wm_cb,
				    bufev);
			}
			/* The callback must run immediately (NODEFER) so the
			 * suspension takes effect before more data arrives. */
			evbuffer_cb_set_flags(bufev->input,
			    bufev_private->read_watermarks_cb,
			    EVBUFFER_CB_ENABLED|EVBUFFER_CB_NODEFER);

			/* NOTE(review): length exactly equal to highmark
			 * changes neither state here, while the callback
			 * itself suspends at >= — presumably intentional;
			 * confirm before altering. */
			if (evbuffer_get_length(bufev->input) > highmark)
				bufferevent_wm_suspend_read(bufev);
			else if (evbuffer_get_length(bufev->input) < highmark)
				bufferevent_wm_unsuspend_read(bufev);
		} else {
			/* There is now no high-water mark for read. */
			if (bufev_private->read_watermarks_cb)
				evbuffer_cb_clear_flags(bufev->input,
				    bufev_private->read_watermarks_cb,
				    EVBUFFER_CB_ENABLED);
			bufferevent_wm_unsuspend_read(bufev);
		}
	}
	BEV_UNLOCK(bufev);
}
michael@0 563
michael@0 564 int
michael@0 565 bufferevent_flush(struct bufferevent *bufev,
michael@0 566 short iotype,
michael@0 567 enum bufferevent_flush_mode mode)
michael@0 568 {
michael@0 569 int r = -1;
michael@0 570 BEV_LOCK(bufev);
michael@0 571 if (bufev->be_ops->flush)
michael@0 572 r = bufev->be_ops->flush(bufev, iotype, mode);
michael@0 573 BEV_UNLOCK(bufev);
michael@0 574 return r;
michael@0 575 }
michael@0 576
michael@0 577 void
michael@0 578 _bufferevent_incref_and_lock(struct bufferevent *bufev)
michael@0 579 {
michael@0 580 struct bufferevent_private *bufev_private =
michael@0 581 BEV_UPCAST(bufev);
michael@0 582 BEV_LOCK(bufev);
michael@0 583 ++bufev_private->refcnt;
michael@0 584 }
michael@0 585
#if 0
/* Currently disabled/unused: would hand ownership of a shared lock from
 * 'donor' to 'recipient' when both bufferevents use the same lock object
 * and the donor owns it. */
static void
_bufferevent_transfer_lock_ownership(struct bufferevent *donor,
    struct bufferevent *recipient)
{
	struct bufferevent_private *d = BEV_UPCAST(donor);
	struct bufferevent_private *r = BEV_UPCAST(recipient);
	if (d->lock != r->lock)
		return;
	if (r->own_lock)
		return;
	if (d->own_lock) {
		d->own_lock = 0;
		r->own_lock = 1;
	}
}
#endif
michael@0 603
/* Drop one reference and release the lock.  If the refcount hits zero,
 * tears the bufferevent down: backend destruct, evbuffers, rate limiting
 * state, timeout events, the lock (if owned), and finally the storage
 * itself.  Returns 1 if the object was freed, 0 otherwise. */
int
_bufferevent_decref_and_unlock(struct bufferevent *bufev)
{
	struct bufferevent_private *bufev_private =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
	struct bufferevent *underlying;

	EVUTIL_ASSERT(bufev_private->refcnt > 0);

	if (--bufev_private->refcnt) {
		/* Still referenced elsewhere: just unlock. */
		BEV_UNLOCK(bufev);
		return 0;
	}

	/* Fetched before destruct, since the ctrl hook won't be callable
	 * afterwards. */
	underlying = bufferevent_get_underlying(bufev);

	/* Clean up the shared info */
	if (bufev->be_ops->destruct)
		bufev->be_ops->destruct(bufev);

	/* XXX what happens if refcnt for these buffers is > 1?
	 * The buffers can share a lock with this bufferevent object,
	 * but the lock might be destroyed below. */
	/* evbuffer will free the callbacks */
	evbuffer_free(bufev->input);
	evbuffer_free(bufev->output);

	if (bufev_private->rate_limiting) {
		if (bufev_private->rate_limiting->group)
			bufferevent_remove_from_rate_limit_group_internal(bufev,0);
		if (event_initialized(&bufev_private->rate_limiting->refill_bucket_event))
			event_del(&bufev_private->rate_limiting->refill_bucket_event);
		event_debug_unassign(&bufev_private->rate_limiting->refill_bucket_event);
		mm_free(bufev_private->rate_limiting);
		bufev_private->rate_limiting = NULL;
	}

	event_debug_unassign(&bufev->ev_read);
	event_debug_unassign(&bufev->ev_write);

	/* Unlock before destroying the lock itself. */
	BEV_UNLOCK(bufev);
	if (bufev_private->own_lock)
		EVTHREAD_FREE_LOCK(bufev_private->lock,
		    EVTHREAD_LOCKTYPE_RECURSIVE);

	/* Free the actual allocated memory. */
	mm_free(((char*)bufev) - bufev->be_ops->mem_offset);

	/* Release the reference to underlying now that we no longer need the
	 * reference to it. We wait this long mainly in case our lock is
	 * shared with underlying.
	 *
	 * The 'destruct' function will also drop a reference to underlying
	 * if BEV_OPT_CLOSE_ON_FREE is set.
	 *
	 * XXX Should we/can we just refcount evbuffer/bufferevent locks?
	 * It would probably save us some headaches.
	 */
	if (underlying)
		bufferevent_decref(underlying);

	return 1;
}
michael@0 667
/* Drop one reference (locking wrapper around
 * _bufferevent_decref_and_unlock()).  Returns 1 if the object was freed. */
int
bufferevent_decref(struct bufferevent *bufev)
{
	BEV_LOCK(bufev);
	return _bufferevent_decref_and_unlock(bufev);
}
michael@0 674
michael@0 675 void
michael@0 676 bufferevent_free(struct bufferevent *bufev)
michael@0 677 {
michael@0 678 BEV_LOCK(bufev);
michael@0 679 bufferevent_setcb(bufev, NULL, NULL, NULL, NULL);
michael@0 680 _bufferevent_cancel_all(bufev);
michael@0 681 _bufferevent_decref_and_unlock(bufev);
michael@0 682 }
michael@0 683
michael@0 684 void
michael@0 685 bufferevent_incref(struct bufferevent *bufev)
michael@0 686 {
michael@0 687 struct bufferevent_private *bufev_private =
michael@0 688 EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
michael@0 689
michael@0 690 BEV_LOCK(bufev);
michael@0 691 ++bufev_private->refcnt;
michael@0 692 BEV_UNLOCK(bufev);
michael@0 693 }
michael@0 694
/* Enable locking on this bufferevent, using 'lock' if given.  With a NULL
 * 'lock', shares the underlying bufferevent's lock when one exists, else
 * allocates a fresh recursive lock.  The same lock is propagated to both
 * evbuffers and to the underlying bufferevent.  Returns -1 if locking is
 * compiled out, a lock is already set, or allocation fails. */
int
bufferevent_enable_locking(struct bufferevent *bufev, void *lock)
{
#ifdef _EVENT_DISABLE_THREAD_SUPPORT
	return -1;
#else
	struct bufferevent *underlying;

	/* Refuse to replace an existing lock. */
	if (BEV_UPCAST(bufev)->lock)
		return -1;
	underlying = bufferevent_get_underlying(bufev);

	if (!lock && underlying && BEV_UPCAST(underlying)->lock) {
		/* Share the underlying bufferevent's lock; we don't own it. */
		lock = BEV_UPCAST(underlying)->lock;
		BEV_UPCAST(bufev)->lock = lock;
		BEV_UPCAST(bufev)->own_lock = 0;
	} else if (!lock) {
		/* Allocate a lock of our own and take ownership. */
		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock)
			return -1;
		BEV_UPCAST(bufev)->lock = lock;
		BEV_UPCAST(bufev)->own_lock = 1;
	} else {
		/* Caller-supplied lock; caller retains ownership. */
		BEV_UPCAST(bufev)->lock = lock;
		BEV_UPCAST(bufev)->own_lock = 0;
	}
	evbuffer_enable_locking(bufev->input, lock);
	evbuffer_enable_locking(bufev->output, lock);

	/* Propagate the same lock down to the underlying bufferevent. */
	if (underlying && !BEV_UPCAST(underlying)->lock)
		bufferevent_enable_locking(underlying, lock);

	return 0;
#endif
}
michael@0 730
michael@0 731 int
michael@0 732 bufferevent_setfd(struct bufferevent *bev, evutil_socket_t fd)
michael@0 733 {
michael@0 734 union bufferevent_ctrl_data d;
michael@0 735 int res = -1;
michael@0 736 d.fd = fd;
michael@0 737 BEV_LOCK(bev);
michael@0 738 if (bev->be_ops->ctrl)
michael@0 739 res = bev->be_ops->ctrl(bev, BEV_CTRL_SET_FD, &d);
michael@0 740 BEV_UNLOCK(bev);
michael@0 741 return res;
michael@0 742 }
michael@0 743
michael@0 744 evutil_socket_t
michael@0 745 bufferevent_getfd(struct bufferevent *bev)
michael@0 746 {
michael@0 747 union bufferevent_ctrl_data d;
michael@0 748 int res = -1;
michael@0 749 d.fd = -1;
michael@0 750 BEV_LOCK(bev);
michael@0 751 if (bev->be_ops->ctrl)
michael@0 752 res = bev->be_ops->ctrl(bev, BEV_CTRL_GET_FD, &d);
michael@0 753 BEV_UNLOCK(bev);
michael@0 754 return (res<0) ? -1 : d.fd;
michael@0 755 }
michael@0 756
michael@0 757 static void
michael@0 758 _bufferevent_cancel_all(struct bufferevent *bev)
michael@0 759 {
michael@0 760 union bufferevent_ctrl_data d;
michael@0 761 memset(&d, 0, sizeof(d));
michael@0 762 BEV_LOCK(bev);
michael@0 763 if (bev->be_ops->ctrl)
michael@0 764 bev->be_ops->ctrl(bev, BEV_CTRL_CANCEL_ALL, &d);
michael@0 765 BEV_UNLOCK(bev);
michael@0 766 }
michael@0 767
michael@0 768 short
michael@0 769 bufferevent_get_enabled(struct bufferevent *bufev)
michael@0 770 {
michael@0 771 short r;
michael@0 772 BEV_LOCK(bufev);
michael@0 773 r = bufev->enabled;
michael@0 774 BEV_UNLOCK(bufev);
michael@0 775 return r;
michael@0 776 }
michael@0 777
michael@0 778 struct bufferevent *
michael@0 779 bufferevent_get_underlying(struct bufferevent *bev)
michael@0 780 {
michael@0 781 union bufferevent_ctrl_data d;
michael@0 782 int res = -1;
michael@0 783 d.ptr = NULL;
michael@0 784 BEV_LOCK(bev);
michael@0 785 if (bev->be_ops->ctrl)
michael@0 786 res = bev->be_ops->ctrl(bev, BEV_CTRL_GET_UNDERLYING, &d);
michael@0 787 BEV_UNLOCK(bev);
michael@0 788 return (res<0) ? NULL : d.ptr;
michael@0 789 }
michael@0 790
michael@0 791 static void
michael@0 792 bufferevent_generic_read_timeout_cb(evutil_socket_t fd, short event, void *ctx)
michael@0 793 {
michael@0 794 struct bufferevent *bev = ctx;
michael@0 795 _bufferevent_incref_and_lock(bev);
michael@0 796 bufferevent_disable(bev, EV_READ);
michael@0 797 _bufferevent_run_eventcb(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_READING);
michael@0 798 _bufferevent_decref_and_unlock(bev);
michael@0 799 }
michael@0 800 static void
michael@0 801 bufferevent_generic_write_timeout_cb(evutil_socket_t fd, short event, void *ctx)
michael@0 802 {
michael@0 803 struct bufferevent *bev = ctx;
michael@0 804 _bufferevent_incref_and_lock(bev);
michael@0 805 bufferevent_disable(bev, EV_WRITE);
michael@0 806 _bufferevent_run_eventcb(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING);
michael@0 807 _bufferevent_decref_and_unlock(bev);
michael@0 808 }
michael@0 809
michael@0 810 void
michael@0 811 _bufferevent_init_generic_timeout_cbs(struct bufferevent *bev)
michael@0 812 {
michael@0 813 evtimer_assign(&bev->ev_read, bev->ev_base,
michael@0 814 bufferevent_generic_read_timeout_cb, bev);
michael@0 815 evtimer_assign(&bev->ev_write, bev->ev_base,
michael@0 816 bufferevent_generic_write_timeout_cb, bev);
michael@0 817 }
michael@0 818
michael@0 819 int
michael@0 820 _bufferevent_del_generic_timeout_cbs(struct bufferevent *bev)
michael@0 821 {
michael@0 822 int r1,r2;
michael@0 823 r1 = event_del(&bev->ev_read);
michael@0 824 r2 = event_del(&bev->ev_write);
michael@0 825 if (r1<0 || r2<0)
michael@0 826 return -1;
michael@0 827 return 0;
michael@0 828 }
michael@0 829
michael@0 830 int
michael@0 831 _bufferevent_generic_adj_timeouts(struct bufferevent *bev)
michael@0 832 {
michael@0 833 const short enabled = bev->enabled;
michael@0 834 struct bufferevent_private *bev_p =
michael@0 835 EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
michael@0 836 int r1=0, r2=0;
michael@0 837 if ((enabled & EV_READ) && !bev_p->read_suspended &&
michael@0 838 evutil_timerisset(&bev->timeout_read))
michael@0 839 r1 = event_add(&bev->ev_read, &bev->timeout_read);
michael@0 840 else
michael@0 841 r1 = event_del(&bev->ev_read);
michael@0 842
michael@0 843 if ((enabled & EV_WRITE) && !bev_p->write_suspended &&
michael@0 844 evutil_timerisset(&bev->timeout_write) &&
michael@0 845 evbuffer_get_length(bev->output))
michael@0 846 r2 = event_add(&bev->ev_write, &bev->timeout_write);
michael@0 847 else
michael@0 848 r2 = event_del(&bev->ev_write);
michael@0 849 if (r1 < 0 || r2 < 0)
michael@0 850 return -1;
michael@0 851 return 0;
michael@0 852 }
michael@0 853
michael@0 854 int
michael@0 855 _bufferevent_add_event(struct event *ev, const struct timeval *tv)
michael@0 856 {
michael@0 857 if (tv->tv_sec == 0 && tv->tv_usec == 0)
michael@0 858 return event_add(ev, NULL);
michael@0 859 else
michael@0 860 return event_add(ev, tv);
michael@0 861 }
michael@0 862
/* Public locking API for user programs only; internal code should call
 * _bufferevent_incref_and_lock() or BEV_LOCK directly. */
void
bufferevent_lock(struct bufferevent *bev)
{
	_bufferevent_incref_and_lock(bev);
}
michael@0 870
/* Public unlocking API, paired with bufferevent_lock(); also drops the
 * reference that bufferevent_lock() took. */
void
bufferevent_unlock(struct bufferevent *bev)
{
	_bufferevent_decref_and_unlock(bev);
}

mercurial