ipc/chromium/src/third_party/libevent/bufferevent_async.c

Thu, 15 Jan 2015 21:03:48 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Thu, 15 Jan 2015 21:03:48 +0100
branch
TOR_BUG_9701
changeset 11
deefc01c0e14
permissions
-rw-r--r--

Integrate friendly tips from Tor colleagues to make (or not) 4.5 alpha 3;
This includes removal of overloaded (but unused) methods, and addition of
an overlooked call to DataStruct::SetData(nsISupports, uint32_t, bool).

michael@0 1 /*
michael@0 2 * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
michael@0 3 *
michael@0 4 * All rights reserved.
michael@0 5 *
michael@0 6 * Redistribution and use in source and binary forms, with or without
michael@0 7 * modification, are permitted provided that the following conditions
michael@0 8 * are met:
michael@0 9 * 1. Redistributions of source code must retain the above copyright
michael@0 10 * notice, this list of conditions and the following disclaimer.
michael@0 11 * 2. Redistributions in binary form must reproduce the above copyright
michael@0 12 * notice, this list of conditions and the following disclaimer in the
michael@0 13 * documentation and/or other materials provided with the distribution.
michael@0 14 * 3. The name of the author may not be used to endorse or promote products
michael@0 15 * derived from this software without specific prior written permission.
michael@0 16 *
michael@0 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
michael@0 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
michael@0 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
michael@0 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
michael@0 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
michael@0 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
michael@0 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
michael@0 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
michael@0 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
michael@0 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
michael@0 27 */
michael@0 28
michael@0 29 #include "event2/event-config.h"
michael@0 30
michael@0 31 #ifdef _EVENT_HAVE_SYS_TIME_H
michael@0 32 #include <sys/time.h>
michael@0 33 #endif
michael@0 34
michael@0 35 #include <errno.h>
michael@0 36 #include <stdio.h>
michael@0 37 #include <stdlib.h>
michael@0 38 #include <string.h>
michael@0 39 #ifdef _EVENT_HAVE_STDARG_H
michael@0 40 #include <stdarg.h>
michael@0 41 #endif
michael@0 42 #ifdef _EVENT_HAVE_UNISTD_H
michael@0 43 #include <unistd.h>
michael@0 44 #endif
michael@0 45
michael@0 46 #ifdef WIN32
michael@0 47 #include <winsock2.h>
michael@0 48 #include <ws2tcpip.h>
michael@0 49 #endif
michael@0 50
michael@0 51 #include <sys/queue.h>
michael@0 52
michael@0 53 #include "event2/util.h"
michael@0 54 #include "event2/bufferevent.h"
michael@0 55 #include "event2/buffer.h"
michael@0 56 #include "event2/bufferevent_struct.h"
michael@0 57 #include "event2/event.h"
michael@0 58 #include "event2/util.h"
michael@0 59 #include "event-internal.h"
michael@0 60 #include "log-internal.h"
michael@0 61 #include "mm-internal.h"
michael@0 62 #include "bufferevent-internal.h"
michael@0 63 #include "util-internal.h"
michael@0 64 #include "iocp-internal.h"
michael@0 65
michael@0 66 #ifndef SO_UPDATE_CONNECT_CONTEXT
michael@0 67 /* Mingw is sometimes missing this */
michael@0 68 #define SO_UPDATE_CONNECT_CONTEXT 0x7010
michael@0 69 #endif
michael@0 70
michael@0 71 /* prototypes */
michael@0 72 static int be_async_enable(struct bufferevent *, short);
michael@0 73 static int be_async_disable(struct bufferevent *, short);
michael@0 74 static void be_async_destruct(struct bufferevent *);
michael@0 75 static int be_async_flush(struct bufferevent *, short, enum bufferevent_flush_mode);
michael@0 76 static int be_async_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
michael@0 77
/* IOCP-based bufferevent.  Extends bufferevent_private with one overlapped
 * structure per pending operation kind (connect, read, write) and bookkeeping
 * for the single outstanding read/write this backend allows at a time. */
struct bufferevent_async {
	struct bufferevent_private bev;	/* Must come first: generic state. */
	/* Overlapped structure for a pending ConnectEx operation. */
	struct event_overlapped connect_overlapped;
	/* Overlapped structure for a pending overlapped read. */
	struct event_overlapped read_overlapped;
	/* Overlapped structure for a pending overlapped write. */
	struct event_overlapped write_overlapped;
	/* Bytes requested by the outstanding read, or 0 if none pending. */
	size_t read_in_progress;
	/* Bytes requested by the outstanding write, or 0 if none pending. */
	size_t write_in_progress;
	/* Cleared on unrecoverable error; no new operations are launched. */
	unsigned ok : 1;
	/* 1 while a virtual read event is registered with the base. */
	unsigned read_added : 1;
	/* 1 while a virtual write event is registered with the base. */
	unsigned write_added : 1;
};
michael@0 89
/* Virtual-function table hooking the async (IOCP) backend into the generic
 * bufferevent machinery. */
const struct bufferevent_ops bufferevent_ops_async = {
	"socket_async",			/* type name */
	evutil_offsetof(struct bufferevent_async, bev.bev),	/* offset of bev */
	be_async_enable,		/* enable */
	be_async_disable,		/* disable */
	be_async_destruct,		/* destruct */
	_bufferevent_generic_adj_timeouts,	/* adj_timeouts (shared impl) */
	be_async_flush,			/* flush (no-op for IOCP) */
	be_async_ctrl,			/* ctrl */
};
michael@0 100
michael@0 101 static inline struct bufferevent_async *
michael@0 102 upcast(struct bufferevent *bev)
michael@0 103 {
michael@0 104 struct bufferevent_async *bev_a;
michael@0 105 if (bev->be_ops != &bufferevent_ops_async)
michael@0 106 return NULL;
michael@0 107 bev_a = EVUTIL_UPCAST(bev, struct bufferevent_async, bev.bev);
michael@0 108 return bev_a;
michael@0 109 }
michael@0 110
michael@0 111 static inline struct bufferevent_async *
michael@0 112 upcast_connect(struct event_overlapped *eo)
michael@0 113 {
michael@0 114 struct bufferevent_async *bev_a;
michael@0 115 bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, connect_overlapped);
michael@0 116 EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
michael@0 117 return bev_a;
michael@0 118 }
michael@0 119
michael@0 120 static inline struct bufferevent_async *
michael@0 121 upcast_read(struct event_overlapped *eo)
michael@0 122 {
michael@0 123 struct bufferevent_async *bev_a;
michael@0 124 bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, read_overlapped);
michael@0 125 EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
michael@0 126 return bev_a;
michael@0 127 }
michael@0 128
michael@0 129 static inline struct bufferevent_async *
michael@0 130 upcast_write(struct event_overlapped *eo)
michael@0 131 {
michael@0 132 struct bufferevent_async *bev_a;
michael@0 133 bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, write_overlapped);
michael@0 134 EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
michael@0 135 return bev_a;
michael@0 136 }
michael@0 137
michael@0 138 static void
michael@0 139 bev_async_del_write(struct bufferevent_async *beva)
michael@0 140 {
michael@0 141 struct bufferevent *bev = &beva->bev.bev;
michael@0 142
michael@0 143 if (beva->write_added) {
michael@0 144 beva->write_added = 0;
michael@0 145 event_base_del_virtual(bev->ev_base);
michael@0 146 }
michael@0 147 }
michael@0 148
michael@0 149 static void
michael@0 150 bev_async_del_read(struct bufferevent_async *beva)
michael@0 151 {
michael@0 152 struct bufferevent *bev = &beva->bev.bev;
michael@0 153
michael@0 154 if (beva->read_added) {
michael@0 155 beva->read_added = 0;
michael@0 156 event_base_del_virtual(bev->ev_base);
michael@0 157 }
michael@0 158 }
michael@0 159
michael@0 160 static void
michael@0 161 bev_async_add_write(struct bufferevent_async *beva)
michael@0 162 {
michael@0 163 struct bufferevent *bev = &beva->bev.bev;
michael@0 164
michael@0 165 if (!beva->write_added) {
michael@0 166 beva->write_added = 1;
michael@0 167 event_base_add_virtual(bev->ev_base);
michael@0 168 }
michael@0 169 }
michael@0 170
michael@0 171 static void
michael@0 172 bev_async_add_read(struct bufferevent_async *beva)
michael@0 173 {
michael@0 174 struct bufferevent *bev = &beva->bev.bev;
michael@0 175
michael@0 176 if (!beva->read_added) {
michael@0 177 beva->read_added = 1;
michael@0 178 event_base_add_virtual(bev->ev_base);
michael@0 179 }
michael@0 180 }
michael@0 181
michael@0 182 static void
michael@0 183 bev_async_consider_writing(struct bufferevent_async *beva)
michael@0 184 {
michael@0 185 size_t at_most;
michael@0 186 int limit;
michael@0 187 struct bufferevent *bev = &beva->bev.bev;
michael@0 188
michael@0 189 /* Don't write if there's a write in progress, or we do not
michael@0 190 * want to write, or when there's nothing left to write. */
michael@0 191 if (beva->write_in_progress || beva->bev.connecting)
michael@0 192 return;
michael@0 193 if (!beva->ok || !(bev->enabled&EV_WRITE) ||
michael@0 194 !evbuffer_get_length(bev->output)) {
michael@0 195 bev_async_del_write(beva);
michael@0 196 return;
michael@0 197 }
michael@0 198
michael@0 199 at_most = evbuffer_get_length(bev->output);
michael@0 200
michael@0 201 /* This is safe so long as bufferevent_get_write_max never returns
michael@0 202 * more than INT_MAX. That's true for now. XXXX */
michael@0 203 limit = (int)_bufferevent_get_write_max(&beva->bev);
michael@0 204 if (at_most >= (size_t)limit && limit >= 0)
michael@0 205 at_most = limit;
michael@0 206
michael@0 207 if (beva->bev.write_suspended) {
michael@0 208 bev_async_del_write(beva);
michael@0 209 return;
michael@0 210 }
michael@0 211
michael@0 212 /* XXXX doesn't respect low-water mark very well. */
michael@0 213 bufferevent_incref(bev);
michael@0 214 if (evbuffer_launch_write(bev->output, at_most,
michael@0 215 &beva->write_overlapped)) {
michael@0 216 bufferevent_decref(bev);
michael@0 217 beva->ok = 0;
michael@0 218 _bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
michael@0 219 } else {
michael@0 220 beva->write_in_progress = at_most;
michael@0 221 _bufferevent_decrement_write_buckets(&beva->bev, at_most);
michael@0 222 bev_async_add_write(beva);
michael@0 223 }
michael@0 224 }
michael@0 225
michael@0 226 static void
michael@0 227 bev_async_consider_reading(struct bufferevent_async *beva)
michael@0 228 {
michael@0 229 size_t cur_size;
michael@0 230 size_t read_high;
michael@0 231 size_t at_most;
michael@0 232 int limit;
michael@0 233 struct bufferevent *bev = &beva->bev.bev;
michael@0 234
michael@0 235 /* Don't read if there is a read in progress, or we do not
michael@0 236 * want to read. */
michael@0 237 if (beva->read_in_progress || beva->bev.connecting)
michael@0 238 return;
michael@0 239 if (!beva->ok || !(bev->enabled&EV_READ)) {
michael@0 240 bev_async_del_read(beva);
michael@0 241 return;
michael@0 242 }
michael@0 243
michael@0 244 /* Don't read if we're full */
michael@0 245 cur_size = evbuffer_get_length(bev->input);
michael@0 246 read_high = bev->wm_read.high;
michael@0 247 if (read_high) {
michael@0 248 if (cur_size >= read_high) {
michael@0 249 bev_async_del_read(beva);
michael@0 250 return;
michael@0 251 }
michael@0 252 at_most = read_high - cur_size;
michael@0 253 } else {
michael@0 254 at_most = 16384; /* FIXME totally magic. */
michael@0 255 }
michael@0 256
michael@0 257 /* XXXX This over-commits. */
michael@0 258 /* XXXX see also not above on cast on _bufferevent_get_write_max() */
michael@0 259 limit = (int)_bufferevent_get_read_max(&beva->bev);
michael@0 260 if (at_most >= (size_t)limit && limit >= 0)
michael@0 261 at_most = limit;
michael@0 262
michael@0 263 if (beva->bev.read_suspended) {
michael@0 264 bev_async_del_read(beva);
michael@0 265 return;
michael@0 266 }
michael@0 267
michael@0 268 bufferevent_incref(bev);
michael@0 269 if (evbuffer_launch_read(bev->input, at_most, &beva->read_overlapped)) {
michael@0 270 beva->ok = 0;
michael@0 271 _bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
michael@0 272 bufferevent_decref(bev);
michael@0 273 } else {
michael@0 274 beva->read_in_progress = at_most;
michael@0 275 _bufferevent_decrement_read_buckets(&beva->bev, at_most);
michael@0 276 bev_async_add_read(beva);
michael@0 277 }
michael@0 278
michael@0 279 return;
michael@0 280 }
michael@0 281
michael@0 282 static void
michael@0 283 be_async_outbuf_callback(struct evbuffer *buf,
michael@0 284 const struct evbuffer_cb_info *cbinfo,
michael@0 285 void *arg)
michael@0 286 {
michael@0 287 struct bufferevent *bev = arg;
michael@0 288 struct bufferevent_async *bev_async = upcast(bev);
michael@0 289
michael@0 290 /* If we added data to the outbuf and were not writing before,
michael@0 291 * we may want to write now. */
michael@0 292
michael@0 293 _bufferevent_incref_and_lock(bev);
michael@0 294
michael@0 295 if (cbinfo->n_added)
michael@0 296 bev_async_consider_writing(bev_async);
michael@0 297
michael@0 298 _bufferevent_decref_and_unlock(bev);
michael@0 299 }
michael@0 300
michael@0 301 static void
michael@0 302 be_async_inbuf_callback(struct evbuffer *buf,
michael@0 303 const struct evbuffer_cb_info *cbinfo,
michael@0 304 void *arg)
michael@0 305 {
michael@0 306 struct bufferevent *bev = arg;
michael@0 307 struct bufferevent_async *bev_async = upcast(bev);
michael@0 308
michael@0 309 /* If we drained data from the inbuf and were not reading before,
michael@0 310 * we may want to read now */
michael@0 311
michael@0 312 _bufferevent_incref_and_lock(bev);
michael@0 313
michael@0 314 if (cbinfo->n_deleted)
michael@0 315 bev_async_consider_reading(bev_async);
michael@0 316
michael@0 317 _bufferevent_decref_and_unlock(bev);
michael@0 318 }
michael@0 319
michael@0 320 static int
michael@0 321 be_async_enable(struct bufferevent *buf, short what)
michael@0 322 {
michael@0 323 struct bufferevent_async *bev_async = upcast(buf);
michael@0 324
michael@0 325 if (!bev_async->ok)
michael@0 326 return -1;
michael@0 327
michael@0 328 if (bev_async->bev.connecting) {
michael@0 329 /* Don't launch anything during connection attempts. */
michael@0 330 return 0;
michael@0 331 }
michael@0 332
michael@0 333 if (what & EV_READ)
michael@0 334 BEV_RESET_GENERIC_READ_TIMEOUT(buf);
michael@0 335 if (what & EV_WRITE)
michael@0 336 BEV_RESET_GENERIC_WRITE_TIMEOUT(buf);
michael@0 337
michael@0 338 /* If we newly enable reading or writing, and we aren't reading or
michael@0 339 writing already, consider launching a new read or write. */
michael@0 340
michael@0 341 if (what & EV_READ)
michael@0 342 bev_async_consider_reading(bev_async);
michael@0 343 if (what & EV_WRITE)
michael@0 344 bev_async_consider_writing(bev_async);
michael@0 345 return 0;
michael@0 346 }
michael@0 347
michael@0 348 static int
michael@0 349 be_async_disable(struct bufferevent *bev, short what)
michael@0 350 {
michael@0 351 struct bufferevent_async *bev_async = upcast(bev);
michael@0 352 /* XXXX If we disable reading or writing, we may want to consider
michael@0 353 * canceling any in-progress read or write operation, though it might
michael@0 354 * not work. */
michael@0 355
michael@0 356 if (what & EV_READ) {
michael@0 357 BEV_DEL_GENERIC_READ_TIMEOUT(bev);
michael@0 358 bev_async_del_read(bev_async);
michael@0 359 }
michael@0 360 if (what & EV_WRITE) {
michael@0 361 BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
michael@0 362 bev_async_del_write(bev_async);
michael@0 363 }
michael@0 364
michael@0 365 return 0;
michael@0 366 }
michael@0 367
michael@0 368 static void
michael@0 369 be_async_destruct(struct bufferevent *bev)
michael@0 370 {
michael@0 371 struct bufferevent_async *bev_async = upcast(bev);
michael@0 372 struct bufferevent_private *bev_p = BEV_UPCAST(bev);
michael@0 373 evutil_socket_t fd;
michael@0 374
michael@0 375 EVUTIL_ASSERT(!upcast(bev)->write_in_progress &&
michael@0 376 !upcast(bev)->read_in_progress);
michael@0 377
michael@0 378 bev_async_del_read(bev_async);
michael@0 379 bev_async_del_write(bev_async);
michael@0 380
michael@0 381 fd = _evbuffer_overlapped_get_fd(bev->input);
michael@0 382 if (bev_p->options & BEV_OPT_CLOSE_ON_FREE) {
michael@0 383 /* XXXX possible double-close */
michael@0 384 evutil_closesocket(fd);
michael@0 385 }
michael@0 386 /* delete this in case non-blocking connect was used */
michael@0 387 if (event_initialized(&bev->ev_write)) {
michael@0 388 event_del(&bev->ev_write);
michael@0 389 _bufferevent_del_generic_timeout_cbs(bev);
michael@0 390 }
michael@0 391 }
michael@0 392
/* GetQueuedCompletionStatus doesn't reliably yield WSA error codes, so
 * we use WSAGetOverlappedResult to translate. */
static void
bev_async_set_wsa_error(struct bufferevent *bev, struct event_overlapped *eo)
{
	DWORD bytes, flags;
	evutil_socket_t fd;

	fd = _evbuffer_overlapped_get_fd(bev->input);
	/* Called purely for its side effect: on failure,
	 * WSAGetOverlappedResult sets the thread's last WSA error, which
	 * callers then read via WSAGetLastError().  The 'bytes' and 'flags'
	 * outputs are deliberately ignored. */
	WSAGetOverlappedResult(fd, &eo->overlapped, &bytes, FALSE, &flags);
}
michael@0 404
/* Flush hook for the ops table.  Flushing is a no-op for the IOCP backend:
 * writes are already handed to the kernel as soon as they are launched. */
static int
be_async_flush(struct bufferevent *bev, short what,
    enum bufferevent_flush_mode mode)
{
	return 0;
}
michael@0 411
/* IOCP completion callback for a ConnectEx launched by
 * bufferevent_async_connect().  'ok' tells whether the connect succeeded.
 * Runs the user's event callback with BEV_EVENT_CONNECTED or
 * BEV_EVENT_ERROR and releases the reference and virtual event taken when
 * the connect was launched. */
static void
connect_complete(struct event_overlapped *eo, ev_uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	struct bufferevent_async *bev_a = upcast_connect(eo);
	struct bufferevent *bev = &bev_a->bev.bev;
	evutil_socket_t sock;

	BEV_LOCK(bev);

	EVUTIL_ASSERT(bev_a->bev.connecting);
	bev_a->bev.connecting = 0;
	sock = _evbuffer_overlapped_get_fd(bev_a->bev.bev.input);
	/* XXXX Handle error? */
	/* Required after ConnectEx so the socket behaves like one returned
	 * by connect() (enables getpeername, shutdown, etc.). */
	setsockopt(sock, SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT, NULL, 0);

	if (ok)
		bufferevent_async_set_connected(bev);
	else
		/* Translate the failure into a WSA error code for the
		 * user's callback to pick up. */
		bev_async_set_wsa_error(bev, eo);

	_bufferevent_run_eventcb(bev,
	    ok? BEV_EVENT_CONNECTED : BEV_EVENT_ERROR);

	/* Balance the virtual event added when the connect was launched. */
	event_base_del_virtual(bev->ev_base);

	_bufferevent_decref_and_unlock(bev);
}
michael@0 440
/* IOCP completion callback for a read launched by
 * bev_async_consider_reading().  'nbytes' is how many bytes were actually
 * transferred; 'ok' is false if the overlapped operation failed. */
static void
read_complete(struct event_overlapped *eo, ev_uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	struct bufferevent_async *bev_a = upcast_read(eo);
	struct bufferevent *bev = &bev_a->bev.bev;
	short what = BEV_EVENT_READING;
	ev_ssize_t amount_unread;
	BEV_LOCK(bev);
	EVUTIL_ASSERT(bev_a->read_in_progress);

	/* Refund the rate-limit bucket for bytes we budgeted but did not
	 * actually read. */
	amount_unread = bev_a->read_in_progress - nbytes;
	evbuffer_commit_read(bev->input, nbytes);
	bev_a->read_in_progress = 0;
	if (amount_unread)
		_bufferevent_decrement_read_buckets(&bev_a->bev, -amount_unread);

	/* Record the WSA error code for the user's callback. */
	if (!ok)
		bev_async_set_wsa_error(bev, eo);

	if (bev_a->ok) {
		if (ok && nbytes) {
			/* Successful nonempty read: restart the timeout,
			 * run the read callback once the low-water mark is
			 * reached, and maybe launch another read. */
			BEV_RESET_GENERIC_READ_TIMEOUT(bev);
			if (evbuffer_get_length(bev->input) >= bev->wm_read.low)
				_bufferevent_run_readcb(bev);
			bev_async_consider_reading(bev_a);
		} else if (!ok) {
			what |= BEV_EVENT_ERROR;
			bev_a->ok = 0;
			_bufferevent_run_eventcb(bev, what);
		} else if (!nbytes) {
			/* Zero bytes with success means end-of-file. */
			what |= BEV_EVENT_EOF;
			bev_a->ok = 0;
			_bufferevent_run_eventcb(bev, what);
		}
	}

	/* Release the reference taken when the read was launched. */
	_bufferevent_decref_and_unlock(bev);
}
michael@0 480
/* IOCP completion callback for a write launched by
 * bev_async_consider_writing().  'nbytes' is how many bytes were actually
 * transferred; 'ok' is false if the overlapped operation failed. */
static void
write_complete(struct event_overlapped *eo, ev_uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	struct bufferevent_async *bev_a = upcast_write(eo);
	struct bufferevent *bev = &bev_a->bev.bev;
	short what = BEV_EVENT_WRITING;
	ev_ssize_t amount_unwritten;

	BEV_LOCK(bev);
	EVUTIL_ASSERT(bev_a->write_in_progress);

	/* Refund the rate-limit bucket for bytes we budgeted but did not
	 * actually write. */
	amount_unwritten = bev_a->write_in_progress - nbytes;
	evbuffer_commit_write(bev->output, nbytes);
	bev_a->write_in_progress = 0;

	if (amount_unwritten)
		_bufferevent_decrement_write_buckets(&bev_a->bev,
		    -amount_unwritten);


	/* Record the WSA error code for the user's callback. */
	if (!ok)
		bev_async_set_wsa_error(bev, eo);

	if (bev_a->ok) {
		if (ok && nbytes) {
			/* Successful nonempty write: restart the timeout,
			 * run the write callback once we drain to the
			 * low-water mark, and maybe launch another write. */
			BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
			if (evbuffer_get_length(bev->output) <=
			    bev->wm_write.low)
				_bufferevent_run_writecb(bev);
			bev_async_consider_writing(bev_a);
		} else if (!ok) {
			what |= BEV_EVENT_ERROR;
			bev_a->ok = 0;
			_bufferevent_run_eventcb(bev, what);
		} else if (!nbytes) {
			/* Zero bytes with success is treated as EOF. */
			what |= BEV_EVENT_EOF;
			bev_a->ok = 0;
			_bufferevent_run_eventcb(bev, what);
		}
	}

	/* Release the reference taken when the write was launched. */
	_bufferevent_decref_and_unlock(bev);
}
michael@0 525
/* Create a new IOCP-backed bufferevent for socket 'fd' (or -1 to attach a
 * socket later).  The socket is associated with the base's IOCP port and
 * both evbuffers are created in overlapped mode.  BEV_OPT_THREADSAFE is
 * forced on, since completion callbacks run on IOCP worker threads.
 * Returns NULL if the base has no IOCP port, on allocation failure, or if
 * port association fails. */
struct bufferevent *
bufferevent_async_new(struct event_base *base,
    evutil_socket_t fd, int options)
{
	struct bufferevent_async *bev_a;
	struct bufferevent *bev;
	struct event_iocp_port *iocp;

	options |= BEV_OPT_THREADSAFE;

	if (!(iocp = event_base_get_iocp(base)))
		return NULL;

	if (fd >= 0 && event_iocp_port_associate(iocp, fd, 1)<0) {
		int err = GetLastError();
		/* We may have already associated this fd with a port.
		 * Let's hope it's this port, and that the error code
		 * for doing this never changes. */
		if (err != ERROR_INVALID_PARAMETER)
			return NULL;
	}

	if (!(bev_a = mm_calloc(1, sizeof(struct bufferevent_async))))
		return NULL;

	bev = &bev_a->bev.bev;
	if (!(bev->input = evbuffer_overlapped_new(fd))) {
		mm_free(bev_a);
		return NULL;
	}
	if (!(bev->output = evbuffer_overlapped_new(fd))) {
		evbuffer_free(bev->input);
		mm_free(bev_a);
		return NULL;
	}

	if (bufferevent_init_common(&bev_a->bev, base, &bufferevent_ops_async,
	    options)<0)
		goto err;

	/* Watch the buffers so adding output data / draining input data
	 * can trigger new overlapped operations. */
	evbuffer_add_cb(bev->input, be_async_inbuf_callback, bev);
	evbuffer_add_cb(bev->output, be_async_outbuf_callback, bev);

	event_overlapped_init(&bev_a->connect_overlapped, connect_complete);
	event_overlapped_init(&bev_a->read_overlapped, read_complete);
	event_overlapped_init(&bev_a->write_overlapped, write_complete);

	/* Without a socket we stay in the not-ok state until one is set. */
	bev_a->ok = fd >= 0;
	if (bev_a->ok)
		_bufferevent_init_generic_timeout_cbs(bev);

	return bev;
err:
	bufferevent_free(&bev_a->bev.bev);
	return NULL;
}
michael@0 582
/* Mark an async bufferevent as connected and usable: set the ok flag,
 * install the generic timeout callbacks, and re-run enable so any pending
 * read/write interest launches its first overlapped operation. */
void
bufferevent_async_set_connected(struct bufferevent *bev)
{
	struct bufferevent_async *bev_async = upcast(bev);
	bev_async->ok = 1;
	_bufferevent_init_generic_timeout_cbs(bev);
	/* Now's a good time to consider reading/writing */
	be_async_enable(bev, bev->enabled);
}
michael@0 592
michael@0 593 int
michael@0 594 bufferevent_async_can_connect(struct bufferevent *bev)
michael@0 595 {
michael@0 596 const struct win32_extension_fns *ext =
michael@0 597 event_get_win32_extension_fns();
michael@0 598
michael@0 599 if (BEV_IS_ASYNC(bev) &&
michael@0 600 event_base_get_iocp(bev->ev_base) &&
michael@0 601 ext && ext->ConnectEx)
michael@0 602 return 1;
michael@0 603
michael@0 604 return 0;
michael@0 605 }
michael@0 606
/* Launch an asynchronous connect on 'fd' to address 'sa' using ConnectEx.
 * Returns 0 if the connect was launched (completion is reported through
 * connect_complete()), -1 on failure.  The address family must be AF_INET
 * or AF_INET6. */
int
bufferevent_async_connect(struct bufferevent *bev, evutil_socket_t fd,
    const struct sockaddr *sa, int socklen)
{
	BOOL rc;
	struct bufferevent_async *bev_async = upcast(bev);
	struct sockaddr_storage ss;
	const struct win32_extension_fns *ext =
	    event_get_win32_extension_fns();

	EVUTIL_ASSERT(ext && ext->ConnectEx && fd >= 0 && sa != NULL);

	/* ConnectEx() requires that the socket be bound to an address
	 * with bind() before using, otherwise it will fail. We attempt
	 * to issue a bind() here, taking into account that the error
	 * code is set to WSAEINVAL when the socket is already bound. */
	memset(&ss, 0, sizeof(ss));
	if (sa->sa_family == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
	} else if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_addr = in6addr_any;
	} else {
		/* Well, the user will have to bind() */
		return -1;
	}
	if (bind(fd, (struct sockaddr *)&ss, sizeof(ss)) < 0 &&
	    WSAGetLastError() != WSAEINVAL)
		return -1;

	/* Keep the base looping and the bufferevent alive until
	 * connect_complete() runs; both are released on launch failure. */
	event_base_add_virtual(bev->ev_base);
	bufferevent_incref(bev);
	rc = ext->ConnectEx(fd, sa, socklen, NULL, 0, NULL,
	    &bev_async->connect_overlapped.overlapped);
	/* ERROR_IO_PENDING means the overlapped connect was queued. */
	if (rc || WSAGetLastError() == ERROR_IO_PENDING)
		return 0;

	event_base_del_virtual(bev->ev_base);
	bufferevent_decref(bev);

	return -1;
}
michael@0 652
/* Control-operation hook for the ops table.  Supports getting the fd,
 * replacing the fd (re-associating the new socket with the IOCP port), and
 * canceling all activity.  Returns 0 on success, -1 on failure or for
 * unsupported operations. */
static int
be_async_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
    union bufferevent_ctrl_data *data)
{
	switch (op) {
	case BEV_CTRL_GET_FD:
		data->fd = _evbuffer_overlapped_get_fd(bev->input);
		return 0;
	case BEV_CTRL_SET_FD: {
		struct event_iocp_port *iocp;

		if (data->fd == _evbuffer_overlapped_get_fd(bev->input))
			return 0;
		if (!(iocp = event_base_get_iocp(bev->ev_base)))
			return -1;
		if (event_iocp_port_associate(iocp, data->fd, 1) < 0)
			return -1;
		/* Both evbuffers share the same underlying socket. */
		_evbuffer_overlapped_set_fd(bev->input, data->fd);
		_evbuffer_overlapped_set_fd(bev->output, data->fd);
		return 0;
	}
	case BEV_CTRL_CANCEL_ALL: {
		struct bufferevent_async *bev_a = upcast(bev);
		evutil_socket_t fd = _evbuffer_overlapped_get_fd(bev->input);
		/* Closing the socket aborts any pending overlapped I/O. */
		if (fd != (evutil_socket_t)INVALID_SOCKET &&
		    (bev_a->bev.options & BEV_OPT_CLOSE_ON_FREE)) {
			closesocket(fd);
		}
		bev_a->ok = 0;
		return 0;
	}
	case BEV_CTRL_GET_UNDERLYING:
	default:
		return -1;
	}
}
michael@0 689
michael@0 690

mercurial