ipc/chromium/src/third_party/libevent/buffer.c

Wed, 31 Dec 2014 06:09:35 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Wed, 31 Dec 2014 06:09:35 +0100
changeset 0
6474c204b198
permissions
-rw-r--r--

Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f for hacking purposes.

michael@0 1 /*
michael@0 2 * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
michael@0 3 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
michael@0 4 *
michael@0 5 * Redistribution and use in source and binary forms, with or without
michael@0 6 * modification, are permitted provided that the following conditions
michael@0 7 * are met:
michael@0 8 * 1. Redistributions of source code must retain the above copyright
michael@0 9 * notice, this list of conditions and the following disclaimer.
michael@0 10 * 2. Redistributions in binary form must reproduce the above copyright
michael@0 11 * notice, this list of conditions and the following disclaimer in the
michael@0 12 * documentation and/or other materials provided with the distribution.
michael@0 13 * 3. The name of the author may not be used to endorse or promote products
michael@0 14 * derived from this software without specific prior written permission.
michael@0 15 *
michael@0 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
michael@0 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
michael@0 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
michael@0 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
michael@0 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
michael@0 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
michael@0 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
michael@0 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
michael@0 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
michael@0 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
michael@0 26 */
michael@0 27
michael@0 28 #include "event2/event-config.h"
michael@0 29
michael@0 30 #ifdef WIN32
michael@0 31 #include <winsock2.h>
michael@0 32 #include <windows.h>
michael@0 33 #include <io.h>
michael@0 34 #endif
michael@0 35
michael@0 36 #ifdef _EVENT_HAVE_VASPRINTF
michael@0 37 /* If we have vasprintf, we need to define this before we include stdio.h. */
michael@0 38 #define _GNU_SOURCE
michael@0 39 #endif
michael@0 40
michael@0 41 #include <sys/types.h>
michael@0 42
michael@0 43 #ifdef _EVENT_HAVE_SYS_TIME_H
michael@0 44 #include <sys/time.h>
michael@0 45 #endif
michael@0 46
michael@0 47 #ifdef _EVENT_HAVE_SYS_SOCKET_H
michael@0 48 #include <sys/socket.h>
michael@0 49 #endif
michael@0 50
michael@0 51 #ifdef _EVENT_HAVE_SYS_UIO_H
michael@0 52 #include <sys/uio.h>
michael@0 53 #endif
michael@0 54
michael@0 55 #ifdef _EVENT_HAVE_SYS_IOCTL_H
michael@0 56 #include <sys/ioctl.h>
michael@0 57 #endif
michael@0 58
michael@0 59 #ifdef _EVENT_HAVE_SYS_MMAN_H
michael@0 60 #include <sys/mman.h>
michael@0 61 #endif
michael@0 62
michael@0 63 #ifdef _EVENT_HAVE_SYS_SENDFILE_H
michael@0 64 #include <sys/sendfile.h>
michael@0 65 #endif
michael@0 66
michael@0 67 #include <errno.h>
michael@0 68 #include <stdio.h>
michael@0 69 #include <stdlib.h>
michael@0 70 #include <string.h>
michael@0 71 #ifdef _EVENT_HAVE_STDARG_H
michael@0 72 #include <stdarg.h>
michael@0 73 #endif
michael@0 74 #ifdef _EVENT_HAVE_UNISTD_H
michael@0 75 #include <unistd.h>
michael@0 76 #endif
michael@0 77 #include <limits.h>
michael@0 78
michael@0 79 #include "event2/event.h"
michael@0 80 #include "event2/buffer.h"
michael@0 81 #include "event2/buffer_compat.h"
michael@0 82 #include "event2/bufferevent.h"
michael@0 83 #include "event2/bufferevent_compat.h"
michael@0 84 #include "event2/bufferevent_struct.h"
michael@0 85 #include "event2/thread.h"
michael@0 86 #include "event2/event-config.h"
michael@0 87 #include "log-internal.h"
michael@0 88 #include "mm-internal.h"
michael@0 89 #include "util-internal.h"
michael@0 90 #include "evthread-internal.h"
michael@0 91 #include "evbuffer-internal.h"
michael@0 92 #include "bufferevent-internal.h"
michael@0 93
michael@0 94 /* some systems do not have MAP_FAILED */
michael@0 95 #ifndef MAP_FAILED
michael@0 96 #define MAP_FAILED ((void *)-1)
michael@0 97 #endif
michael@0 98
michael@0 99 /* send file support */
michael@0 100 #if defined(_EVENT_HAVE_SYS_SENDFILE_H) && defined(_EVENT_HAVE_SENDFILE) && defined(__linux__)
michael@0 101 #define USE_SENDFILE 1
michael@0 102 #define SENDFILE_IS_LINUX 1
michael@0 103 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__FreeBSD__)
michael@0 104 #define USE_SENDFILE 1
michael@0 105 #define SENDFILE_IS_FREEBSD 1
michael@0 106 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__APPLE__)
michael@0 107 #define USE_SENDFILE 1
michael@0 108 #define SENDFILE_IS_MACOSX 1
michael@0 109 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
michael@0 110 #define USE_SENDFILE 1
michael@0 111 #define SENDFILE_IS_SOLARIS 1
michael@0 112 #endif
michael@0 113
michael@0 114 #ifdef USE_SENDFILE
michael@0 115 static int use_sendfile = 1;
michael@0 116 #endif
michael@0 117 #ifdef _EVENT_HAVE_MMAP
michael@0 118 static int use_mmap = 1;
michael@0 119 #endif
michael@0 120
michael@0 121
michael@0 122 /* Mask of user-selectable callback flags. */
michael@0 123 #define EVBUFFER_CB_USER_FLAGS 0xffff
michael@0 124 /* Mask of all internal-use-only flags. */
michael@0 125 #define EVBUFFER_CB_INTERNAL_FLAGS 0xffff0000
michael@0 126
michael@0 127 /* Flag set if the callback is using the cb_obsolete function pointer */
michael@0 128 #define EVBUFFER_CB_OBSOLETE 0x00040000
michael@0 129
michael@0 130 /* evbuffer_chain support */
michael@0 131 #define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
michael@0 132 #define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
michael@0 133 0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
michael@0 134
michael@0 135 #define CHAIN_PINNED(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
michael@0 136 #define CHAIN_PINNED_R(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)
michael@0 137
michael@0 138 static void evbuffer_chain_align(struct evbuffer_chain *chain);
michael@0 139 static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
michael@0 140 size_t datalen);
michael@0 141 static void evbuffer_deferred_callback(struct deferred_cb *cb, void *arg);
michael@0 142 static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
michael@0 143 const struct evbuffer_ptr *pos, const char *mem, size_t len);
michael@0 144 static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
michael@0 145 size_t datlen);
michael@0 146
michael@0 147 #ifdef WIN32
michael@0 148 static int evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd,
michael@0 149 ev_ssize_t howmuch);
michael@0 150 #else
michael@0 151 #define evbuffer_readfile evbuffer_read
michael@0 152 #endif
michael@0 153
michael@0 154 static struct evbuffer_chain *
michael@0 155 evbuffer_chain_new(size_t size)
michael@0 156 {
michael@0 157 struct evbuffer_chain *chain;
michael@0 158 size_t to_alloc;
michael@0 159
michael@0 160 size += EVBUFFER_CHAIN_SIZE;
michael@0 161
michael@0 162 /* get the next largest memory that can hold the buffer */
michael@0 163 to_alloc = MIN_BUFFER_SIZE;
michael@0 164 while (to_alloc < size)
michael@0 165 to_alloc <<= 1;
michael@0 166
michael@0 167 /* we get everything in one chunk */
michael@0 168 if ((chain = mm_malloc(to_alloc)) == NULL)
michael@0 169 return (NULL);
michael@0 170
michael@0 171 memset(chain, 0, EVBUFFER_CHAIN_SIZE);
michael@0 172
michael@0 173 chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;
michael@0 174
michael@0 175 /* this way we can manipulate the buffer to different addresses,
michael@0 176 * which is required for mmap for example.
michael@0 177 */
michael@0 178 chain->buffer = EVBUFFER_CHAIN_EXTRA(u_char, chain);
michael@0 179
michael@0 180 return (chain);
michael@0 181 }
michael@0 182
/* Release a chain allocated by evbuffer_chain_new(), honoring pins and
 * the chain's storage type (mmap, sendfile fd, or external reference).
 *
 * A pinned chain cannot be freed yet: it is only marked EVBUFFER_DANGLING
 * here, and the real free happens in _evbuffer_chain_unpin() once the last
 * pin is dropped. */
static inline void
evbuffer_chain_free(struct evbuffer_chain *chain)
{
	if (CHAIN_PINNED(chain)) {
		chain->flags |= EVBUFFER_DANGLING;
		return;
	}
	if (chain->flags & (EVBUFFER_MMAP|EVBUFFER_SENDFILE|
		EVBUFFER_REFERENCE)) {
		if (chain->flags & EVBUFFER_REFERENCE) {
			/* Externally owned memory: run the user's cleanup
			 * callback, if one was registered. */
			struct evbuffer_chain_reference *info =
			    EVBUFFER_CHAIN_EXTRA(
				struct evbuffer_chain_reference,
				chain);
			if (info->cleanupfn)
				(*info->cleanupfn)(chain->buffer,
				    chain->buffer_len,
				    info->extra);
		}
#ifdef _EVENT_HAVE_MMAP
		if (chain->flags & EVBUFFER_MMAP) {
			/* Memory-mapped file: unmap the region and close
			 * the descriptor; failures only produce warnings. */
			struct evbuffer_chain_fd *info =
			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
				chain);
			if (munmap(chain->buffer, chain->buffer_len) == -1)
				event_warn("%s: munmap failed", __func__);
			if (close(info->fd) == -1)
				event_warn("%s: close(%d) failed",
				    __func__, info->fd);
		}
#endif
#ifdef USE_SENDFILE
		if (chain->flags & EVBUFFER_SENDFILE) {
			/* A sendfile chain owns only its file descriptor. */
			struct evbuffer_chain_fd *info =
			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
				chain);
			if (close(info->fd) == -1)
				event_warn("%s: close(%d) failed",
				    __func__, info->fd);
		}
#endif
	}

	mm_free(chain);
}
michael@0 228
michael@0 229 static void
michael@0 230 evbuffer_free_all_chains(struct evbuffer_chain *chain)
michael@0 231 {
michael@0 232 struct evbuffer_chain *next;
michael@0 233 for (; chain; chain = next) {
michael@0 234 next = chain->next;
michael@0 235 evbuffer_chain_free(chain);
michael@0 236 }
michael@0 237 }
michael@0 238
#ifndef NDEBUG
/* Debug helper: return 1 iff no chain in the list starting at 'chain'
 * holds any data.  Used only from EVUTIL_ASSERT checks. */
static int
evbuffer_chains_all_empty(struct evbuffer_chain *chain)
{
	for (; chain; chain = chain->next) {
		if (chain->off)
			return 0;
	}
	return 1;
}
#else
/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
   "unused variable" warnings. */
static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
	return 1;
}
#endif
michael@0 256
/* Free all trailing chains in 'buf' that are neither pinned nor empty, prior
 * to replacing them all with a new chain.  Return a pointer to the place
 * where the new chain will go.
 *
 * Internal; requires lock.  The caller must fix up buf->last and buf->first
 * as needed; they might have been freed.
 */
static struct evbuffer_chain **
evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
{
	struct evbuffer_chain **ch = buf->last_with_datap;
	/* Find the first victim chain.  It might be *last_with_datap */
	while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
		ch = &(*ch)->next;
	if (*ch) {
		/* Everything from the first victim onward must be empty
		 * (asserted below), so the whole tail can go at once. */
		EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
		evbuffer_free_all_chains(*ch);
		*ch = NULL;
	}
	return ch;
}
michael@0 278
/* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
 * chains as necessary.  Requires lock.  Does not schedule callbacks.
 */
static void
evbuffer_chain_insert(struct evbuffer *buf,
    struct evbuffer_chain *chain)
{
	ASSERT_EVBUFFER_LOCKED(buf);
	if (*buf->last_with_datap == NULL) {
		/* There are no chains data on the buffer at all. */
		EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
		EVUTIL_ASSERT(buf->first == NULL);
		buf->first = buf->last = chain;
	} else {
		struct evbuffer_chain **ch = buf->last_with_datap;
		/* Find the first victim chain. It might be *last_with_datap */
		while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
			ch = &(*ch)->next;
		if (*ch == NULL) {
			/* There is no victim; just append this new chain. */
			buf->last->next = chain;
			if (chain->off)
				buf->last_with_datap = &buf->last->next;
		} else {
			/* Replace all victim chains with this chain. */
			EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
			evbuffer_free_all_chains(*ch);
			*ch = chain;
		}
		buf->last = chain;
	}
	/* If 'chain' already carries data, account for it now. */
	buf->total_len += chain->off;
}
michael@0 312
michael@0 313 static inline struct evbuffer_chain *
michael@0 314 evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
michael@0 315 {
michael@0 316 struct evbuffer_chain *chain;
michael@0 317 if ((chain = evbuffer_chain_new(datlen)) == NULL)
michael@0 318 return NULL;
michael@0 319 evbuffer_chain_insert(buf, chain);
michael@0 320 return chain;
michael@0 321 }
michael@0 322
michael@0 323 void
michael@0 324 _evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag)
michael@0 325 {
michael@0 326 EVUTIL_ASSERT((chain->flags & flag) == 0);
michael@0 327 chain->flags |= flag;
michael@0 328 }
michael@0 329
michael@0 330 void
michael@0 331 _evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag)
michael@0 332 {
michael@0 333 EVUTIL_ASSERT((chain->flags & flag) != 0);
michael@0 334 chain->flags &= ~flag;
michael@0 335 if (chain->flags & EVBUFFER_DANGLING)
michael@0 336 evbuffer_chain_free(chain);
michael@0 337 }
michael@0 338
michael@0 339 struct evbuffer *
michael@0 340 evbuffer_new(void)
michael@0 341 {
michael@0 342 struct evbuffer *buffer;
michael@0 343
michael@0 344 buffer = mm_calloc(1, sizeof(struct evbuffer));
michael@0 345 if (buffer == NULL)
michael@0 346 return (NULL);
michael@0 347
michael@0 348 TAILQ_INIT(&buffer->callbacks);
michael@0 349 buffer->refcnt = 1;
michael@0 350 buffer->last_with_datap = &buffer->first;
michael@0 351
michael@0 352 return (buffer);
michael@0 353 }
michael@0 354
michael@0 355 int
michael@0 356 evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
michael@0 357 {
michael@0 358 EVBUFFER_LOCK(buf);
michael@0 359 buf->flags |= (ev_uint32_t)flags;
michael@0 360 EVBUFFER_UNLOCK(buf);
michael@0 361 return 0;
michael@0 362 }
michael@0 363
michael@0 364 int
michael@0 365 evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
michael@0 366 {
michael@0 367 EVBUFFER_LOCK(buf);
michael@0 368 buf->flags &= ~(ev_uint32_t)flags;
michael@0 369 EVBUFFER_UNLOCK(buf);
michael@0 370 return 0;
michael@0 371 }
michael@0 372
michael@0 373 void
michael@0 374 _evbuffer_incref(struct evbuffer *buf)
michael@0 375 {
michael@0 376 EVBUFFER_LOCK(buf);
michael@0 377 ++buf->refcnt;
michael@0 378 EVBUFFER_UNLOCK(buf);
michael@0 379 }
michael@0 380
/* Take a new reference on 'buf' and return with its lock HELD; the caller
 * must eventually release both (e.g. via _evbuffer_decref_and_unlock()). */
void
_evbuffer_incref_and_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
}
michael@0 387
/* Switch this buffer to deferred callbacks: future callback runs are
 * scheduled on 'base's deferred-callback queue rather than invoked
 * inline.  Always returns 0. */
int
evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
{
	EVBUFFER_LOCK(buffer);
	buffer->cb_queue = event_base_get_deferred_cb_queue(base);
	buffer->deferred_cbs = 1;
	event_deferred_cb_init(&buffer->deferred,
	    evbuffer_deferred_callback, buffer);
	EVBUFFER_UNLOCK(buffer);
	return 0;
}
michael@0 399
michael@0 400 int
michael@0 401 evbuffer_enable_locking(struct evbuffer *buf, void *lock)
michael@0 402 {
michael@0 403 #ifdef _EVENT_DISABLE_THREAD_SUPPORT
michael@0 404 return -1;
michael@0 405 #else
michael@0 406 if (buf->lock)
michael@0 407 return -1;
michael@0 408
michael@0 409 if (!lock) {
michael@0 410 EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
michael@0 411 if (!lock)
michael@0 412 return -1;
michael@0 413 buf->lock = lock;
michael@0 414 buf->own_lock = 1;
michael@0 415 } else {
michael@0 416 buf->lock = lock;
michael@0 417 buf->own_lock = 0;
michael@0 418 }
michael@0 419
michael@0 420 return 0;
michael@0 421 #endif
michael@0 422 }
michael@0 423
michael@0 424 void
michael@0 425 evbuffer_set_parent(struct evbuffer *buf, struct bufferevent *bev)
michael@0 426 {
michael@0 427 EVBUFFER_LOCK(buf);
michael@0 428 buf->parent = bev;
michael@0 429 EVBUFFER_UNLOCK(buf);
michael@0 430 }
michael@0 431
/* Invoke the registered callbacks appropriate for the current mode.
 * Requires lock.
 *
 * Mode selection via (flags & mask) == masked_val:
 *  - running_deferred: run enabled callbacks NOT marked NODEFER (we are
 *    already on the deferred queue).
 *  - deferred_cbs set but not yet running deferred: run only enabled
 *    NODEFER callbacks now, and keep n_add/n_del intact for the later
 *    deferred pass.
 *  - otherwise: run every enabled callback. */
static void
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
	struct evbuffer_cb_entry *cbent, *next;
	struct evbuffer_cb_info info;
	size_t new_size;
	ev_uint32_t mask, masked_val;
	int clear = 1;

	if (running_deferred) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	} else if (buffer->deferred_cbs) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		/* Don't zero-out n_add/n_del, since the deferred callbacks
		   will want to see them. */
		clear = 0;
	} else {
		mask = EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	}

	ASSERT_EVBUFFER_LOCKED(buffer);

	if (TAILQ_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}
	/* Nothing changed since the last run: nothing to report. */
	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
		return;

	new_size = buffer->total_len;
	/* Reconstruct the length the buffer had before the pending
	 * additions/deletions being reported. */
	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
	info.n_added = buffer->n_add_for_cb;
	info.n_deleted = buffer->n_del_for_cb;
	if (clear) {
		buffer->n_add_for_cb = 0;
		buffer->n_del_for_cb = 0;
	}
	for (cbent = TAILQ_FIRST(&buffer->callbacks);
	     cbent != NULL;
	     cbent = next) {
		/* Get the 'next' pointer now in case this callback decides
		 * to remove itself or something. */
		next = TAILQ_NEXT(cbent, next);

		if ((cbent->flags & mask) != masked_val)
			continue;

		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
			cbent->cb.cb_obsolete(buffer,
			    info.orig_size, new_size, cbent->cbarg);
		else
			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
	}
}
michael@0 489
/* Run (or schedule) this buffer's callbacks after its contents changed.
 * When deferred callbacks are enabled, a reference on the buffer (and on
 * its parent bufferevent, if any) is taken so they survive until
 * evbuffer_deferred_callback() runs; any callbacks flagged NODEFER are
 * still run immediately via evbuffer_run_callbacks(..., 0). */
void
evbuffer_invoke_callbacks(struct evbuffer *buffer)
{
	if (TAILQ_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}

	if (buffer->deferred_cbs) {
		/* Already queued: the pending deferred run will see the
		 * accumulated counters; nothing more to do. */
		if (buffer->deferred.queued)
			return;
		_evbuffer_incref_and_lock(buffer);
		if (buffer->parent)
			bufferevent_incref(buffer->parent);
		/* Drops only the recursion level taken just above. */
		EVBUFFER_UNLOCK(buffer);
		event_deferred_cb_schedule(buffer->cb_queue, &buffer->deferred);
	}

	evbuffer_run_callbacks(buffer, 0);
}
michael@0 510
/* Deferred-queue trampoline: runs the buffer's deferrable callbacks and
 * releases the references taken in evbuffer_invoke_callbacks(). */
static void
evbuffer_deferred_callback(struct deferred_cb *cb, void *arg)
{
	struct bufferevent *parent = NULL;
	struct evbuffer *buffer = arg;

	/* XXXX It would be better to run these callbacks without holding the
	 * lock */
	EVBUFFER_LOCK(buffer);
	parent = buffer->parent;
	evbuffer_run_callbacks(buffer, 1);
	/* Drops the reference (and lock) taken when this was scheduled. */
	_evbuffer_decref_and_unlock(buffer);
	if (parent)
		bufferevent_decref(parent);
}
michael@0 526
michael@0 527 static void
michael@0 528 evbuffer_remove_all_callbacks(struct evbuffer *buffer)
michael@0 529 {
michael@0 530 struct evbuffer_cb_entry *cbent;
michael@0 531
michael@0 532 while ((cbent = TAILQ_FIRST(&buffer->callbacks))) {
michael@0 533 TAILQ_REMOVE(&buffer->callbacks, cbent, next);
michael@0 534 mm_free(cbent);
michael@0 535 }
michael@0 536 }
michael@0 537
/* Drop one reference to 'buffer' (which the caller must have locked) and
 * unlock it.  If this was the last reference, also free all chains, the
 * registered callbacks, any pending deferred callback, and the buffer
 * itself. */
void
_evbuffer_decref_and_unlock(struct evbuffer *buffer)
{
	struct evbuffer_chain *chain, *next;
	ASSERT_EVBUFFER_LOCKED(buffer);

	EVUTIL_ASSERT(buffer->refcnt > 0);

	if (--buffer->refcnt > 0) {
		EVBUFFER_UNLOCK(buffer);
		return;
	}

	for (chain = buffer->first; chain != NULL; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
	evbuffer_remove_all_callbacks(buffer);
	if (buffer->deferred_cbs)
		event_deferred_cb_cancel(buffer->cb_queue, &buffer->deferred);

	/* Refcount hit zero, so no other holder remains; touching the
	 * buffer after unlocking is safe on that assumption. */
	EVBUFFER_UNLOCK(buffer);
	if (buffer->own_lock)
		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(buffer);
}
michael@0 564
/* Public destructor: drop the caller's reference.  The buffer is actually
 * freed only once no other references (e.g. a queued deferred callback)
 * remain. */
void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
	_evbuffer_decref_and_unlock(buffer);
}
michael@0 571
/* Public wrapper: acquire the buffer's lock (no-op if locking disabled). */
void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}
michael@0 577
/* Public wrapper: release the buffer's lock (no-op if locking disabled). */
void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}
michael@0 583
michael@0 584 size_t
michael@0 585 evbuffer_get_length(const struct evbuffer *buffer)
michael@0 586 {
michael@0 587 size_t result;
michael@0 588
michael@0 589 EVBUFFER_LOCK(buffer);
michael@0 590
michael@0 591 result = (buffer->total_len);
michael@0 592
michael@0 593 EVBUFFER_UNLOCK(buffer);
michael@0 594
michael@0 595 return result;
michael@0 596 }
michael@0 597
michael@0 598 size_t
michael@0 599 evbuffer_get_contiguous_space(const struct evbuffer *buf)
michael@0 600 {
michael@0 601 struct evbuffer_chain *chain;
michael@0 602 size_t result;
michael@0 603
michael@0 604 EVBUFFER_LOCK(buf);
michael@0 605 chain = buf->first;
michael@0 606 result = (chain != NULL ? chain->off : 0);
michael@0 607 EVBUFFER_UNLOCK(buf);
michael@0 608
michael@0 609 return result;
michael@0 610 }
michael@0 611
/* Reserve writable space in 'buf' and describe it in vec[0..n_vecs-1]
 * without committing it (see evbuffer_commit_space()).  Returns the number
 * of iovecs filled in, or -1 if the buffer's end is frozen, n_vecs < 1, or
 * the space cannot be obtained.  With n_vecs == 1 the space is a single
 * contiguous extent; with more, it may be split across chains. */
int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **chainp;
	int n = -1;

	EVBUFFER_LOCK(buf);
	if (buf->freeze_end)
		goto done;
	if (n_vecs < 1)
		goto done;
	if (n_vecs == 1) {
		if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
			goto done;

		vec[0].iov_base = CHAIN_SPACE_PTR(chain);
		vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain);
		EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
		n = 1;
	} else {
		if (_evbuffer_expand_fast(buf, size, n_vecs)<0)
			goto done;
		n = _evbuffer_read_setup_vecs(buf, size, vec, n_vecs,
		    &chainp, 0);
	}

done:
	EVBUFFER_UNLOCK(buf);
	return n;

}
michael@0 644
/* Advance buf->last_with_datap until it addresses the link to the final
 * chain that actually holds data.  Returns how many links it moved past.
 * Requires lock. */
static int
advance_last_with_data(struct evbuffer *buf)
{
	int n = 0;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (!*buf->last_with_datap)
		return 0;

	while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
		buf->last_with_datap = &(*buf->last_with_datap)->next;
		++n;
	}
	return n;
}
michael@0 660
/* Commit space previously reserved with evbuffer_reserve_space().  'vec'
 * must describe (a prefix of) the reserved extents, in order; iov_len may
 * be reduced to commit less than was reserved.  Returns 0 on success, -1
 * on failure — and on failure nothing is committed (validation happens in
 * a separate first pass). */
int
evbuffer_commit_space(struct evbuffer *buf,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **firstchainp, **chainp;
	int result = -1;
	size_t added = 0;
	int i;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end)
		goto done;
	if (n_vecs == 0) {
		result = 0;
		goto done;
	} else if (n_vecs == 1 &&
	    (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) {
		/* The user only got or used one chain; it might not
		 * be the first one with space in it. */
		if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
			goto done;
		buf->last->off += vec[0].iov_len;
		added = vec[0].iov_len;
		if (added)
			advance_last_with_data(buf);
		goto okay;
	}

	/* Advance 'firstchain' to the first chain with space in it. */
	firstchainp = buf->last_with_datap;
	if (!*firstchainp)
		goto done;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* pass 1: make sure that the pointers and lengths of vecs[] are in
	 * bounds before we try to commit anything. */
	for (i=0; i<n_vecs; ++i) {
		if (!chain)
			goto done;
		if (vec[i].iov_base != (void*)CHAIN_SPACE_PTR(chain) ||
		    (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
			goto done;
		chain = chain->next;
	}
	/* pass 2: actually adjust all the chains. */
	chainp = firstchainp;
	for (i=0; i<n_vecs; ++i) {
		(*chainp)->off += vec[i].iov_len;
		added += vec[i].iov_len;
		if (vec[i].iov_len) {
			/* This chain now holds data; it becomes the new
			 * last-with-data candidate. */
			buf->last_with_datap = chainp;
		}
		chainp = &(*chainp)->next;
	}

okay:
	buf->total_len += added;
	buf->n_add_for_cb += added;
	result = 0;
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
michael@0 730
michael@0 731 static inline int
michael@0 732 HAS_PINNED_R(struct evbuffer *buf)
michael@0 733 {
michael@0 734 return (buf->last && CHAIN_PINNED_R(buf->last));
michael@0 735 }
michael@0 736
michael@0 737 static inline void
michael@0 738 ZERO_CHAIN(struct evbuffer *dst)
michael@0 739 {
michael@0 740 ASSERT_EVBUFFER_LOCKED(dst);
michael@0 741 dst->first = NULL;
michael@0 742 dst->last = NULL;
michael@0 743 dst->last_with_datap = &(dst)->first;
michael@0 744 dst->total_len = 0;
michael@0 745 }
michael@0 746
/* Prepares the contents of src to be moved to another buffer by removing
 * read-pinned chains.  The first pinned chain is saved in first, and the
 * last in last.  If src has no read-pinned chains, first and last are set
 * to NULL.  Returns 0 on success, -1 on allocation failure. */
static int
PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
    struct evbuffer_chain **last)
{
	struct evbuffer_chain *chain, **pinned;

	ASSERT_EVBUFFER_LOCKED(src);

	if (!HAS_PINNED_R(src)) {
		*first = *last = NULL;
		return 0;
	}

	/* The first pinned chain is either *last_with_datap itself or the
	 * chain immediately after it. */
	pinned = src->last_with_datap;
	if (!CHAIN_PINNED_R(*pinned))
		pinned = &(*pinned)->next;
	EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
	chain = *first = *pinned;
	*last = src->last;

	/* If there's data in the first pinned chain, we need to allocate
	 * a new chain and copy the data over. */
	if (chain->off) {
		struct evbuffer_chain *tmp;

		EVUTIL_ASSERT(pinned == src->last_with_datap);
		tmp = evbuffer_chain_new(chain->off);
		if (!tmp)
			return -1;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
			chain->off);
		tmp->off = chain->off;
		/* The copy replaces the pinned chain in src; the pinned
		 * chain keeps its storage but now reports no data. */
		*src->last_with_datap = tmp;
		src->last = tmp;
		chain->misalign += chain->off;
		chain->off = 0;
	} else {
		/* Detach the pinned tail from src entirely. */
		src->last = *src->last_with_datap;
		*pinned = NULL;
	}

	return 0;
}
michael@0 794
/* Reattach the pinned chains saved by PRESERVE_PINNED() as src's entire
 * contents; src is left reporting zero length (the pinned chains hold no
 * readable data at this point).  With no pinned chains, src is simply
 * reset to empty. */
static inline void
RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
    struct evbuffer_chain *last)
{
	ASSERT_EVBUFFER_LOCKED(src);

	if (!pinned) {
		ZERO_CHAIN(src);
		return;
	}

	src->first = pinned;
	src->last = last;
	src->last_with_datap = &src->first;
	src->total_len = 0;
}
michael@0 811
/* Make dst take over src's entire chain list and length.  The caller must
 * already have disposed of dst's own chains (they are not freed here).
 * Requires both locks; does not modify src. */
static inline void
COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->first = src->first;
	/* Translate last_with_datap into dst's address space when it
	 * pointed at src's own 'first' slot. */
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->first;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len = src->total_len;
}
michael@0 825
/* Splice all of src's chains after dst's last chain.  dst->last must be
 * non-NULL (the caller only appends onto a non-empty dst).  Requires both
 * locks; does not reset src — the caller does that afterwards. */
static void
APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->last->next = src->first;
	/* Translate last_with_datap when it pointed at src's 'first' slot,
	 * which is now reached through dst->last->next. */
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->last->next;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len += src->total_len;
}
michael@0 839
/* Splice all of src's chains in front of dst's.  Requires both locks;
 * does not reset src — the caller does that afterwards. */
static void
PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	src->last->next = dst->first;
	dst->first = src->first;
	dst->total_len += src->total_len;
	if (*dst->last_with_datap == NULL) {
		/* dst held no data: the last chain with data now lies in
		 * what used to be src. */
		if (src->last_with_datap == &(src)->first)
			dst->last_with_datap = &dst->first;
		else
			dst->last_with_datap = src->last_with_datap;
	} else if (dst->last_with_datap == &dst->first) {
		/* dst's first chain held the data; its link is now the old
		 * src tail's next pointer. */
		dst->last_with_datap = &src->last->next;
	}
}
michael@0 857
/* Move all of inbuf's data onto the end of outbuf without copying the
 * bytes (the chains themselves are transferred).  Pinned chains stay
 * behind in inbuf.  Returns 0 on success (including the no-op cases),
 * -1 if outbuf's end or inbuf's start is frozen, or if pinned-chain
 * bookkeeping fails. */
int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	/* Lock both buffers via LOCK2 so the order is consistent. */
	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	/* Moving zero bytes, or a buffer onto itself, is a no-op. */
	if (in_total_len == 0 || outbuf == inbuf)
		goto done;

	/* Appending writes outbuf's end and drains inbuf's start. */
	if (outbuf->freeze_end || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	/* Detach any pinned chains so they are not moved to outbuf. */
	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		APPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	/* Record the byte movement for deferred callback reporting. */
	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
michael@0 903
/* Move all of inbuf's data to the front of outbuf without copying the
 * bytes (the chains themselves are transferred).  Pinned chains stay
 * behind in inbuf.  Returns 0 on success (including the no-op cases),
 * -1 if either buffer's start is frozen or pinned-chain bookkeeping
 * fails. */
int
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	/* Lock both buffers via LOCK2 so the order is consistent. */
	EVBUFFER_LOCK2(inbuf, outbuf);

	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	/* Moving zero bytes, or a buffer onto itself, is a no-op. */
	if (!in_total_len || inbuf == outbuf)
		goto done;

	/* Prepending writes outbuf's start and drains inbuf's start. */
	if (outbuf->freeze_start || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	/* Detach any pinned chains so they are not moved to outbuf. */
	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		PREPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	/* Record the byte movement for deferred callback reporting. */
	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);
done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
michael@0 949
/* Remove the first 'len' bytes from the front of 'buf' (all of them if
 * len >= the buffered total).  Whole chains are freed; a partially
 * drained chain is adjusted in place via misalign/off.  Returns 0 on
 * success, -1 if the buffer's start is frozen. */
int
evbuffer_drain(struct evbuffer *buf, size_t len)
{
	struct evbuffer_chain *chain, *next;
	size_t remaining, old_len;
	int result = 0;

	EVBUFFER_LOCK(buf);
	old_len = buf->total_len;

	if (old_len == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	if (len >= old_len && !HAS_PINNED_R(buf)) {
		/* Draining everything and nothing is read-pinned: free
		 * every chain and reset the buffer to empty. */
		len = old_len;
		for (chain = buf->first; chain != NULL; chain = next) {
			next = chain->next;
			evbuffer_chain_free(chain);
		}

		ZERO_CHAIN(buf);
	} else {
		if (len >= old_len)
			len = old_len;

		buf->total_len -= len;
		remaining = len;
		for (chain = buf->first;
		    remaining >= chain->off;
		    chain = next) {
			next = chain->next;
			remaining -= chain->off;

			/* Re-anchor last_with_datap at the head if it points
			 * at, or into, the chain we're about to free, so it
			 * never dangles. */
			if (chain == *buf->last_with_datap) {
				buf->last_with_datap = &buf->first;
			}
			if (&chain->next == buf->last_with_datap)
				buf->last_with_datap = &buf->first;

			if (CHAIN_PINNED_R(chain)) {
				/* A read-pinned chain must not be freed.  By
				 * this point all of its data has been
				 * consumed (remaining == 0), so just empty
				 * it in place and stop. */
				EVUTIL_ASSERT(remaining == 0);
				chain->misalign += chain->off;
				chain->off = 0;
				break;
			} else
				evbuffer_chain_free(chain);
		}

		buf->first = chain;
		if (chain) {
			/* Partially drained chain: skip the first
			 * 'remaining' bytes. */
			chain->misalign += remaining;
			chain->off -= remaining;
		}
	}

	buf->n_del_for_cb += len;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
michael@0 1018
michael@0 1019 /* Reads data from an event buffer and drains the bytes read */
michael@0 1020 int
michael@0 1021 evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
michael@0 1022 {
michael@0 1023 ev_ssize_t n;
michael@0 1024 EVBUFFER_LOCK(buf);
michael@0 1025 n = evbuffer_copyout(buf, data_out, datlen);
michael@0 1026 if (n > 0) {
michael@0 1027 if (evbuffer_drain(buf, n)<0)
michael@0 1028 n = -1;
michael@0 1029 }
michael@0 1030 EVBUFFER_UNLOCK(buf);
michael@0 1031 return (int)n;
michael@0 1032 }
michael@0 1033
/* Copy up to 'datlen' bytes from the front of 'buf' into 'data_out'
 * without draining them.  Returns the number of bytes copied, or -1 if
 * the buffer's start is frozen. */
ev_ssize_t
evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
{
	/*XXX fails badly on sendfile case. */
	struct evbuffer_chain *chain;
	char *data = data_out;
	size_t nread;
	ev_ssize_t result = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	/* Clamp the request to the data actually available. */
	if (datlen >= buf->total_len)
		datlen = buf->total_len;

	if (datlen == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	nread = datlen;

	/* Copy whole chains while the request still covers them... */
	while (datlen && datlen >= chain->off) {
		memcpy(data, chain->buffer + chain->misalign, chain->off);
		data += chain->off;
		datlen -= chain->off;

		chain = chain->next;
		EVUTIL_ASSERT(chain || datlen==0);
	}

	/* ...then the leftover partial chain, if any. */
	if (datlen) {
		EVUTIL_ASSERT(chain);
		memcpy(data, chain->buffer + chain->misalign, datlen);
	}

	result = nread;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
michael@0 1079
michael@0 1080 /* reads data from the src buffer to the dst buffer, avoids memcpy as
michael@0 1081 * possible. */
michael@0 1082 /* XXXX should return ev_ssize_t */
/* Move up to 'datlen' bytes from the front of 'src' to the end of
 * 'dst': whole chains are transferred without copying, and only the
 * final partially-consumed chain is memcpy'd (via evbuffer_add).
 * Returns the number of bytes moved, or -1 if dst's end or src's start
 * is frozen. */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
{
	/*XXX We should have an option to force this to be zero-copy.*/

	/*XXX can fail badly on sendfile case. */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;

	EVBUFFER_LOCK2(src, dst);

	chain = previous = src->first;

	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	}

	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	}

	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = (int)datlen; /*XXXX should return ev_ssize_t*/
		goto done;
	}

	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last with data from src unless we
		 * remove all chains, in which case we would have done the if
		 * block above */
		EVUTIL_ASSERT(chain != *src->last_with_datap);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		/* Keep last_with_datap from pointing into a chain that is
		 * about to leave src's list. */
		if (src->last_with_datap == &chain->next)
			src->last_with_datap = &src->first;
		chain = chain->next;
	}

	if (nread) {
		/* we can remove the chain */
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(dst);

		/* Splice the removed chains [src->first .. previous] onto
		 * the end of dst. */
		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			*chp = src->first;
		}
		dst->last = previous;
		previous->next = NULL;
		src->first = chain;
		advance_last_with_data(dst);

		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	}

	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;

	/* You might think we would want to increment dst->n_add_for_cb
	 * here too.  But evbuffer_add above already took care of that.
	 */
	src->total_len -= nread;
	src->n_del_for_cb += nread;

	if (nread) {
		evbuffer_invoke_callbacks(dst);
		evbuffer_invoke_callbacks(src);
	}
	result = (int)nread;/*XXXX should change return type */

done:
	EVBUFFER_UNLOCK2(src, dst);
	return result;
}
michael@0 1172
/* Linearize the first 'size' bytes of 'buf' (the whole buffer if size
 * is negative) into one contiguous chain and return a pointer to them.
 * Returns NULL if size exceeds the buffered data, if a chain that
 * would need to be copied is pinned, or if allocation fails. */
unsigned char *
evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
{
	struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
	unsigned char *buffer, *result = NULL;
	ev_ssize_t remaining;
	int removed_last_with_data = 0;
	int removed_last_with_datap = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (size < 0)
		size = buf->total_len;
	/* if size > buf->total_len, we cannot guarantee to the user that she
	 * is going to have a long enough buffer afterwards; so we return
	 * NULL */
	if (size == 0 || (size_t)size > buf->total_len)
		goto done;

	/* No need to pull up anything; the first size bytes are
	 * already here. */
	if (chain->off >= (size_t)size) {
		result = chain->buffer + chain->misalign;
		goto done;
	}

	/* Make sure that none of the chains we need to copy from is pinned. */
	remaining = size - chain->off;
	EVUTIL_ASSERT(remaining >= 0);
	for (tmp=chain->next; tmp; tmp=tmp->next) {
		if (CHAIN_PINNED(tmp))
			goto done;
		if (tmp->off >= (size_t)remaining)
			break;
		remaining -= tmp->off;
	}

	/* Pick the destination chain 'tmp' and the write cursor 'buffer';
	 * after this if/else, 'chain' is the first chain still to copy. */
	if (CHAIN_PINNED(chain)) {
		/* The first chain is pinned: we may only append into its
		 * free tail, never replace or move it. */
		size_t old_off = chain->off;
		if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
			/* not enough room at end of chunk. */
			goto done;
		}
		buffer = CHAIN_SPACE_PTR(chain);
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
		/* already have enough space in the first chain */
		size_t old_off = chain->off;
		buffer = chain->buffer + chain->misalign + chain->off;
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else {
		/* First chain is too small: allocate a fresh chain big
		 * enough for all 'size' bytes and make it the new head. */
		if ((tmp = evbuffer_chain_new(size)) == NULL) {
			event_warn("%s: out of memory", __func__);
			goto done;
		}
		buffer = tmp->buffer;
		tmp->off = size;
		buf->first = tmp;
	}

	/* TODO(niels): deal with buffers that point to NULL like sendfile */

	/* Copy and free every chunk that will be entirely pulled into tmp */
	last_with_data = *buf->last_with_datap;
	for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
		next = chain->next;

		memcpy(buffer, chain->buffer + chain->misalign, chain->off);
		size -= chain->off;
		buffer += chain->off;
		/* Remember whether last_with_datap referenced a chain we
		 * are freeing, so we can repair it below. */
		if (chain == last_with_data)
			removed_last_with_data = 1;
		if (&chain->next == buf->last_with_datap)
			removed_last_with_datap = 1;

		evbuffer_chain_free(chain);
	}

	if (chain != NULL) {
		/* Partially consumed chain: copy what we need, then advance
		 * its start past the copied bytes. */
		memcpy(buffer, chain->buffer + chain->misalign, size);
		chain->misalign += size;
		chain->off -= size;
	} else {
		buf->last = tmp;
	}

	tmp->next = chain;

	/* Repair last_with_datap if it pointed at (or into) a freed chain. */
	if (removed_last_with_data) {
		buf->last_with_datap = &buf->first;
	} else if (removed_last_with_datap) {
		if (buf->first->next && buf->first->next->off)
			buf->last_with_datap = &buf->first->next;
		else
			buf->last_with_datap = &buf->first;
	}

	result = (tmp->buffer + tmp->misalign);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
michael@0 1284
/*
 * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
 * The returned buffer needs to be freed by the caller.
 */
char *
evbuffer_readline(struct evbuffer *buffer)
{
	/* Wrapper: read a line using EVBUFFER_EOL_ANY semantics and
	 * discard the length that evbuffer_readln would report. */
	return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
}
michael@0 1294
michael@0 1295 static inline ev_ssize_t
michael@0 1296 evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
michael@0 1297 {
michael@0 1298 struct evbuffer_chain *chain = it->_internal.chain;
michael@0 1299 size_t i = it->_internal.pos_in_chain;
michael@0 1300 while (chain != NULL) {
michael@0 1301 char *buffer = (char *)chain->buffer + chain->misalign;
michael@0 1302 char *cp = memchr(buffer+i, chr, chain->off-i);
michael@0 1303 if (cp) {
michael@0 1304 it->_internal.chain = chain;
michael@0 1305 it->_internal.pos_in_chain = cp - buffer;
michael@0 1306 it->pos += (cp - buffer - i);
michael@0 1307 return it->pos;
michael@0 1308 }
michael@0 1309 it->pos += chain->off - i;
michael@0 1310 i = 0;
michael@0 1311 chain = chain->next;
michael@0 1312 }
michael@0 1313
michael@0 1314 return (-1);
michael@0 1315 }
michael@0 1316
/* Return a pointer to the first '\r' or '\n' in the 'len' bytes at
 * 's', or NULL if neither occurs.  When both appear, the earlier one
 * is returned. */
static inline char *
find_eol_char(char *s, size_t len)
{
#define CHUNK_SZ 128
	/* Lots of benchmarking found this approach to be faster in practice
	 * than doing two memchrs over the whole buffer, doing a memchr on
	 * each char of the buffer, or trying to emulate memchr by hand. */
	char *s_end, *cr, *lf;
	s_end = s+len;
	while (s < s_end) {
		/* Compute the chunk length from the remaining byte count.
		 * The old code compared 's + CHUNK_SZ < s_end' and later did
		 * 's += CHUNK_SZ' unconditionally; on a short final chunk
		 * both of those form a pointer more than one element past
		 * the end of the buffer, which is undefined behavior in C
		 * even though the loop then terminated. */
		size_t remaining = (size_t)(s_end - s);
		size_t chunk = remaining < CHUNK_SZ ? remaining : CHUNK_SZ;
		cr = memchr(s, '\r', chunk);
		lf = memchr(s, '\n', chunk);
		if (cr) {
			if (lf && lf < cr)
				return lf;
			return cr;
		} else if (lf) {
			return lf;
		}
		s += chunk;	/* advance by the bytes actually scanned */
	}

	return NULL;
#undef CHUNK_SZ
}
michael@0 1343
michael@0 1344 static ev_ssize_t
michael@0 1345 evbuffer_find_eol_char(struct evbuffer_ptr *it)
michael@0 1346 {
michael@0 1347 struct evbuffer_chain *chain = it->_internal.chain;
michael@0 1348 size_t i = it->_internal.pos_in_chain;
michael@0 1349 while (chain != NULL) {
michael@0 1350 char *buffer = (char *)chain->buffer + chain->misalign;
michael@0 1351 char *cp = find_eol_char(buffer+i, chain->off-i);
michael@0 1352 if (cp) {
michael@0 1353 it->_internal.chain = chain;
michael@0 1354 it->_internal.pos_in_chain = cp - buffer;
michael@0 1355 it->pos += (cp - buffer) - i;
michael@0 1356 return it->pos;
michael@0 1357 }
michael@0 1358 it->pos += chain->off - i;
michael@0 1359 i = 0;
michael@0 1360 chain = chain->next;
michael@0 1361 }
michael@0 1362
michael@0 1363 return (-1);
michael@0 1364 }
michael@0 1365
michael@0 1366 static inline int
michael@0 1367 evbuffer_strspn(
michael@0 1368 struct evbuffer_ptr *ptr, const char *chrset)
michael@0 1369 {
michael@0 1370 int count = 0;
michael@0 1371 struct evbuffer_chain *chain = ptr->_internal.chain;
michael@0 1372 size_t i = ptr->_internal.pos_in_chain;
michael@0 1373
michael@0 1374 if (!chain)
michael@0 1375 return -1;
michael@0 1376
michael@0 1377 while (1) {
michael@0 1378 char *buffer = (char *)chain->buffer + chain->misalign;
michael@0 1379 for (; i < chain->off; ++i) {
michael@0 1380 const char *p = chrset;
michael@0 1381 while (*p) {
michael@0 1382 if (buffer[i] == *p++)
michael@0 1383 goto next;
michael@0 1384 }
michael@0 1385 ptr->_internal.chain = chain;
michael@0 1386 ptr->_internal.pos_in_chain = i;
michael@0 1387 ptr->pos += count;
michael@0 1388 return count;
michael@0 1389 next:
michael@0 1390 ++count;
michael@0 1391 }
michael@0 1392 i = 0;
michael@0 1393
michael@0 1394 if (! chain->next) {
michael@0 1395 ptr->_internal.chain = chain;
michael@0 1396 ptr->_internal.pos_in_chain = i;
michael@0 1397 ptr->pos += count;
michael@0 1398 return count;
michael@0 1399 }
michael@0 1400
michael@0 1401 chain = chain->next;
michael@0 1402 }
michael@0 1403 }
michael@0 1404
michael@0 1405
/* Return the byte at position *it.  The caller must ensure *it points
 * at a valid position (non-NULL chain, offset within the chain). */
static inline char
evbuffer_getchr(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->_internal.chain;
	size_t off = it->_internal.pos_in_chain;

	return chain->buffer[chain->misalign + off];
}
michael@0 1414
/* Find the next end-of-line of the given style at or after *start (or
 * the buffer head if start is NULL).  Returns a pointer positioned at
 * the first EOL byte, with pos set to -1 if none was found; if
 * eol_len_out is non-NULL it receives the number of bytes the EOL
 * occupies. */
struct evbuffer_ptr
evbuffer_search_eol(struct evbuffer *buffer,
    struct evbuffer_ptr *start, size_t *eol_len_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it, it2;
	size_t extra_drain = 0;
	int ok = 0;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&it, start, sizeof(it));
	} else {
		it.pos = 0;
		it._internal.chain = buffer->first;
		it._internal.pos_in_chain = 0;
	}

	/* the eol_style determines our first stop character and how many
	 * characters we are going to drain afterwards. */
	switch (eol_style) {
	case EVBUFFER_EOL_ANY:
		/* Any CR or LF ends the line; the EOL itself is the whole
		 * run of consecutive CR/LF bytes that follows. */
		if (evbuffer_find_eol_char(&it) < 0)
			goto done;
		memcpy(&it2, &it, sizeof(it));
		extra_drain = evbuffer_strspn(&it2, "\r\n");
		break;
	case EVBUFFER_EOL_CRLF_STRICT: {
		/* Only a literal "\r\n" pair counts. */
		it = evbuffer_search(buffer, "\r\n", 2, &it);
		if (it.pos < 0)
			goto done;
		extra_drain = 2;
		break;
	}
	case EVBUFFER_EOL_CRLF:
		/* Accept either "\r\n" or a bare "\n"; step over lone CRs
		 * that are followed by neither. */
		while (1) {
			if (evbuffer_find_eol_char(&it) < 0)
				goto done;
			if (evbuffer_getchr(&it) == '\n') {
				extra_drain = 1;
				break;
			} else if (!evbuffer_ptr_memcmp(
				    buffer, &it, "\r\n", 2)) {
				extra_drain = 2;
				break;
			} else {
				if (evbuffer_ptr_set(buffer, &it, 1,
					EVBUFFER_PTR_ADD)<0)
					goto done;
			}
		}
		break;
	case EVBUFFER_EOL_LF:
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		break;
	default:
		goto done;
	}

	ok = 1;
done:
	EVBUFFER_UNLOCK(buffer);

	/* pos == -1 is the "not found" signal for callers. */
	if (!ok) {
		it.pos = -1;
	}
	if (eol_len_out)
		*eol_len_out = extra_drain;

	return it;
}
michael@0 1489
/* Extract one line from the buffer: find an EOL of the given style,
 * copy everything before it into a freshly malloc'd NUL-terminated
 * string, and drain both the line and the EOL bytes.  Returns NULL if
 * the buffer's start is frozen, no EOL was found, or malloc failed; on
 * success *n_read_out (if non-NULL) receives the line length. */
char *
evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it;
	char *line;
	size_t n_to_copy=0, extra_drain=0;
	char *result = NULL;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	/* Locate the EOL; everything before it is the line. */
	it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
	if (it.pos < 0)
		goto done;
	n_to_copy = it.pos;

	if ((line = mm_malloc(n_to_copy+1)) == NULL) {
		event_warn("%s: out of memory", __func__);
		goto done;
	}

	/* Pull out the line, then discard the EOL bytes themselves. */
	evbuffer_remove(buffer, line, n_to_copy);
	line[n_to_copy] = '\0';

	evbuffer_drain(buffer, extra_drain);
	result = line;
done:
	EVBUFFER_UNLOCK(buffer);

	if (n_read_out)
		*n_read_out = result ? n_to_copy : 0;

	return result;
}
michael@0 1528
michael@0 1529 #define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096
michael@0 1530
michael@0 1531 /* Adds data to an event buffer */
michael@0 1532
/* Append a copy of the 'datlen' bytes at 'data_in' to the end of
 * 'buf'.  Returns 0 on success, -1 if the buffer's end is frozen or a
 * chain allocation fails. */
int
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	const unsigned char *data = data_in;
	size_t remain, to_alloc;
	int result = -1;

	EVBUFFER_LOCK(buf);

	/* Appending is forbidden while the buffer's end is frozen. */
	if (buf->freeze_end) {
		goto done;
	}

	chain = buf->last;

	/* If there are no chains allocated for this buffer, allocate one
	 * big enough to hold all the data. */
	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* Writable last chain: use its free tail, or realign it if
		 * that alone would make the data fit. */
		remain = (size_t)(chain->buffer_len - chain->misalign - chain->off);
		if (remain >= datlen) {
			/* there's enough space to hold all the data in the
			 * current last chain */
			memcpy(chain->buffer + chain->misalign + chain->off,
			    data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (!CHAIN_PINNED(chain) &&
		    evbuffer_chain_should_realign(chain, datlen)) {
			/* we can fit the data into the misalignment */
			evbuffer_chain_align(chain);

			memcpy(chain->buffer + chain->off, data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		}
	} else {
		/* we cannot write any data to the last chain */
		remain = 0;
	}

	/* we need to add another chain */
	to_alloc = chain->buffer_len;
	/* Grow geometrically (up to a cap) so a run of small adds does
	 * not build a long list of tiny chains. */
	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
		to_alloc <<= 1;
	if (datlen > to_alloc)
		to_alloc = datlen;
	tmp = evbuffer_chain_new(to_alloc);
	if (tmp == NULL)
		goto done;

	if (remain) {
		/* Top off the old last chain with as much as fits... */
		memcpy(chain->buffer + chain->misalign + chain->off,
		    data, remain);
		chain->off += remain;
		buf->total_len += remain;
		buf->n_add_for_cb += remain;
	}

	data += remain;
	datlen -= remain;

	/* ...and put the rest into the new chain. */
	memcpy(tmp->buffer, data, datlen);
	tmp->off = datlen;
	evbuffer_chain_insert(buf, tmp);
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
michael@0 1618
michael@0 1619 int
michael@0 1620 evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
michael@0 1621 {
michael@0 1622 struct evbuffer_chain *chain, *tmp;
michael@0 1623 int result = -1;
michael@0 1624
michael@0 1625 EVBUFFER_LOCK(buf);
michael@0 1626
michael@0 1627 if (buf->freeze_start) {
michael@0 1628 goto done;
michael@0 1629 }
michael@0 1630
michael@0 1631 chain = buf->first;
michael@0 1632
michael@0 1633 if (chain == NULL) {
michael@0 1634 chain = evbuffer_chain_new(datlen);
michael@0 1635 if (!chain)
michael@0 1636 goto done;
michael@0 1637 evbuffer_chain_insert(buf, chain);
michael@0 1638 }
michael@0 1639
michael@0 1640 /* we cannot touch immutable buffers */
michael@0 1641 if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
michael@0 1642 /* If this chain is empty, we can treat it as
michael@0 1643 * 'empty at the beginning' rather than 'empty at the end' */
michael@0 1644 if (chain->off == 0)
michael@0 1645 chain->misalign = chain->buffer_len;
michael@0 1646
michael@0 1647 if ((size_t)chain->misalign >= datlen) {
michael@0 1648 /* we have enough space to fit everything */
michael@0 1649 memcpy(chain->buffer + chain->misalign - datlen,
michael@0 1650 data, datlen);
michael@0 1651 chain->off += datlen;
michael@0 1652 chain->misalign -= datlen;
michael@0 1653 buf->total_len += datlen;
michael@0 1654 buf->n_add_for_cb += datlen;
michael@0 1655 goto out;
michael@0 1656 } else if (chain->misalign) {
michael@0 1657 /* we can only fit some of the data. */
michael@0 1658 memcpy(chain->buffer,
michael@0 1659 (char*)data + datlen - chain->misalign,
michael@0 1660 (size_t)chain->misalign);
michael@0 1661 chain->off += (size_t)chain->misalign;
michael@0 1662 buf->total_len += (size_t)chain->misalign;
michael@0 1663 buf->n_add_for_cb += (size_t)chain->misalign;
michael@0 1664 datlen -= (size_t)chain->misalign;
michael@0 1665 chain->misalign = 0;
michael@0 1666 }
michael@0 1667 }
michael@0 1668
michael@0 1669 /* we need to add another chain */
michael@0 1670 if ((tmp = evbuffer_chain_new(datlen)) == NULL)
michael@0 1671 goto done;
michael@0 1672 buf->first = tmp;
michael@0 1673 if (buf->last_with_datap == &buf->first)
michael@0 1674 buf->last_with_datap = &tmp->next;
michael@0 1675
michael@0 1676 tmp->next = chain;
michael@0 1677
michael@0 1678 tmp->off = datlen;
michael@0 1679 tmp->misalign = tmp->buffer_len - datlen;
michael@0 1680
michael@0 1681 memcpy(tmp->buffer + tmp->misalign, data, datlen);
michael@0 1682 buf->total_len += datlen;
michael@0 1683 buf->n_add_for_cb += (size_t)chain->misalign;
michael@0 1684
michael@0 1685 out:
michael@0 1686 evbuffer_invoke_callbacks(buf);
michael@0 1687 result = 0;
michael@0 1688 done:
michael@0 1689 EVBUFFER_UNLOCK(buf);
michael@0 1690 return result;
michael@0 1691 }
michael@0 1692
/** Helper: realigns the memory in chain->buffer so that misalign is 0. */
static void
evbuffer_chain_align(struct evbuffer_chain *chain)
{
	/* Preconditions: the chain must be neither immutable nor pinned,
	 * since realigning moves its contents in memory. */
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
	/* memmove, not memcpy: source and destination ranges overlap. */
	memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
	chain->misalign = 0;
}
michael@0 1702
michael@0 1703 #define MAX_TO_COPY_IN_EXPAND 4096
michael@0 1704 #define MAX_TO_REALIGN_IN_EXPAND 2048
michael@0 1705
michael@0 1706 /** Helper: return true iff we should realign chain to fit datalen bytes of
michael@0 1707 data in it. */
michael@0 1708 static int
michael@0 1709 evbuffer_chain_should_realign(struct evbuffer_chain *chain,
michael@0 1710 size_t datlen)
michael@0 1711 {
michael@0 1712 return chain->buffer_len - chain->off >= datlen &&
michael@0 1713 (chain->off < chain->buffer_len / 2) &&
michael@0 1714 (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
michael@0 1715 }
michael@0 1716
/* Expands the available space in the event buffer to at least datlen, all in
 * a single chunk. Return that chunk. */
static struct evbuffer_chain *
evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain, **chainp;
	struct evbuffer_chain *result = NULL;
	ASSERT_EVBUFFER_LOCKED(buf);

	chainp = buf->last_with_datap;

	/* XXX If *chainp is no longer writeable, but has enough space in its
	 * misalign, this might be a bad idea: we could still use *chainp, not
	 * (*chainp)->next. */
	if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
		chainp = &(*chainp)->next;

	/* 'chain' now points to the first chain with writable space (if any)
	 * We will either use it, realign it, replace it, or resize it. */
	chain = *chainp;

	if (chain == NULL ||
	    (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
		/* We can't use the last_with_data chain at all.  Just add a
		 * new one that's big enough. */
		goto insert_new;
	}

	/* If we can fit all the data, then we don't have to do anything */
	if (CHAIN_SPACE_LEN(chain) >= datlen) {
		result = chain;
		goto ok;
	}

	/* If the chain is completely empty, just replace it by adding a new
	 * empty chain. */
	if (chain->off == 0) {
		goto insert_new;
	}

	/* If the misalignment plus the remaining space fulfills our data
	 * needs, we could just force an alignment to happen.  Afterwards, we
	 * have enough space.  But only do this if we're saving a lot of space
	 * and not moving too much data.  Otherwise the space savings are
	 * probably offset by the time lost in copying.
	 */
	if (evbuffer_chain_should_realign(chain, datlen)) {
		evbuffer_chain_align(chain);
		result = chain;
		goto ok;
	}

	/* At this point, we can either resize the last chunk with space in
	 * it, use the next chunk after it, or If we add a new chunk, we waste
	 * CHAIN_SPACE_LEN(chain) bytes in the former last chunk.  If we
	 * resize, we have to copy chain->off bytes.
	 */

	/* Would expanding this chunk be affordable and worthwhile? */
	if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
	    chain->off > MAX_TO_COPY_IN_EXPAND) {
		/* It's not worth resizing this chain. Can the next one be
		 * used? */
		if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
			/* Yes, we can just use the next chain (which should
			 * be empty. */
			result = chain->next;
			goto ok;
		} else {
			/* No; append a new chain (which will free all
			 * terminal empty chains.) */
			goto insert_new;
		}
	} else {
		/* Okay, we're going to try to resize this chain: Not doing so
		 * would waste at least 1/8 of its current allocation, and we
		 * can do so without having to copy more than
		 * MAX_TO_COPY_IN_EXPAND bytes. */
		/* figure out how much space we need */
		size_t length = chain->off + datlen;
		struct evbuffer_chain *tmp = evbuffer_chain_new(length);
		if (tmp == NULL)
			goto err;

		/* copy the data over that we had so far */
		tmp->off = chain->off;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
		    chain->off);
		/* fix up the list */
		EVUTIL_ASSERT(*chainp == chain);
		result = *chainp = tmp;

		if (buf->last == chain)
			buf->last = tmp;

		tmp->next = chain->next;
		evbuffer_chain_free(chain);
		goto ok;
	}

insert_new:
	result = evbuffer_chain_insert_new(buf, datlen);
	if (!result)
		goto err;
ok:
	EVUTIL_ASSERT(result);
	EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
	/* fallthrough: 'ok' deliberately falls into 'err'; result is NULL
	 * only when allocation failed above. */
err:
	return result;
}
michael@0 1827
/* Make sure that datlen bytes are available for writing in the last n
 * chains.  Never copies or moves data. */
int
_evbuffer_expand_fast(struct evbuffer *buf, size_t datlen, int n)
{
	struct evbuffer_chain *chain = buf->last, *tmp, *next;
	size_t avail;
	int used;

	ASSERT_EVBUFFER_LOCKED(buf);
	/* Single-chain expansion goes through evbuffer_expand_singlechain. */
	EVUTIL_ASSERT(n >= 2);

	if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
		/* There is no last chunk, or we can't touch the last chunk.
		 * Just add a new chunk. */
		chain = evbuffer_chain_new(datlen);
		if (chain == NULL)
			return (-1);

		evbuffer_chain_insert(buf, chain);
		return (0);
	}

	used = 0; /* number of chains we're using space in. */
	avail = 0; /* how much space they have. */
	/* How many bytes can we stick at the end of buffer as it is?  Iterate
	 * over the chains at the end of the buffer, tring to see how much
	 * space we have in the first n. */
	for (chain = *buf->last_with_datap; chain; chain = chain->next) {
		if (chain->off) {
			size_t space = (size_t) CHAIN_SPACE_LEN(chain);
			/* Only the very first chain we inspect may hold
			 * data; everything after it must be empty. */
			EVUTIL_ASSERT(chain == *buf->last_with_datap);
			if (space) {
				avail += space;
				++used;
			}
		} else {
			/* No data in chain; realign it. */
			chain->misalign = 0;
			avail += chain->buffer_len;
			++used;
		}
		if (avail >= datlen) {
			/* There is already enough space.  Just return */
			return (0);
		}
		if (used == n)
			break;
	}

	/* There wasn't enough space in the first n chains with space in
	 * them.  Either add a new chain with enough space, or replace all
	 * empty chains with one that has enough space, depending on n. */
	if (used < n) {
		/* The loop ran off the end of the chains before it hit n
		 * chains; we can add another. */
		EVUTIL_ASSERT(chain == NULL);

		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL)
			return (-1);

		buf->last->next = tmp;
		buf->last = tmp;
		/* (we would only set last_with_data if we added the first
		 * chain. But if the buffer had no chains, we would have
		 * just allocated a new chain earlier) */
		return (0);
	} else {
		/* Nuke _all_ the empty chains. */
		int rmv_all = 0; /* True iff we removed last_with_data. */
		chain = *buf->last_with_datap;
		if (!chain->off) {
			EVUTIL_ASSERT(chain == buf->first);
			rmv_all = 1;
			avail = 0;
		} else {
			/* Keep the partially-filled chain; only its
			 * successors are guaranteed empty. */
			avail = (size_t) CHAIN_SPACE_LEN(chain);
			chain = chain->next;
		}


		/* Free every trailing (empty) chain. */
		for (; chain; chain = next) {
			next = chain->next;
			EVUTIL_ASSERT(chain->off == 0);
			evbuffer_chain_free(chain);
		}
		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL) {
			/* Allocation failed after we already freed chains:
			 * restore the list to a consistent (truncated)
			 * state before reporting failure. */
			if (rmv_all) {
				ZERO_CHAIN(buf);
			} else {
				buf->last = *buf->last_with_datap;
				(*buf->last_with_datap)->next = NULL;
			}
			return (-1);
		}

		if (rmv_all) {
			buf->first = buf->last = tmp;
			buf->last_with_datap = &buf->first;
		} else {
			(*buf->last_with_datap)->next = tmp;
			buf->last = tmp;
		}
		return (0);
	}
}
michael@0 1936
michael@0 1937 int
michael@0 1938 evbuffer_expand(struct evbuffer *buf, size_t datlen)
michael@0 1939 {
michael@0 1940 struct evbuffer_chain *chain;
michael@0 1941
michael@0 1942 EVBUFFER_LOCK(buf);
michael@0 1943 chain = evbuffer_expand_singlechain(buf, datlen);
michael@0 1944 EVBUFFER_UNLOCK(buf);
michael@0 1945 return chain ? 0 : -1;
michael@0 1946 }
michael@0 1947
michael@0 1948 /*
michael@0 1949 * Reads data from a file descriptor into a buffer.
michael@0 1950 */
michael@0 1951
michael@0 1952 #if defined(_EVENT_HAVE_SYS_UIO_H) || defined(WIN32)
michael@0 1953 #define USE_IOVEC_IMPL
michael@0 1954 #endif
michael@0 1955
michael@0 1956 #ifdef USE_IOVEC_IMPL
michael@0 1957
michael@0 1958 #ifdef _EVENT_HAVE_SYS_UIO_H
michael@0 1959 /* number of iovec we use for writev, fragmentation is going to determine
michael@0 1960 * how much we end up writing */
michael@0 1961
michael@0 1962 #define DEFAULT_WRITE_IOVEC 128
michael@0 1963
michael@0 1964 #if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
michael@0 1965 #define NUM_WRITE_IOVEC UIO_MAXIOV
michael@0 1966 #elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
michael@0 1967 #define NUM_WRITE_IOVEC IOV_MAX
michael@0 1968 #else
michael@0 1969 #define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
michael@0 1970 #endif
michael@0 1971
michael@0 1972 #define IOV_TYPE struct iovec
michael@0 1973 #define IOV_PTR_FIELD iov_base
michael@0 1974 #define IOV_LEN_FIELD iov_len
michael@0 1975 #define IOV_LEN_TYPE size_t
michael@0 1976 #else
michael@0 1977 #define NUM_WRITE_IOVEC 16
michael@0 1978 #define IOV_TYPE WSABUF
michael@0 1979 #define IOV_PTR_FIELD buf
michael@0 1980 #define IOV_LEN_FIELD len
michael@0 1981 #define IOV_LEN_TYPE unsigned long
michael@0 1982 #endif
michael@0 1983 #endif
michael@0 1984 #define NUM_READ_IOVEC 4
michael@0 1985
michael@0 1986 #define EVBUFFER_MAX_READ 4096
michael@0 1987
/** Helper function to figure out which space to use for reading data into
    an evbuffer.  Internal use only.

    @param buf The buffer to read into
    @param howmuch How much we want to read.
    @param vecs An array of two or more iovecs or WSABUFs.
    @param n_vecs_avail The length of vecs
    @param chainp A pointer to a variable to hold the first chain we're
      reading into.
    @param exact Boolean: if true, we do not provide more than 'howmuch'
      space in the vectors, even if more space is available.
    @return The number of buffers we're using.
 */
int
_evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
    struct evbuffer_iovec *vecs, int n_vecs_avail,
    struct evbuffer_chain ***chainp, int exact)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain **firstchainp;
	size_t so_far;
	int i;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (howmuch < 0)
		return -1;

	so_far = 0;
	/* Let firstchain be the first chain with any space on it */
	/* NOTE(review): this dereferences *last_with_datap without a NULL
	 * check — callers are expected to have reserved space first (e.g.
	 * via _evbuffer_expand_fast); confirm for any new call site. */
	firstchainp = buf->last_with_datap;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* Fill vectors until we run out of them or have 'howmuch' bytes of
	 * writable space described. */
	for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
		size_t avail = (size_t) CHAIN_SPACE_LEN(chain);
		if (avail > (howmuch - so_far) && exact)
			avail = howmuch - so_far;
		vecs[i].iov_base = CHAIN_SPACE_PTR(chain);
		vecs[i].iov_len = avail;
		so_far += avail;
		chain = chain->next;
	}

	*chainp = firstchainp;
	return i;
}
michael@0 2036
/* Ask the OS how many bytes are pending on 'fd' (FIONREAD), so the caller
 * can size its read.  Returns -1 on ioctl failure; where FIONREAD is not
 * available, returns the fixed EVBUFFER_MAX_READ. */
static int
get_n_bytes_readable_on_socket(evutil_socket_t fd)
{
#if defined(FIONREAD) && defined(WIN32)
	/* Windows sockets use ioctlsocket and an unsigned long count. */
	unsigned long lng = EVBUFFER_MAX_READ;
	if (ioctlsocket(fd, FIONREAD, &lng) < 0)
		return -1;
	/* NOTE(review): a pending count above INT_MAX would be truncated by
	 * this cast; callers clamp to EVBUFFER_MAX_READ anyway. */
	return (int)lng;
#elif defined(FIONREAD)
	int n = EVBUFFER_MAX_READ;
	if (ioctl(fd, FIONREAD, &n) < 0)
		return -1;
	return n;
#else
	return EVBUFFER_MAX_READ;
#endif
}
michael@0 2054
michael@0 2055 /* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
michael@0 2056 * as howmuch? */
michael@0 2057 int
michael@0 2058 evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
michael@0 2059 {
michael@0 2060 struct evbuffer_chain **chainp;
michael@0 2061 int n;
michael@0 2062 int result;
michael@0 2063
michael@0 2064 #ifdef USE_IOVEC_IMPL
michael@0 2065 int nvecs, i, remaining;
michael@0 2066 #else
michael@0 2067 struct evbuffer_chain *chain;
michael@0 2068 unsigned char *p;
michael@0 2069 #endif
michael@0 2070
michael@0 2071 EVBUFFER_LOCK(buf);
michael@0 2072
michael@0 2073 if (buf->freeze_end) {
michael@0 2074 result = -1;
michael@0 2075 goto done;
michael@0 2076 }
michael@0 2077
michael@0 2078 n = get_n_bytes_readable_on_socket(fd);
michael@0 2079 if (n <= 0 || n > EVBUFFER_MAX_READ)
michael@0 2080 n = EVBUFFER_MAX_READ;
michael@0 2081 if (howmuch < 0 || howmuch > n)
michael@0 2082 howmuch = n;
michael@0 2083
michael@0 2084 #ifdef USE_IOVEC_IMPL
michael@0 2085 /* Since we can use iovecs, we're willing to use the last
michael@0 2086 * NUM_READ_IOVEC chains. */
michael@0 2087 if (_evbuffer_expand_fast(buf, howmuch, NUM_READ_IOVEC) == -1) {
michael@0 2088 result = -1;
michael@0 2089 goto done;
michael@0 2090 } else {
michael@0 2091 IOV_TYPE vecs[NUM_READ_IOVEC];
michael@0 2092 #ifdef _EVBUFFER_IOVEC_IS_NATIVE
michael@0 2093 nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs,
michael@0 2094 NUM_READ_IOVEC, &chainp, 1);
michael@0 2095 #else
michael@0 2096 /* We aren't using the native struct iovec. Therefore,
michael@0 2097 we are on win32. */
michael@0 2098 struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
michael@0 2099 nvecs = _evbuffer_read_setup_vecs(buf, howmuch, ev_vecs, 2,
michael@0 2100 &chainp, 1);
michael@0 2101
michael@0 2102 for (i=0; i < nvecs; ++i)
michael@0 2103 WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
michael@0 2104 #endif
michael@0 2105
michael@0 2106 #ifdef WIN32
michael@0 2107 {
michael@0 2108 DWORD bytesRead;
michael@0 2109 DWORD flags=0;
michael@0 2110 if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
michael@0 2111 /* The read failed. It might be a close,
michael@0 2112 * or it might be an error. */
michael@0 2113 if (WSAGetLastError() == WSAECONNABORTED)
michael@0 2114 n = 0;
michael@0 2115 else
michael@0 2116 n = -1;
michael@0 2117 } else
michael@0 2118 n = bytesRead;
michael@0 2119 }
michael@0 2120 #else
michael@0 2121 n = readv(fd, vecs, nvecs);
michael@0 2122 #endif
michael@0 2123 }
michael@0 2124
michael@0 2125 #else /*!USE_IOVEC_IMPL*/
michael@0 2126 /* If we don't have FIONREAD, we might waste some space here */
michael@0 2127 /* XXX we _will_ waste some space here if there is any space left
michael@0 2128 * over on buf->last. */
michael@0 2129 if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
michael@0 2130 result = -1;
michael@0 2131 goto done;
michael@0 2132 }
michael@0 2133
michael@0 2134 /* We can append new data at this point */
michael@0 2135 p = chain->buffer + chain->misalign + chain->off;
michael@0 2136
michael@0 2137 #ifndef WIN32
michael@0 2138 n = read(fd, p, howmuch);
michael@0 2139 #else
michael@0 2140 n = recv(fd, p, howmuch, 0);
michael@0 2141 #endif
michael@0 2142 #endif /* USE_IOVEC_IMPL */
michael@0 2143
michael@0 2144 if (n == -1) {
michael@0 2145 result = -1;
michael@0 2146 goto done;
michael@0 2147 }
michael@0 2148 if (n == 0) {
michael@0 2149 result = 0;
michael@0 2150 goto done;
michael@0 2151 }
michael@0 2152
michael@0 2153 #ifdef USE_IOVEC_IMPL
michael@0 2154 remaining = n;
michael@0 2155 for (i=0; i < nvecs; ++i) {
michael@0 2156 ev_ssize_t space = (ev_ssize_t) CHAIN_SPACE_LEN(*chainp);
michael@0 2157 if (space < remaining) {
michael@0 2158 (*chainp)->off += space;
michael@0 2159 remaining -= (int)space;
michael@0 2160 } else {
michael@0 2161 (*chainp)->off += remaining;
michael@0 2162 buf->last_with_datap = chainp;
michael@0 2163 break;
michael@0 2164 }
michael@0 2165 chainp = &(*chainp)->next;
michael@0 2166 }
michael@0 2167 #else
michael@0 2168 chain->off += n;
michael@0 2169 advance_last_with_data(buf);
michael@0 2170 #endif
michael@0 2171 buf->total_len += n;
michael@0 2172 buf->n_add_for_cb += n;
michael@0 2173
michael@0 2174 /* Tell someone about changes in this buffer */
michael@0 2175 evbuffer_invoke_callbacks(buf);
michael@0 2176 result = n;
michael@0 2177 done:
michael@0 2178 EVBUFFER_UNLOCK(buf);
michael@0 2179 return result;
michael@0 2180 }
michael@0 2181
#ifdef WIN32
/* Windows-only: read from a regular file handle into 'buf' with plain
 * read(), since WSARecv applies only to sockets.  Reserves up to two chunks
 * of space, reads into each in turn, and commits what was filled.
 * Returns bytes read (see note below), 0/negative from read() on EOF/error,
 * or -1 if the end is frozen or space couldn't be reserved. */
static int
evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd, ev_ssize_t howmuch)
{
	int result;
	int nchains, n;
	struct evbuffer_iovec v[2];

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		result = -1;
		goto done;
	}

	if (howmuch < 0)
		howmuch = 16384;	/* default read size when unspecified */


	/* XXX we _will_ waste some space here if there is any space left
	 * over on buf->last. */
	nchains = evbuffer_reserve_space(buf, howmuch, v, 2);
	if (nchains < 1 || nchains > 2) {
		result = -1;
		goto done;
	}
	n = read((int)fd, v[0].iov_base, (unsigned int)v[0].iov_len);
	if (n <= 0) {
		/* Nothing read: report read()'s result, commit nothing. */
		result = n;
		goto done;
	}
	v[0].iov_len = (IOV_LEN_TYPE) n; /* XXXX another problem with big n.*/
	if (nchains > 1) {
		/* Try to fill the second reserved chunk as well. */
		n = read((int)fd, v[1].iov_base, (unsigned int)v[1].iov_len);
		if (n <= 0) {
			/* Second read failed: commit only the first chunk. */
			result = (unsigned long) v[0].iov_len;
			evbuffer_commit_space(buf, v, 1);
			goto done;
		}
		v[1].iov_len = n;
	}
	evbuffer_commit_space(buf, v, nchains);

	/* NOTE(review): when both reads succeed, only the second read's
	 * byte count is returned, not the total committed — confirm this is
	 * what callers expect. */
	result = n;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
#endif
michael@0 2231
#ifdef USE_IOVEC_IMPL
/* Gather up to 'howmuch' bytes from the front of 'buffer' (at most
 * NUM_WRITE_IOVEC chains) and write them to 'fd' with one writev()/WSASend()
 * call.  Does not drain the buffer; the caller does that.
 * Returns bytes written, 0 if there was nothing writable, -1 on error. */
static inline int
evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	IOV_TYPE iov[NUM_WRITE_IOVEC];
	struct evbuffer_chain *chain = buffer->first;
	int n, i = 0;

	if (howmuch < 0)
		return -1;

	ASSERT_EVBUFFER_LOCKED(buffer);
	/* XXX make this top out at some maximal data length?  if the
	 * buffer has (say) 1MB in it, split over 128 chains, there's
	 * no way it all gets written in one go. */
	while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
#ifdef USE_SENDFILE
		/* we cannot write the file info via writev */
		if (chain->flags & EVBUFFER_SENDFILE)
			break;
#endif
		iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
		if ((size_t)howmuch >= chain->off) {
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
			howmuch -= chain->off;
		} else {
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
			break;
		}
		chain = chain->next;
	}
	if (! i)
		return 0;	/* no writable chains (or howmuch was 0) */
#ifdef WIN32
	{
		DWORD bytesSent;
		if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
			n = -1;
		else
			n = bytesSent;
	}
#else
	n = writev(fd, iov, i);
#endif
	return (n);
}
#endif
michael@0 2282
#ifdef USE_SENDFILE
/* Write the sendfile-backed first chain of 'buffer' to 'fd' using the
 * platform's zero-copy primitive.  Returns bytes sent, 0 when the call was
 * interrupted/would block with no progress, or -1 on a real error.
 * Note the BSD/macOS and Linux/Solaris sendfile() APIs differ in both
 * argument order and progress reporting, hence the per-platform branches. */
static inline int
evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	struct evbuffer_chain *chain = buffer->first;
	struct evbuffer_chain_fd *info =
	    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
	int res;
	off_t len = chain->off;
#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
	ev_ssize_t res;
	off_t offset = chain->misalign;
#endif

	ASSERT_EVBUFFER_LOCKED(buffer);

#if defined(SENDFILE_IS_MACOSX)
	/* 'len' is in-out: on return it holds the bytes actually sent, so
	 * we report it even after a retriable failure. */
	res = sendfile(info->fd, fd, chain->misalign, &len, NULL, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_FREEBSD)
	/* Same in-out convention, via the sbytes argument. */
	res = sendfile(info->fd, fd, chain->misalign, chain->off, NULL, &len, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_LINUX)
	/* TODO(niels): implement splice */
	res = sendfile(fd, info->fd, &offset, chain->off);
	if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
		/* if this is EAGAIN or EINTR return 0; otherwise, -1 */
		return (0);
	}
	return (res);
#elif defined(SENDFILE_IS_SOLARIS)
	{
		const off_t offset_orig = offset;
		res = sendfile(fd, info->fd, &offset, chain->off);
		if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
			/* Partial progress before EAGAIN/EINTR still counts
			 * as bytes written. */
			if (offset - offset_orig)
				return offset - offset_orig;
			/* if this is EAGAIN or EINTR and no bytes were
			 * written, return 0 */
			return (0);
		}
		return (res);
	}
#endif
}
#endif
michael@0 2337
/* Write at most 'howmuch' bytes from the front of 'buffer' to 'fd' (the
 * whole buffer when howmuch is negative or too large), draining whatever
 * was actually written.  Returns bytes written, or -1 on error or when the
 * buffer's start is frozen. */
int
evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	int n = -1;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
		howmuch = buffer->total_len;

	if (howmuch > 0) {
#ifdef USE_SENDFILE
		/* A sendfile-backed chain must go via the zero-copy path;
		 * its bytes are not addressable for writev. */
		struct evbuffer_chain *chain = buffer->first;
		if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
			n = evbuffer_write_sendfile(buffer, fd, howmuch);
		else {
#endif
#ifdef USE_IOVEC_IMPL
		n = evbuffer_write_iovec(buffer, fd, howmuch);
#elif defined(WIN32)
		/* XXX(nickm) Don't disable this code until we know if
		 * the WSARecv code above works. */
		void *p = evbuffer_pullup(buffer, howmuch);
		n = send(fd, p, howmuch, 0);
#else
		/* Fallback: linearize the bytes, then one plain write(). */
		void *p = evbuffer_pullup(buffer, howmuch);
		n = write(fd, p, howmuch);
#endif
#ifdef USE_SENDFILE
		}
#endif
	}

	/* Remove what we managed to write from the front of the buffer. */
	if (n > 0)
		evbuffer_drain(buffer, n);

done:
	EVBUFFER_UNLOCK(buffer);
	return (n);
}
michael@0 2383
/** Write the entire contents of 'buffer' to 'fd'; equivalent to
 * evbuffer_write_atmost() with no length limit. */
int
evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
{
	return evbuffer_write_atmost(buffer, fd, -1);
}
michael@0 2389
michael@0 2390 unsigned char *
michael@0 2391 evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
michael@0 2392 {
michael@0 2393 unsigned char *search;
michael@0 2394 struct evbuffer_ptr ptr;
michael@0 2395
michael@0 2396 EVBUFFER_LOCK(buffer);
michael@0 2397
michael@0 2398 ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
michael@0 2399 if (ptr.pos < 0) {
michael@0 2400 search = NULL;
michael@0 2401 } else {
michael@0 2402 search = evbuffer_pullup(buffer, ptr.pos + len);
michael@0 2403 if (search)
michael@0 2404 search += ptr.pos;
michael@0 2405 }
michael@0 2406 EVBUFFER_UNLOCK(buffer);
michael@0 2407 return search;
michael@0 2408 }
michael@0 2409
/** Position 'pos' within 'buf'.  With EVBUFFER_PTR_SET, 'position' is an
 * absolute offset from the start of the buffer; with EVBUFFER_PTR_ADD it is
 * added to pos's current offset (pos must already be valid in that case —
 * it is not re-validated here).  Returns 0 on success; if the target lies
 * past the end of the buffer, pos->pos is set to -1 and -1 is returned. */
int
evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t position, enum evbuffer_ptr_how how)
{
	size_t left = position;
	struct evbuffer_chain *chain = NULL;

	EVBUFFER_LOCK(buf);

	switch (how) {
	case EVBUFFER_PTR_SET:
		/* Walk from the very first chain. */
		chain = buf->first;
		pos->pos = position;
		position = 0;
		break;
	case EVBUFFER_PTR_ADD:
		/* this avoids iterating over all previous chains if
		   we just want to advance the position */
		chain = pos->_internal.chain;
		pos->pos += position;
		position = pos->_internal.pos_in_chain;
		break;
	}

	/* Advance chain by chain until 'left' more bytes (from offset
	 * 'position' in the current chain) fit inside one chain. */
	while (chain && position + left >= chain->off) {
		left -= chain->off - position;
		chain = chain->next;
		position = 0;
	}
	if (chain) {
		pos->_internal.chain = chain;
		pos->_internal.pos_in_chain = position + left;
	} else {
		/* Ran off the end of the buffer: invalidate the pointer. */
		pos->_internal.chain = NULL;
		pos->pos = -1;
	}

	EVBUFFER_UNLOCK(buf);

	return chain != NULL ? 0 : -1;
}
michael@0 2451
/**
   Compare the bytes in buf at position pos to the len bytes in mem.  Return
   less than 0, 0, or greater than 0 as memcmp.
 */
static int
evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
    const char *mem, size_t len)
{
	struct evbuffer_chain *chain;
	size_t position;
	int r;

	ASSERT_EVBUFFER_LOCKED(buf);

	/* A range running past the end of the buffer compares as "less".
	 * NOTE(review): this assumes pos->pos is non-negative here — callers
	 * pass validated positions; confirm for new call sites. */
	if (pos->pos + len > buf->total_len)
		return -1;

	chain = pos->_internal.chain;
	position = pos->_internal.pos_in_chain;
	/* The bytes may span several chains; compare piecewise. */
	while (len && chain) {
		size_t n_comparable;
		if (len + position > chain->off)
			n_comparable = chain->off - position;
		else
			n_comparable = len;
		r = memcmp(chain->buffer + chain->misalign + position, mem,
		    n_comparable);
		if (r)
			return r;
		mem += n_comparable;
		len -= n_comparable;
		position = 0;
		chain = chain->next;
	}

	return 0;
}
michael@0 2489
/** Find the first occurrence of the 'len' bytes at 'what' in 'buffer',
 * starting at 'start' (or the beginning when NULL).  Thin wrapper around
 * evbuffer_search_range() with no end bound. */
struct evbuffer_ptr
evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
{
	return evbuffer_search_range(buffer, what, len, start, NULL);
}
michael@0 2495
/** Search 'buffer' between 'start' and 'end' (either may be NULL for the
 * buffer's own bounds) for the first occurrence of the 'len' bytes at
 * 'what'.  The match may span chain boundaries.  Returns a pointer to the
 * match, or one with pos == -1 when not found (also when len is 0 or
 * exceeds EV_SSIZE_MAX). */
struct evbuffer_ptr
evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
{
	struct evbuffer_ptr pos;
	struct evbuffer_chain *chain, *last_chain = NULL;
	const unsigned char *p;
	char first;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&pos, start, sizeof(pos));
		chain = pos._internal.chain;
	} else {
		pos.pos = 0;
		chain = pos._internal.chain = buffer->first;
		pos._internal.pos_in_chain = 0;
	}

	if (end)
		last_chain = end->_internal.chain;

	if (!len || len > EV_SSIZE_MAX)
		goto done;

	first = what[0];

	/* Per chain: memchr for the needle's first byte, then verify the
	 * whole needle with evbuffer_ptr_memcmp (which can cross chains). */
	while (chain) {
		const unsigned char *start_at =
		    chain->buffer + chain->misalign +
		    pos._internal.pos_in_chain;
		p = memchr(start_at, first,
		    chain->off - pos._internal.pos_in_chain);
		if (p) {
			pos.pos += p - start_at;
			pos._internal.pos_in_chain += p - start_at;
			if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
				/* Full match — but reject it if it extends
				 * past the 'end' bound. */
				if (end && pos.pos + (ev_ssize_t)len > end->pos)
					goto not_found;
				else
					goto done;
			}
			/* False positive: resume one byte further on. */
			++pos.pos;
			++pos._internal.pos_in_chain;
			if (pos._internal.pos_in_chain == chain->off) {
				chain = pos._internal.chain = chain->next;
				pos._internal.pos_in_chain = 0;
			}
		} else {
			if (chain == last_chain)
				goto not_found;
			/* First byte absent from this chain; skip ahead. */
			pos.pos += chain->off - pos._internal.pos_in_chain;
			chain = pos._internal.chain = chain->next;
			pos._internal.pos_in_chain = 0;
		}
	}

not_found:
	pos.pos = -1;
	pos._internal.chain = NULL;
done:
	EVBUFFER_UNLOCK(buffer);
	return pos;
}
michael@0 2560
/* Fill 'vec' (capacity n_vec) with pointers into 'buffer' covering up to
 * 'len' bytes starting at 'start_at' (buffer start when NULL); len < 0
 * means "everything".  No data is copied or removed.  Returns the number of
 * vectors required, which may exceed n_vec if the array was too small. */
int
evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
    struct evbuffer_ptr *start_at,
    struct evbuffer_iovec *vec, int n_vec)
{
	struct evbuffer_chain *chain;
	int idx = 0;
	ev_ssize_t len_so_far = 0;

	EVBUFFER_LOCK(buffer);

	if (start_at) {
		/* The first vector covers the tail of the starting chain,
		 * from the pointer's in-chain offset to the chain's end. */
		chain = start_at->_internal.chain;
		len_so_far = chain->off
		    - start_at->_internal.pos_in_chain;
		idx = 1;
		if (n_vec > 0) {
			vec[0].iov_base = chain->buffer + chain->misalign
			    + start_at->_internal.pos_in_chain;
			vec[0].iov_len = len_so_far;
		}
		chain = chain->next;
	} else {
		chain = buffer->first;
	}

	if (n_vec == 0 && len < 0) {
		/* If no vectors are provided and they asked for "everything",
		 * pretend they asked for the actual available amount. */
		len = buffer->total_len - len_so_far;
	}

	/* One vector per remaining chain until 'len' bytes are covered. */
	while (chain) {
		if (len >= 0 && len_so_far >= len)
			break;
		if (idx<n_vec) {
			vec[idx].iov_base = chain->buffer + chain->misalign;
			vec[idx].iov_len = chain->off;
		} else if (len<0) {
			/* Unbounded request with a full array: stop rather
			 * than count chains forever. */
			break;
		}
		++idx;
		len_so_far += chain->off;
		chain = chain->next;
	}

	EVBUFFER_UNLOCK(buffer);

	return idx;
}
michael@0 2611
michael@0 2612
michael@0 2613 int
michael@0 2614 evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
michael@0 2615 {
michael@0 2616 char *buffer;
michael@0 2617 size_t space;
michael@0 2618 int sz, result = -1;
michael@0 2619 va_list aq;
michael@0 2620 struct evbuffer_chain *chain;
michael@0 2621
michael@0 2622
michael@0 2623 EVBUFFER_LOCK(buf);
michael@0 2624
michael@0 2625 if (buf->freeze_end) {
michael@0 2626 goto done;
michael@0 2627 }
michael@0 2628
michael@0 2629 /* make sure that at least some space is available */
michael@0 2630 if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)
michael@0 2631 goto done;
michael@0 2632
michael@0 2633 for (;;) {
michael@0 2634 #if 0
michael@0 2635 size_t used = chain->misalign + chain->off;
michael@0 2636 buffer = (char *)chain->buffer + chain->misalign + chain->off;
michael@0 2637 EVUTIL_ASSERT(chain->buffer_len >= used);
michael@0 2638 space = chain->buffer_len - used;
michael@0 2639 #endif
michael@0 2640 buffer = (char*) CHAIN_SPACE_PTR(chain);
michael@0 2641 space = (size_t) CHAIN_SPACE_LEN(chain);
michael@0 2642
michael@0 2643 #ifndef va_copy
michael@0 2644 #define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list))
michael@0 2645 #endif
michael@0 2646 va_copy(aq, ap);
michael@0 2647
michael@0 2648 sz = evutil_vsnprintf(buffer, space, fmt, aq);
michael@0 2649
michael@0 2650 va_end(aq);
michael@0 2651
michael@0 2652 if (sz < 0)
michael@0 2653 goto done;
michael@0 2654 if ((size_t)sz < space) {
michael@0 2655 chain->off += sz;
michael@0 2656 buf->total_len += sz;
michael@0 2657 buf->n_add_for_cb += sz;
michael@0 2658
michael@0 2659 advance_last_with_data(buf);
michael@0 2660 evbuffer_invoke_callbacks(buf);
michael@0 2661 result = sz;
michael@0 2662 goto done;
michael@0 2663 }
michael@0 2664 if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
michael@0 2665 goto done;
michael@0 2666 }
michael@0 2667 /* NOTREACHED */
michael@0 2668
michael@0 2669 done:
michael@0 2670 EVBUFFER_UNLOCK(buf);
michael@0 2671 return result;
michael@0 2672 }
michael@0 2673
int
evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
{
	/* Variadic front end for evbuffer_add_vprintf(); same return
	 * convention (bytes appended, or -1 on error). */
	va_list ap;
	int result;

	va_start(ap, fmt);
	result = evbuffer_add_vprintf(buf, fmt, ap);
	va_end(ap);

	return result;
}
michael@0 2686
michael@0 2687 int
michael@0 2688 evbuffer_add_reference(struct evbuffer *outbuf,
michael@0 2689 const void *data, size_t datlen,
michael@0 2690 evbuffer_ref_cleanup_cb cleanupfn, void *extra)
michael@0 2691 {
michael@0 2692 struct evbuffer_chain *chain;
michael@0 2693 struct evbuffer_chain_reference *info;
michael@0 2694 int result = -1;
michael@0 2695
michael@0 2696 chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
michael@0 2697 if (!chain)
michael@0 2698 return (-1);
michael@0 2699 chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
michael@0 2700 chain->buffer = (u_char *)data;
michael@0 2701 chain->buffer_len = datlen;
michael@0 2702 chain->off = datlen;
michael@0 2703
michael@0 2704 info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
michael@0 2705 info->cleanupfn = cleanupfn;
michael@0 2706 info->extra = extra;
michael@0 2707
michael@0 2708 EVBUFFER_LOCK(outbuf);
michael@0 2709 if (outbuf->freeze_end) {
michael@0 2710 /* don't call chain_free; we do not want to actually invoke
michael@0 2711 * the cleanup function */
michael@0 2712 mm_free(chain);
michael@0 2713 goto done;
michael@0 2714 }
michael@0 2715 evbuffer_chain_insert(outbuf, chain);
michael@0 2716 outbuf->n_add_for_cb += datlen;
michael@0 2717
michael@0 2718 evbuffer_invoke_callbacks(outbuf);
michael@0 2719
michael@0 2720 result = 0;
michael@0 2721 done:
michael@0 2722 EVBUFFER_UNLOCK(outbuf);
michael@0 2723
michael@0 2724 return result;
michael@0 2725 }
michael@0 2726
michael@0 2727 /* TODO(niels): maybe we don't want to own the fd, however, in that
michael@0 2728 * case, we should dup it - dup is cheap. Perhaps, we should use a
michael@0 2729 * callback instead?
michael@0 2730 */
michael@0 2731 /* TODO(niels): we may want to add to automagically convert to mmap, in
michael@0 2732 * case evbuffer_remove() or evbuffer_pullup() are being used.
michael@0 2733 */
michael@0 2734 int
michael@0 2735 evbuffer_add_file(struct evbuffer *outbuf, int fd,
michael@0 2736 ev_off_t offset, ev_off_t length)
michael@0 2737 {
michael@0 2738 #if defined(USE_SENDFILE) || defined(_EVENT_HAVE_MMAP)
michael@0 2739 struct evbuffer_chain *chain;
michael@0 2740 struct evbuffer_chain_fd *info;
michael@0 2741 #endif
michael@0 2742 #if defined(USE_SENDFILE)
michael@0 2743 int sendfile_okay = 1;
michael@0 2744 #endif
michael@0 2745 int ok = 1;
michael@0 2746
michael@0 2747 #if defined(USE_SENDFILE)
michael@0 2748 if (use_sendfile) {
michael@0 2749 EVBUFFER_LOCK(outbuf);
michael@0 2750 sendfile_okay = outbuf->flags & EVBUFFER_FLAG_DRAINS_TO_FD;
michael@0 2751 EVBUFFER_UNLOCK(outbuf);
michael@0 2752 }
michael@0 2753
michael@0 2754 if (use_sendfile && sendfile_okay) {
michael@0 2755 chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
michael@0 2756 if (chain == NULL) {
michael@0 2757 event_warn("%s: out of memory", __func__);
michael@0 2758 return (-1);
michael@0 2759 }
michael@0 2760
michael@0 2761 chain->flags |= EVBUFFER_SENDFILE | EVBUFFER_IMMUTABLE;
michael@0 2762 chain->buffer = NULL; /* no reading possible */
michael@0 2763 chain->buffer_len = length + offset;
michael@0 2764 chain->off = length;
michael@0 2765 chain->misalign = offset;
michael@0 2766
michael@0 2767 info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
michael@0 2768 info->fd = fd;
michael@0 2769
michael@0 2770 EVBUFFER_LOCK(outbuf);
michael@0 2771 if (outbuf->freeze_end) {
michael@0 2772 mm_free(chain);
michael@0 2773 ok = 0;
michael@0 2774 } else {
michael@0 2775 outbuf->n_add_for_cb += length;
michael@0 2776 evbuffer_chain_insert(outbuf, chain);
michael@0 2777 }
michael@0 2778 } else
michael@0 2779 #endif
michael@0 2780 #if defined(_EVENT_HAVE_MMAP)
michael@0 2781 if (use_mmap) {
michael@0 2782 void *mapped = mmap(NULL, length + offset, PROT_READ,
michael@0 2783 #ifdef MAP_NOCACHE
michael@0 2784 MAP_NOCACHE |
michael@0 2785 #endif
michael@0 2786 #ifdef MAP_FILE
michael@0 2787 MAP_FILE |
michael@0 2788 #endif
michael@0 2789 MAP_PRIVATE,
michael@0 2790 fd, 0);
michael@0 2791 /* some mmap implementations require offset to be a multiple of
michael@0 2792 * the page size. most users of this api, are likely to use 0
michael@0 2793 * so mapping everything is not likely to be a problem.
michael@0 2794 * TODO(niels): determine page size and round offset to that
michael@0 2795 * page size to avoid mapping too much memory.
michael@0 2796 */
michael@0 2797 if (mapped == MAP_FAILED) {
michael@0 2798 event_warn("%s: mmap(%d, %d, %zu) failed",
michael@0 2799 __func__, fd, 0, (size_t)(offset + length));
michael@0 2800 return (-1);
michael@0 2801 }
michael@0 2802 chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
michael@0 2803 if (chain == NULL) {
michael@0 2804 event_warn("%s: out of memory", __func__);
michael@0 2805 munmap(mapped, length);
michael@0 2806 return (-1);
michael@0 2807 }
michael@0 2808
michael@0 2809 chain->flags |= EVBUFFER_MMAP | EVBUFFER_IMMUTABLE;
michael@0 2810 chain->buffer = mapped;
michael@0 2811 chain->buffer_len = length + offset;
michael@0 2812 chain->off = length + offset;
michael@0 2813
michael@0 2814 info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
michael@0 2815 info->fd = fd;
michael@0 2816
michael@0 2817 EVBUFFER_LOCK(outbuf);
michael@0 2818 if (outbuf->freeze_end) {
michael@0 2819 info->fd = -1;
michael@0 2820 evbuffer_chain_free(chain);
michael@0 2821 ok = 0;
michael@0 2822 } else {
michael@0 2823 outbuf->n_add_for_cb += length;
michael@0 2824
michael@0 2825 evbuffer_chain_insert(outbuf, chain);
michael@0 2826
michael@0 2827 /* we need to subtract whatever we don't need */
michael@0 2828 evbuffer_drain(outbuf, offset);
michael@0 2829 }
michael@0 2830 } else
michael@0 2831 #endif
michael@0 2832 {
michael@0 2833 /* the default implementation */
michael@0 2834 struct evbuffer *tmp = evbuffer_new();
michael@0 2835 ev_ssize_t read;
michael@0 2836
michael@0 2837 if (tmp == NULL)
michael@0 2838 return (-1);
michael@0 2839
michael@0 2840 #ifdef WIN32
michael@0 2841 #define lseek _lseeki64
michael@0 2842 #endif
michael@0 2843 if (lseek(fd, offset, SEEK_SET) == -1) {
michael@0 2844 evbuffer_free(tmp);
michael@0 2845 return (-1);
michael@0 2846 }
michael@0 2847
michael@0 2848 /* we add everything to a temporary buffer, so that we
michael@0 2849 * can abort without side effects if the read fails.
michael@0 2850 */
michael@0 2851 while (length) {
michael@0 2852 read = evbuffer_readfile(tmp, fd, (ev_ssize_t)length);
michael@0 2853 if (read == -1) {
michael@0 2854 evbuffer_free(tmp);
michael@0 2855 return (-1);
michael@0 2856 }
michael@0 2857
michael@0 2858 length -= read;
michael@0 2859 }
michael@0 2860
michael@0 2861 EVBUFFER_LOCK(outbuf);
michael@0 2862 if (outbuf->freeze_end) {
michael@0 2863 evbuffer_free(tmp);
michael@0 2864 ok = 0;
michael@0 2865 } else {
michael@0 2866 evbuffer_add_buffer(outbuf, tmp);
michael@0 2867 evbuffer_free(tmp);
michael@0 2868
michael@0 2869 #ifdef WIN32
michael@0 2870 #define close _close
michael@0 2871 #endif
michael@0 2872 close(fd);
michael@0 2873 }
michael@0 2874 }
michael@0 2875
michael@0 2876 if (ok)
michael@0 2877 evbuffer_invoke_callbacks(outbuf);
michael@0 2878 EVBUFFER_UNLOCK(outbuf);
michael@0 2879
michael@0 2880 return ok ? 0 : -1;
michael@0 2881 }
michael@0 2882
michael@0 2883
michael@0 2884 void
michael@0 2885 evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
michael@0 2886 {
michael@0 2887 EVBUFFER_LOCK(buffer);
michael@0 2888
michael@0 2889 if (!TAILQ_EMPTY(&buffer->callbacks))
michael@0 2890 evbuffer_remove_all_callbacks(buffer);
michael@0 2891
michael@0 2892 if (cb) {
michael@0 2893 struct evbuffer_cb_entry *ent =
michael@0 2894 evbuffer_add_cb(buffer, NULL, cbarg);
michael@0 2895 ent->cb.cb_obsolete = cb;
michael@0 2896 ent->flags |= EVBUFFER_CB_OBSOLETE;
michael@0 2897 }
michael@0 2898 EVBUFFER_UNLOCK(buffer);
michael@0 2899 }
michael@0 2900
michael@0 2901 struct evbuffer_cb_entry *
michael@0 2902 evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
michael@0 2903 {
michael@0 2904 struct evbuffer_cb_entry *e;
michael@0 2905 if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
michael@0 2906 return NULL;
michael@0 2907 EVBUFFER_LOCK(buffer);
michael@0 2908 e->cb.cb_func = cb;
michael@0 2909 e->cbarg = cbarg;
michael@0 2910 e->flags = EVBUFFER_CB_ENABLED;
michael@0 2911 TAILQ_INSERT_HEAD(&buffer->callbacks, e, next);
michael@0 2912 EVBUFFER_UNLOCK(buffer);
michael@0 2913 return e;
michael@0 2914 }
michael@0 2915
michael@0 2916 int
michael@0 2917 evbuffer_remove_cb_entry(struct evbuffer *buffer,
michael@0 2918 struct evbuffer_cb_entry *ent)
michael@0 2919 {
michael@0 2920 EVBUFFER_LOCK(buffer);
michael@0 2921 TAILQ_REMOVE(&buffer->callbacks, ent, next);
michael@0 2922 EVBUFFER_UNLOCK(buffer);
michael@0 2923 mm_free(ent);
michael@0 2924 return 0;
michael@0 2925 }
michael@0 2926
michael@0 2927 int
michael@0 2928 evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
michael@0 2929 {
michael@0 2930 struct evbuffer_cb_entry *cbent;
michael@0 2931 int result = -1;
michael@0 2932 EVBUFFER_LOCK(buffer);
michael@0 2933 TAILQ_FOREACH(cbent, &buffer->callbacks, next) {
michael@0 2934 if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
michael@0 2935 result = evbuffer_remove_cb_entry(buffer, cbent);
michael@0 2936 goto done;
michael@0 2937 }
michael@0 2938 }
michael@0 2939 done:
michael@0 2940 EVBUFFER_UNLOCK(buffer);
michael@0 2941 return result;
michael@0 2942 }
michael@0 2943
michael@0 2944 int
michael@0 2945 evbuffer_cb_set_flags(struct evbuffer *buffer,
michael@0 2946 struct evbuffer_cb_entry *cb, ev_uint32_t flags)
michael@0 2947 {
michael@0 2948 /* the user isn't allowed to mess with these. */
michael@0 2949 flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
michael@0 2950 EVBUFFER_LOCK(buffer);
michael@0 2951 cb->flags |= flags;
michael@0 2952 EVBUFFER_UNLOCK(buffer);
michael@0 2953 return 0;
michael@0 2954 }
michael@0 2955
michael@0 2956 int
michael@0 2957 evbuffer_cb_clear_flags(struct evbuffer *buffer,
michael@0 2958 struct evbuffer_cb_entry *cb, ev_uint32_t flags)
michael@0 2959 {
michael@0 2960 /* the user isn't allowed to mess with these. */
michael@0 2961 flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
michael@0 2962 EVBUFFER_LOCK(buffer);
michael@0 2963 cb->flags &= ~flags;
michael@0 2964 EVBUFFER_UNLOCK(buffer);
michael@0 2965 return 0;
michael@0 2966 }
michael@0 2967
michael@0 2968 int
michael@0 2969 evbuffer_freeze(struct evbuffer *buffer, int start)
michael@0 2970 {
michael@0 2971 EVBUFFER_LOCK(buffer);
michael@0 2972 if (start)
michael@0 2973 buffer->freeze_start = 1;
michael@0 2974 else
michael@0 2975 buffer->freeze_end = 1;
michael@0 2976 EVBUFFER_UNLOCK(buffer);
michael@0 2977 return 0;
michael@0 2978 }
michael@0 2979
michael@0 2980 int
michael@0 2981 evbuffer_unfreeze(struct evbuffer *buffer, int start)
michael@0 2982 {
michael@0 2983 EVBUFFER_LOCK(buffer);
michael@0 2984 if (start)
michael@0 2985 buffer->freeze_start = 0;
michael@0 2986 else
michael@0 2987 buffer->freeze_end = 0;
michael@0 2988 EVBUFFER_UNLOCK(buffer);
michael@0 2989 return 0;
michael@0 2990 }
michael@0 2991
#if 0
/* NOTE(review): this suspend/unsuspend pair is compiled out (#if 0) and is
 * not part of the built library.  It accesses 'cb->cb' and 'cb->cbarg'
 * directly rather than the cb.cb_func/cb.cb_obsolete union used by the live
 * callback code above -- presumably stale; confirm against the current
 * struct evbuffer_cb_entry before ever re-enabling. */
void
evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	/* Record the current length so the callback can later be told the
	 * correct "old size" spanning the whole suspended interval. */
	if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) {
		cb->size_before_suspend = evbuffer_get_length(buffer);
		cb->flags |= EVBUFFER_CB_SUSPENDED;
	}
}

void
evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if ((cb->flags & EVBUFFER_CB_SUSPENDED)) {
		/* Latch the deferred-call request and saved size before
		 * clearing the suspension flags. */
		unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);
		size_t sz = cb->size_before_suspend;
		cb->flags &= ~(EVBUFFER_CB_SUSPENDED|
		    EVBUFFER_CB_CALL_ON_UNSUSPEND);
		cb->size_before_suspend = 0;
		if (call && (cb->flags & EVBUFFER_CB_ENABLED)) {
			cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg);
		}
	}
}
#endif
michael@0 3017
michael@0 3018 /* These hooks are exposed so that the unit tests can temporarily disable
michael@0 3019 * sendfile support in order to test mmap, or both to test linear
michael@0 3020 * access. Don't use it; if we need to add a way to disable sendfile support
michael@0 3021 * in the future, it will probably be via an alternate version of
michael@0 3022 * evbuffer_add_file() with a 'flags' argument.
michael@0 3023 */
michael@0 3024 int _evbuffer_testing_use_sendfile(void);
michael@0 3025 int _evbuffer_testing_use_mmap(void);
michael@0 3026 int _evbuffer_testing_use_linear_file_access(void);
michael@0 3027
int
_evbuffer_testing_use_sendfile(void)
{
	/* Testing hook: make evbuffer_add_file() prefer sendfile and disable
	 * mmap.  Returns nonzero iff sendfile support was compiled in. */
	int enabled = 0;
#ifdef USE_SENDFILE
	use_sendfile = 1;
	enabled = 1;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 0;
#endif
	return enabled;
}
int
_evbuffer_testing_use_mmap(void)
{
	/* Testing hook: make evbuffer_add_file() prefer mmap and disable
	 * sendfile.  Returns nonzero iff mmap support was compiled in. */
	int enabled = 0;
#ifdef USE_SENDFILE
	use_sendfile = 0;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 1;
	enabled = 1;
#endif
	return enabled;
}
int
_evbuffer_testing_use_linear_file_access(void)
{
	/* Testing hook: force evbuffer_add_file() onto the plain
	 * read-into-buffer fallback by disabling both sendfile and mmap.
	 * Always returns 1, since the linear path always exists. */
#ifdef USE_SENDFILE
	use_sendfile = 0;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 0;
#endif
	return 1;
}

mercurial