ipc/chromium/src/third_party/libevent/buffer.c

Wed, 31 Dec 2014 06:09:35 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Wed, 31 Dec 2014 06:09:35 +0100
changeset 0
6474c204b198
permissions
-rw-r--r--

Cloned from upstream tor-browser origin at tag tor-browser-31.3.0esr-4.5-1-build1
(revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f) for development purposes.

     1 /*
     2  * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
     3  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
     4  *
     5  * Redistribution and use in source and binary forms, with or without
     6  * modification, are permitted provided that the following conditions
     7  * are met:
     8  * 1. Redistributions of source code must retain the above copyright
     9  *    notice, this list of conditions and the following disclaimer.
    10  * 2. Redistributions in binary form must reproduce the above copyright
    11  *    notice, this list of conditions and the following disclaimer in the
    12  *    documentation and/or other materials provided with the distribution.
    13  * 3. The name of the author may not be used to endorse or promote products
    14  *    derived from this software without specific prior written permission.
    15  *
    16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
    17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
    18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
    19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
    20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
    21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
    25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    26  */
    28 #include "event2/event-config.h"
    30 #ifdef WIN32
    31 #include <winsock2.h>
    32 #include <windows.h>
    33 #include <io.h>
    34 #endif
    36 #ifdef _EVENT_HAVE_VASPRINTF
    37 /* If we have vasprintf, we need to define this before we include stdio.h. */
    38 #define _GNU_SOURCE
    39 #endif
    41 #include <sys/types.h>
    43 #ifdef _EVENT_HAVE_SYS_TIME_H
    44 #include <sys/time.h>
    45 #endif
    47 #ifdef _EVENT_HAVE_SYS_SOCKET_H
    48 #include <sys/socket.h>
    49 #endif
    51 #ifdef _EVENT_HAVE_SYS_UIO_H
    52 #include <sys/uio.h>
    53 #endif
    55 #ifdef _EVENT_HAVE_SYS_IOCTL_H
    56 #include <sys/ioctl.h>
    57 #endif
    59 #ifdef _EVENT_HAVE_SYS_MMAN_H
    60 #include <sys/mman.h>
    61 #endif
    63 #ifdef _EVENT_HAVE_SYS_SENDFILE_H
    64 #include <sys/sendfile.h>
    65 #endif
    67 #include <errno.h>
    68 #include <stdio.h>
    69 #include <stdlib.h>
    70 #include <string.h>
    71 #ifdef _EVENT_HAVE_STDARG_H
    72 #include <stdarg.h>
    73 #endif
    74 #ifdef _EVENT_HAVE_UNISTD_H
    75 #include <unistd.h>
    76 #endif
    77 #include <limits.h>
    79 #include "event2/event.h"
    80 #include "event2/buffer.h"
    81 #include "event2/buffer_compat.h"
    82 #include "event2/bufferevent.h"
    83 #include "event2/bufferevent_compat.h"
    84 #include "event2/bufferevent_struct.h"
    85 #include "event2/thread.h"
    86 #include "event2/event-config.h"
    87 #include "log-internal.h"
    88 #include "mm-internal.h"
    89 #include "util-internal.h"
    90 #include "evthread-internal.h"
    91 #include "evbuffer-internal.h"
    92 #include "bufferevent-internal.h"
    94 /* some systems do not have MAP_FAILED */
    95 #ifndef MAP_FAILED
    96 #define MAP_FAILED	((void *)-1)
    97 #endif
    99 /* send file support */
   100 #if defined(_EVENT_HAVE_SYS_SENDFILE_H) && defined(_EVENT_HAVE_SENDFILE) && defined(__linux__)
   101 #define USE_SENDFILE		1
   102 #define SENDFILE_IS_LINUX	1
   103 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__FreeBSD__)
   104 #define USE_SENDFILE		1
   105 #define SENDFILE_IS_FREEBSD	1
   106 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__APPLE__)
   107 #define USE_SENDFILE		1
   108 #define SENDFILE_IS_MACOSX	1
   109 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
   110 #define USE_SENDFILE		1
   111 #define SENDFILE_IS_SOLARIS	1
   112 #endif
   114 #ifdef USE_SENDFILE
   115 static int use_sendfile = 1;
   116 #endif
   117 #ifdef _EVENT_HAVE_MMAP
   118 static int use_mmap = 1;
   119 #endif
   122 /* Mask of user-selectable callback flags. */
   123 #define EVBUFFER_CB_USER_FLAGS	    0xffff
   124 /* Mask of all internal-use-only flags. */
   125 #define EVBUFFER_CB_INTERNAL_FLAGS  0xffff0000
   127 /* Flag set if the callback is using the cb_obsolete function pointer  */
   128 #define EVBUFFER_CB_OBSOLETE	       0x00040000
   130 /* evbuffer_chain support */
   131 #define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
   132 #define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
   133 	    0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
   135 #define CHAIN_PINNED(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
   136 #define CHAIN_PINNED_R(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)
   138 static void evbuffer_chain_align(struct evbuffer_chain *chain);
   139 static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
   140     size_t datalen);
   141 static void evbuffer_deferred_callback(struct deferred_cb *cb, void *arg);
   142 static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
   143     const struct evbuffer_ptr *pos, const char *mem, size_t len);
   144 static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
   145     size_t datlen);
   147 #ifdef WIN32
   148 static int evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd,
   149     ev_ssize_t howmuch);
   150 #else
   151 #define evbuffer_readfile evbuffer_read
   152 #endif
   154 static struct evbuffer_chain *
   155 evbuffer_chain_new(size_t size)
   156 {
   157 	struct evbuffer_chain *chain;
   158 	size_t to_alloc;
   160 	size += EVBUFFER_CHAIN_SIZE;
   162 	/* get the next largest memory that can hold the buffer */
   163 	to_alloc = MIN_BUFFER_SIZE;
   164 	while (to_alloc < size)
   165 		to_alloc <<= 1;
   167 	/* we get everything in one chunk */
   168 	if ((chain = mm_malloc(to_alloc)) == NULL)
   169 		return (NULL);
   171 	memset(chain, 0, EVBUFFER_CHAIN_SIZE);
   173 	chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;
   175 	/* this way we can manipulate the buffer to different addresses,
   176 	 * which is required for mmap for example.
   177 	 */
   178 	chain->buffer = EVBUFFER_CHAIN_EXTRA(u_char, chain);
   180 	return (chain);
   181 }
/* Release one chain.  A pinned chain cannot be freed yet: it is marked
 * EVBUFFER_DANGLING and actually released later by _evbuffer_chain_unpin().
 * Chains backed by external resources (mmap regions, sendfile fds, or
 * user-supplied reference memory) release those resources first. */
static inline void
evbuffer_chain_free(struct evbuffer_chain *chain)
{
	if (CHAIN_PINNED(chain)) {
		/* Defer the real free until the pin is dropped. */
		chain->flags |= EVBUFFER_DANGLING;
		return;
	}
	if (chain->flags & (EVBUFFER_MMAP|EVBUFFER_SENDFILE|
		EVBUFFER_REFERENCE)) {
		if (chain->flags & EVBUFFER_REFERENCE) {
			/* User-supplied memory: run its cleanup callback. */
			struct evbuffer_chain_reference *info =
			    EVBUFFER_CHAIN_EXTRA(
				    struct evbuffer_chain_reference,
				    chain);
			if (info->cleanupfn)
				(*info->cleanupfn)(chain->buffer,
				    chain->buffer_len,
				    info->extra);
		}
#ifdef _EVENT_HAVE_MMAP
		if (chain->flags & EVBUFFER_MMAP) {
			/* Memory-mapped chain: unmap, then close the fd. */
			struct evbuffer_chain_fd *info =
			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
				chain);
			if (munmap(chain->buffer, chain->buffer_len) == -1)
				event_warn("%s: munmap failed", __func__);
			if (close(info->fd) == -1)
				event_warn("%s: close(%d) failed",
				    __func__, info->fd);
		}
#endif
#ifdef USE_SENDFILE
		if (chain->flags & EVBUFFER_SENDFILE) {
			/* sendfile chain: only the fd needs closing. */
			struct evbuffer_chain_fd *info =
			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
				chain);
			if (close(info->fd) == -1)
				event_warn("%s: close(%d) failed",
				    __func__, info->fd);
		}
#endif
	}

	mm_free(chain);
}
   229 static void
   230 evbuffer_free_all_chains(struct evbuffer_chain *chain)
   231 {
   232 	struct evbuffer_chain *next;
   233 	for (; chain; chain = next) {
   234 		next = chain->next;
   235 		evbuffer_chain_free(chain);
   236 	}
   237 }
   239 #ifndef NDEBUG
   240 static int
   241 evbuffer_chains_all_empty(struct evbuffer_chain *chain)
   242 {
   243 	for (; chain; chain = chain->next) {
   244 		if (chain->off)
   245 			return 0;
   246 	}
   247 	return 1;
   248 }
   249 #else
   250 /* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
   251 "unused variable" warnings. */
   252 static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
   253 	return 1;
   254 }
   255 #endif
/* Free all trailing chains in 'buf' that are empty and not pinned, prior
 * to replacing them all with a new chain.  Return a pointer to the place
 * where the new chain will go.
 *
 * Internal; requires lock.  The caller must fix up buf->last and buf->first
 * as needed; they might have been freed.
 */
static struct evbuffer_chain **
evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
{
	struct evbuffer_chain **ch = buf->last_with_datap;
	/* Find the first victim chain.  It might be *last_with_datap */
	while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
		ch = &(*ch)->next;
	if (*ch) {
		/* Everything from *ch onward is empty and unpinned. */
		EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
		evbuffer_free_all_chains(*ch);
		*ch = NULL;
	}
	return ch;
}
/* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
 * chains as necessary.  Requires lock.  Does not schedule callbacks.
 */
static void
evbuffer_chain_insert(struct evbuffer *buf,
    struct evbuffer_chain *chain)
{
	ASSERT_EVBUFFER_LOCKED(buf);
	if (*buf->last_with_datap == NULL) {
		/* There are no chains data on the buffer at all. */
		EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
		EVUTIL_ASSERT(buf->first == NULL);
		buf->first = buf->last = chain;
	} else {
		struct evbuffer_chain **ch = buf->last_with_datap;
		/* Find the first victim chain.  It might be *last_with_datap */
		while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
			ch = &(*ch)->next;
		if (*ch == NULL) {
			/* There is no victim; just append this new chain. */
			buf->last->next = chain;
			if (chain->off)
				buf->last_with_datap = &buf->last->next;
		} else {
			/* Replace all victim chains with this chain. */
			EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
			evbuffer_free_all_chains(*ch);
			*ch = chain;
		}
		buf->last = chain;
	}
	/* The chain may already carry data (e.g. a preloaded chain). */
	buf->total_len += chain->off;
}
   313 static inline struct evbuffer_chain *
   314 evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
   315 {
   316 	struct evbuffer_chain *chain;
   317 	if ((chain = evbuffer_chain_new(datlen)) == NULL)
   318 		return NULL;
   319 	evbuffer_chain_insert(buf, chain);
   320 	return chain;
   321 }
   323 void
   324 _evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag)
   325 {
   326 	EVUTIL_ASSERT((chain->flags & flag) == 0);
   327 	chain->flags |= flag;
   328 }
   330 void
   331 _evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag)
   332 {
   333 	EVUTIL_ASSERT((chain->flags & flag) != 0);
   334 	chain->flags &= ~flag;
   335 	if (chain->flags & EVBUFFER_DANGLING)
   336 		evbuffer_chain_free(chain);
   337 }
   339 struct evbuffer *
   340 evbuffer_new(void)
   341 {
   342 	struct evbuffer *buffer;
   344 	buffer = mm_calloc(1, sizeof(struct evbuffer));
   345 	if (buffer == NULL)
   346 		return (NULL);
   348 	TAILQ_INIT(&buffer->callbacks);
   349 	buffer->refcnt = 1;
   350 	buffer->last_with_datap = &buffer->first;
   352 	return (buffer);
   353 }
   355 int
   356 evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
   357 {
   358 	EVBUFFER_LOCK(buf);
   359 	buf->flags |= (ev_uint32_t)flags;
   360 	EVBUFFER_UNLOCK(buf);
   361 	return 0;
   362 }
   364 int
   365 evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
   366 {
   367 	EVBUFFER_LOCK(buf);
   368 	buf->flags &= ~(ev_uint32_t)flags;
   369 	EVBUFFER_UNLOCK(buf);
   370 	return 0;
   371 }
   373 void
   374 _evbuffer_incref(struct evbuffer *buf)
   375 {
   376 	EVBUFFER_LOCK(buf);
   377 	++buf->refcnt;
   378 	EVBUFFER_UNLOCK(buf);
   379 }
   381 void
   382 _evbuffer_incref_and_lock(struct evbuffer *buf)
   383 {
   384 	EVBUFFER_LOCK(buf);
   385 	++buf->refcnt;
   386 }
   388 int
   389 evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
   390 {
   391 	EVBUFFER_LOCK(buffer);
   392 	buffer->cb_queue = event_base_get_deferred_cb_queue(base);
   393 	buffer->deferred_cbs = 1;
   394 	event_deferred_cb_init(&buffer->deferred,
   395 	    evbuffer_deferred_callback, buffer);
   396 	EVBUFFER_UNLOCK(buffer);
   397 	return 0;
   398 }
   400 int
   401 evbuffer_enable_locking(struct evbuffer *buf, void *lock)
   402 {
   403 #ifdef _EVENT_DISABLE_THREAD_SUPPORT
   404 	return -1;
   405 #else
   406 	if (buf->lock)
   407 		return -1;
   409 	if (!lock) {
   410 		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
   411 		if (!lock)
   412 			return -1;
   413 		buf->lock = lock;
   414 		buf->own_lock = 1;
   415 	} else {
   416 		buf->lock = lock;
   417 		buf->own_lock = 0;
   418 	}
   420 	return 0;
   421 #endif
   422 }
/* Associate 'buf' with the bufferevent 'bev'; the parent is referenced by
 * deferred callbacks (see evbuffer_invoke_callbacks /
 * evbuffer_deferred_callback). */
void
evbuffer_set_parent(struct evbuffer *buf, struct bufferevent *bev)
{
	EVBUFFER_LOCK(buf);
	buf->parent = bev;
	EVBUFFER_UNLOCK(buf);
}
/* Run the registered callbacks on 'buffer', reporting how much data was
 * added and deleted since the last run.  Requires the buffer lock.
 *
 * 'running_deferred' is 1 when invoked from the deferred-callback queue.
 * The mask/masked_val pair selects which entries fire now:
 *  - deferred run: enabled callbacks WITHOUT the NODEFER flag;
 *  - inline run on a deferred-cbs buffer: only enabled callbacks WITH
 *    NODEFER (the rest fire later from the queue, so the add/del
 *    counters are left intact for them);
 *  - inline run on an ordinary buffer: every enabled callback. */
static void
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
	struct evbuffer_cb_entry *cbent, *next;
	struct evbuffer_cb_info info;
	size_t new_size;
	ev_uint32_t mask, masked_val;
	int clear = 1;

	if (running_deferred) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	} else if (buffer->deferred_cbs) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		/* Don't zero-out n_add/n_del, since the deferred callbacks
		   will want to see them. */
		clear = 0;
	} else {
		mask = EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	}

	ASSERT_EVBUFFER_LOCKED(buffer);

	if (TAILQ_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}
	/* Nothing changed since the last report: no callbacks to run. */
	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
		return;

	/* Reconstruct the length the buffer had before the changes. */
	new_size = buffer->total_len;
	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
	info.n_added = buffer->n_add_for_cb;
	info.n_deleted = buffer->n_del_for_cb;
	if (clear) {
		buffer->n_add_for_cb = 0;
		buffer->n_del_for_cb = 0;
	}
	for (cbent = TAILQ_FIRST(&buffer->callbacks);
	     cbent != NULL;
	     cbent = next) {
		/* Get the 'next' pointer now in case this callback decides
		 * to remove itself or something. */
		next = TAILQ_NEXT(cbent, next);

		if ((cbent->flags & mask) != masked_val)
			continue;

		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
			cbent->cb.cb_obsolete(buffer,
			    info.orig_size, new_size, cbent->cbarg);
		else
			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
	}
}
/* Report pending changes on 'buffer' to its callbacks.  For a buffer in
 * deferred-callback mode, schedule the deferred run (taking a reference on
 * the buffer and on its parent bufferevent; both are released again in
 * evbuffer_deferred_callback) and then run only the NODEFER callbacks
 * inline via evbuffer_run_callbacks(.., 0). */
void
evbuffer_invoke_callbacks(struct evbuffer *buffer)
{
	if (TAILQ_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}

	if (buffer->deferred_cbs) {
		/* Already queued: the scheduled run will see the updated
		 * counters; don't schedule or ref twice. */
		if (buffer->deferred.queued)
			return;
		_evbuffer_incref_and_lock(buffer);
		if (buffer->parent)
			bufferevent_incref(buffer->parent);
		EVBUFFER_UNLOCK(buffer);
		event_deferred_cb_schedule(buffer->cb_queue, &buffer->deferred);
	}

	evbuffer_run_callbacks(buffer, 0);
}
/* Trampoline executed from the deferred-callback queue: run the deferred
 * callbacks, then drop the buffer and parent references that were taken
 * in evbuffer_invoke_callbacks. */
static void
evbuffer_deferred_callback(struct deferred_cb *cb, void *arg)
{
	struct bufferevent *parent = NULL;
	struct evbuffer *buffer = arg;

	/* XXXX It would be better to run these callbacks without holding the
	 * lock */
	EVBUFFER_LOCK(buffer);
	/* Save the parent before the decref below may free 'buffer'. */
	parent = buffer->parent;
	evbuffer_run_callbacks(buffer, 1);
	_evbuffer_decref_and_unlock(buffer);
	if (parent)
		bufferevent_decref(parent);
}
   527 static void
   528 evbuffer_remove_all_callbacks(struct evbuffer *buffer)
   529 {
   530 	struct evbuffer_cb_entry *cbent;
   532 	while ((cbent = TAILQ_FIRST(&buffer->callbacks))) {
   533 	    TAILQ_REMOVE(&buffer->callbacks, cbent, next);
   534 	    mm_free(cbent);
   535 	}
   536 }
/* Drop one reference to 'buffer' (which the caller must hold locked) and
 * unlock it.  When the last reference goes away: free every chain, remove
 * all callbacks, cancel any pending deferred callback, then release the
 * buffer itself and, if we allocated it, its lock. */
void
_evbuffer_decref_and_unlock(struct evbuffer *buffer)
{
	struct evbuffer_chain *chain, *next;
	ASSERT_EVBUFFER_LOCKED(buffer);

	EVUTIL_ASSERT(buffer->refcnt > 0);

	if (--buffer->refcnt > 0) {
		EVBUFFER_UNLOCK(buffer);
		return;
	}

	/* Last reference: tear everything down. */
	for (chain = buffer->first; chain != NULL; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
	evbuffer_remove_all_callbacks(buffer);
	if (buffer->deferred_cbs)
		event_deferred_cb_cancel(buffer->cb_queue, &buffer->deferred);

	EVBUFFER_UNLOCK(buffer);
	if (buffer->own_lock)
		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(buffer);
}
/* Public destructor: drop the caller's reference.  The buffer is freed
 * only once the last reference (e.g. one held by a scheduled deferred
 * callback) is released. */
void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
	_evbuffer_decref_and_unlock(buffer);
}
/* Public wrapper: acquire the buffer's (recursive) lock, if locking was
 * enabled via evbuffer_enable_locking. */
void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}
/* Public wrapper: release the lock taken by evbuffer_lock. */
void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}
   584 size_t
   585 evbuffer_get_length(const struct evbuffer *buffer)
   586 {
   587 	size_t result;
   589 	EVBUFFER_LOCK(buffer);
   591 	result = (buffer->total_len);
   593 	EVBUFFER_UNLOCK(buffer);
   595 	return result;
   596 }
   598 size_t
   599 evbuffer_get_contiguous_space(const struct evbuffer *buf)
   600 {
   601 	struct evbuffer_chain *chain;
   602 	size_t result;
   604 	EVBUFFER_LOCK(buf);
   605 	chain = buf->first;
   606 	result = (chain != NULL ? chain->off : 0);
   607 	EVBUFFER_UNLOCK(buf);
   609 	return result;
   610 }
/* Make at least 'size' bytes of free space available at the end of 'buf'
 * and describe it in 'vec' without committing it (see
 * evbuffer_commit_space).  With n_vecs == 1 the space is contiguous in a
 * single chain; otherwise it may be spread across several chains by
 * _evbuffer_expand_fast.  Returns the number of iovecs filled in, or -1
 * on failure (including a frozen buffer end). */
int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **chainp;
	int n = -1;

	EVBUFFER_LOCK(buf);
	if (buf->freeze_end)
		goto done;
	if (n_vecs < 1)
		goto done;
	if (n_vecs == 1) {
		if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
			goto done;

		/* Report all the free space in the chain, which may be
		 * more than was asked for. */
		vec[0].iov_base = CHAIN_SPACE_PTR(chain);
		vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain);
		EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
		n = 1;
	} else {
		if (_evbuffer_expand_fast(buf, size, n_vecs)<0)
			goto done;
		n = _evbuffer_read_setup_vecs(buf, size, vec, n_vecs,
				&chainp, 0);
	}

done:
	EVBUFFER_UNLOCK(buf);
	return n;

}
   645 static int
   646 advance_last_with_data(struct evbuffer *buf)
   647 {
   648 	int n = 0;
   649 	ASSERT_EVBUFFER_LOCKED(buf);
   651 	if (!*buf->last_with_datap)
   652 		return 0;
   654 	while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
   655 		buf->last_with_datap = &(*buf->last_with_datap)->next;
   656 		++n;
   657 	}
   658 	return n;
   659 }
/* Commit space previously handed out by evbuffer_reserve_space: extend
 * the chains' data lengths to cover the bytes the caller actually wrote,
 * as described by 'vec'.  The vecs must match the layout reserve_space
 * produced; everything is validated before anything is modified.
 * Returns 0 on success, -1 on validation failure or a frozen end. */
int
evbuffer_commit_space(struct evbuffer *buf,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **firstchainp, **chainp;
	int result = -1;
	size_t added = 0;
	int i;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end)
		goto done;
	if (n_vecs == 0) {
		result = 0;
		goto done;
	} else if (n_vecs == 1 &&
	    (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) {
		/* The user only got or used one chain; it might not
		 * be the first one with space in it. */
		if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
			goto done;
		buf->last->off += vec[0].iov_len;
		added = vec[0].iov_len;
		if (added)
			advance_last_with_data(buf);
		goto okay;
	}

	/* Advance 'firstchain' to the first chain with space in it. */
	firstchainp = buf->last_with_datap;
	if (!*firstchainp)
		goto done;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* pass 1: make sure that the pointers and lengths of vecs[] are in
	 * bounds before we try to commit anything. */
	for (i=0; i<n_vecs; ++i) {
		if (!chain)
			goto done;
		if (vec[i].iov_base != (void*)CHAIN_SPACE_PTR(chain) ||
		    (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
			goto done;
		chain = chain->next;
	}
	/* pass 2: actually adjust all the chains. */
	chainp = firstchainp;
	for (i=0; i<n_vecs; ++i) {
		(*chainp)->off += vec[i].iov_len;
		added += vec[i].iov_len;
		if (vec[i].iov_len) {
			buf->last_with_datap = chainp;
		}
		chainp = &(*chainp)->next;
	}

okay:
	buf->total_len += added;
	buf->n_add_for_cb += added;
	result = 0;
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
   731 static inline int
   732 HAS_PINNED_R(struct evbuffer *buf)
   733 {
   734 	return (buf->last && CHAIN_PINNED_R(buf->last));
   735 }
/* Reset 'dst' to the empty state.  Does NOT free the chains; the caller
 * must have freed or re-homed them already.  Requires lock. */
static inline void
ZERO_CHAIN(struct evbuffer *dst)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	dst->first = NULL;
	dst->last = NULL;
	dst->last_with_datap = &(dst)->first;
	dst->total_len = 0;
}
/* Prepares the contents of src to be moved to another buffer by removing
 * read-pinned chains. The first pinned chain is saved in first, and the
 * last in last. If src has no read-pinned chains, first and last are set
 * to NULL.  Returns 0 on success, -1 on allocation failure. */
static int
PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
		struct evbuffer_chain **last)
{
	struct evbuffer_chain *chain, **pinned;

	ASSERT_EVBUFFER_LOCKED(src);

	if (!HAS_PINNED_R(src)) {
		*first = *last = NULL;
		return 0;
	}

	/* Locate the link pointing at the first read-pinned chain; it is
	 * either the last chain with data or the one right after it. */
	pinned = src->last_with_datap;
	if (!CHAIN_PINNED_R(*pinned))
		pinned = &(*pinned)->next;
	EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
	chain = *first = *pinned;
	*last = src->last;

	/* If there's data in the first pinned chain, we need to allocate
	 * a new chain and copy the data over. */
	if (chain->off) {
		struct evbuffer_chain *tmp;

		EVUTIL_ASSERT(pinned == src->last_with_datap);
		tmp = evbuffer_chain_new(chain->off);
		if (!tmp)
			return -1;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
			chain->off);
		tmp->off = chain->off;
		/* The copy replaces the pinned chain in src; the pinned
		 * chain itself is emptied but left intact for the caller. */
		*src->last_with_datap = tmp;
		src->last = tmp;
		chain->misalign += chain->off;
		chain->off = 0;
	} else {
		/* Simply detach the pinned tail from src. */
		src->last = *src->last_with_datap;
		*pinned = NULL;
	}

	return 0;
}
   795 static inline void
   796 RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
   797 		struct evbuffer_chain *last)
   798 {
   799 	ASSERT_EVBUFFER_LOCKED(src);
   801 	if (!pinned) {
   802 		ZERO_CHAIN(src);
   803 		return;
   804 	}
   806 	src->first = pinned;
   807 	src->last = last;
   808 	src->last_with_datap = &src->first;
   809 	src->total_len = 0;
   810 }
   812 static inline void
   813 COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
   814 {
   815 	ASSERT_EVBUFFER_LOCKED(dst);
   816 	ASSERT_EVBUFFER_LOCKED(src);
   817 	dst->first = src->first;
   818 	if (src->last_with_datap == &src->first)
   819 		dst->last_with_datap = &dst->first;
   820 	else
   821 		dst->last_with_datap = src->last_with_datap;
   822 	dst->last = src->last;
   823 	dst->total_len = src->total_len;
   824 }
   826 static void
   827 APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
   828 {
   829 	ASSERT_EVBUFFER_LOCKED(dst);
   830 	ASSERT_EVBUFFER_LOCKED(src);
   831 	dst->last->next = src->first;
   832 	if (src->last_with_datap == &src->first)
   833 		dst->last_with_datap = &dst->last->next;
   834 	else
   835 		dst->last_with_datap = src->last_with_datap;
   836 	dst->last = src->last;
   837 	dst->total_len += src->total_len;
   838 }
/* Link all of 'src''s chains in front of the chains of 'dst', fixing up
 * dst->last_with_datap so it still points at the link to the last chain
 * holding data.  Requires both locks. */
static void
PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	src->last->next = dst->first;
	dst->first = src->first;
	dst->total_len += src->total_len;
	if (*dst->last_with_datap == NULL) {
		/* dst held no data: the last data now comes from src. */
		if (src->last_with_datap == &(src)->first)
			dst->last_with_datap = &dst->first;
		else
			dst->last_with_datap = src->last_with_datap;
	} else if (dst->last_with_datap == &dst->first) {
		/* dst's first chain held its last data; that chain is now
		 * reached through the link out of src's old tail. */
		dst->last_with_datap = &src->last->next;
	}
}
/* Move all data from 'inbuf' to the end of 'outbuf' by relinking chains
 * (zero-copy).  Read-pinned chains of 'inbuf' stay behind via
 * PRESERVE_PINNED/RESTORE_PINNED.  Returns 0 on success, -1 if a freeze
 * or an allocation failure prevents the move. */
int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	/* Moving nothing, or moving a buffer onto itself, is a no-op. */
	if (in_total_len == 0 || outbuf == inbuf)
		goto done;

	if (outbuf->freeze_end || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		APPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
/* Move all data from 'inbuf' to the FRONT of 'outbuf' by relinking chains
 * (zero-copy).  Read-pinned chains of 'inbuf' stay behind via
 * PRESERVE_PINNED/RESTORE_PINNED.  Returns 0 on success, -1 if a freeze
 * or an allocation failure prevents the move. */
int
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);

	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	/* Moving nothing, or moving a buffer onto itself, is a no-op. */
	if (!in_total_len || inbuf == outbuf)
		goto done;

	/* Prepending writes to outbuf's start, hence freeze_start here. */
	if (outbuf->freeze_start || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		PREPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);
done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
   950 int
   951 evbuffer_drain(struct evbuffer *buf, size_t len)
   952 {
   953 	struct evbuffer_chain *chain, *next;
   954 	size_t remaining, old_len;
   955 	int result = 0;
   957 	EVBUFFER_LOCK(buf);
   958 	old_len = buf->total_len;
   960 	if (old_len == 0)
   961 		goto done;
   963 	if (buf->freeze_start) {
   964 		result = -1;
   965 		goto done;
   966 	}
   968 	if (len >= old_len && !HAS_PINNED_R(buf)) {
   969 		len = old_len;
   970 		for (chain = buf->first; chain != NULL; chain = next) {
   971 			next = chain->next;
   972 			evbuffer_chain_free(chain);
   973 		}
   975 		ZERO_CHAIN(buf);
   976 	} else {
   977 		if (len >= old_len)
   978 			len = old_len;
   980 		buf->total_len -= len;
   981 		remaining = len;
   982 		for (chain = buf->first;
   983 		     remaining >= chain->off;
   984 		     chain = next) {
   985 			next = chain->next;
   986 			remaining -= chain->off;
   988 			if (chain == *buf->last_with_datap) {
   989 				buf->last_with_datap = &buf->first;
   990 			}
   991 			if (&chain->next == buf->last_with_datap)
   992 				buf->last_with_datap = &buf->first;
   994 			if (CHAIN_PINNED_R(chain)) {
   995 				EVUTIL_ASSERT(remaining == 0);
   996 				chain->misalign += chain->off;
   997 				chain->off = 0;
   998 				break;
   999 			} else
  1000 				evbuffer_chain_free(chain);
  1003 		buf->first = chain;
  1004 		if (chain) {
  1005 			chain->misalign += remaining;
  1006 			chain->off -= remaining;
  1010 	buf->n_del_for_cb += len;
  1011 	/* Tell someone about changes in this buffer */
  1012 	evbuffer_invoke_callbacks(buf);
  1014 done:
  1015 	EVBUFFER_UNLOCK(buf);
  1016 	return result;
  1019 /* Reads data from an event buffer and drains the bytes read */
  1020 int
  1021 evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
  1023 	ev_ssize_t n;
  1024 	EVBUFFER_LOCK(buf);
  1025 	n = evbuffer_copyout(buf, data_out, datlen);
  1026 	if (n > 0) {
  1027 		if (evbuffer_drain(buf, n)<0)
  1028 			n = -1;
  1030 	EVBUFFER_UNLOCK(buf);
  1031 	return (int)n;
  1034 ev_ssize_t
  1035 evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
  1037 	/*XXX fails badly on sendfile case. */
  1038 	struct evbuffer_chain *chain;
  1039 	char *data = data_out;
  1040 	size_t nread;
  1041 	ev_ssize_t result = 0;
  1043 	EVBUFFER_LOCK(buf);
  1045 	chain = buf->first;
  1047 	if (datlen >= buf->total_len)
  1048 		datlen = buf->total_len;
  1050 	if (datlen == 0)
  1051 		goto done;
  1053 	if (buf->freeze_start) {
  1054 		result = -1;
  1055 		goto done;
  1058 	nread = datlen;
  1060 	while (datlen && datlen >= chain->off) {
  1061 		memcpy(data, chain->buffer + chain->misalign, chain->off);
  1062 		data += chain->off;
  1063 		datlen -= chain->off;
  1065 		chain = chain->next;
  1066 		EVUTIL_ASSERT(chain || datlen==0);
  1069 	if (datlen) {
  1070 		EVUTIL_ASSERT(chain);
  1071 		memcpy(data, chain->buffer + chain->misalign, datlen);
  1074 	result = nread;
  1075 done:
  1076 	EVBUFFER_UNLOCK(buf);
  1077 	return result;
/* reads data from the src buffer to the dst buffer, avoids memcpy as
 * possible. */
/*  XXXX should return ev_ssize_t */
/* Move up to 'datlen' bytes from the front of 'src' to the end of 'dst',
 * transferring whole chains instead of copying whenever possible.
 * Returns the number of bytes moved, or -1 if either buffer is frozen at
 * the relevant end. */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
	/*XXX We should have an option to force this to be zero-copy.*/
	/*XXX can fail badly on sendfile case. */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;
	EVBUFFER_LOCK2(src, dst);
	chain = previous = src->first;
	/* Moving zero bytes, or moving a buffer into itself, is a no-op. */
	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = (int)datlen; /*XXXX should return ev_ssize_t*/
		goto done;
	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last with data from src unless we
		 * remove all chains, in which case we would have done the if
		 * block above */
		EVUTIL_ASSERT(chain != *src->last_with_datap);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		/* Keep src->last_with_datap valid while unlinking chains. */
		if (src->last_with_datap == &chain->next)
			src->last_with_datap = &src->first;
		chain = chain->next;
	if (nread) {
		/* we can remove the chain */
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(dst);
		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			*chp = src->first;
		/* Splice [src->first .. previous] onto the tail of dst. */
		dst->last = previous;
		previous->next = NULL;
		src->first = chain;
		advance_last_with_data(dst);
		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;
	/* You might think we would want to increment dst->n_add_for_cb
	 * here too.  But evbuffer_add above already took care of that.
	 */
	src->total_len -= nread;
	src->n_del_for_cb += nread;
	if (nread) {
		evbuffer_invoke_callbacks(dst);
		evbuffer_invoke_callbacks(src);
	result = (int)nread;/*XXXX should change return type */
done:
	EVBUFFER_UNLOCK2(src, dst);
	return result;
/* Linearize the first 'size' bytes of 'buf' so they are contiguous, and
 * return a pointer to them.  size < 0 means the whole buffer.  Returns
 * NULL if the buffer holds fewer than 'size' bytes or a chain that would
 * need copying is pinned. */
unsigned char *
evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
	struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
	unsigned char *buffer, *result = NULL;
	ev_ssize_t remaining;
	int removed_last_with_data = 0;
	int removed_last_with_datap = 0;
	EVBUFFER_LOCK(buf);
	chain = buf->first;
	if (size < 0)
		size = buf->total_len;
	/* if size > buf->total_len, we cannot guarantee to the user that she
	 * is going to have a long enough buffer afterwards; so we return
	 * NULL */
	if (size == 0 || (size_t)size > buf->total_len)
		goto done;
	/* No need to pull up anything; the first size bytes are
	 * already here. */
	if (chain->off >= (size_t)size) {
		result = chain->buffer + chain->misalign;
		goto done;
	/* Make sure that none of the chains we need to copy from is pinned. */
	remaining = size - chain->off;
	EVUTIL_ASSERT(remaining >= 0);
	for (tmp=chain->next; tmp; tmp=tmp->next) {
		if (CHAIN_PINNED(tmp))
			goto done;
		if (tmp->off >= (size_t)remaining)
			break;
		remaining -= tmp->off;
	/* Choose the destination 'tmp' that will hold the pulled-up bytes:
	 * the pinned first chain (if it has tail room), the first chain's
	 * own allocation (if large enough), or a brand-new chain. */
	if (CHAIN_PINNED(chain)) {
		size_t old_off = chain->off;
		if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
			/* not enough room at end of chunk. */
			goto done;
		buffer = CHAIN_SPACE_PTR(chain);
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
		/* already have enough space in the first chain */
		size_t old_off = chain->off;
		buffer = chain->buffer + chain->misalign + chain->off;
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else {
		if ((tmp = evbuffer_chain_new(size)) == NULL) {
			event_warn("%s: out of memory", __func__);
			goto done;
		buffer = tmp->buffer;
		tmp->off = size;
		buf->first = tmp;
	/* TODO(niels): deal with buffers that point to NULL like sendfile */
	/* Copy and free every chunk that will be entirely pulled into tmp */
	last_with_data = *buf->last_with_datap;
	for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
		next = chain->next;
		memcpy(buffer, chain->buffer + chain->misalign, chain->off);
		size -= chain->off;
		buffer += chain->off;
		if (chain == last_with_data)
			removed_last_with_data = 1;
		if (&chain->next == buf->last_with_datap)
			removed_last_with_datap = 1;
		evbuffer_chain_free(chain);
	/* Copy the remaining partial chunk (if any) and relink the list. */
	if (chain != NULL) {
		memcpy(buffer, chain->buffer + chain->misalign, size);
		chain->misalign += size;
		chain->off -= size;
	} else {
		buf->last = tmp;
	tmp->next = chain;
	/* Repair last_with_datap if the chain it referenced was freed. */
	if (removed_last_with_data) {
		buf->last_with_datap = &buf->first;
	} else if (removed_last_with_datap) {
		if (buf->first->next && buf->first->next->off)
			buf->last_with_datap = &buf->first->next;
		else
			buf->last_with_datap = &buf->first;
	result = (tmp->buffer + tmp->misalign);
done:
	EVBUFFER_UNLOCK(buf);
	return result;
  1285 /*
  1286  * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
  1287  * The returned buffer needs to be freed by the called.
  1288  */
  1289 char *
  1290 evbuffer_readline(struct evbuffer *buffer)
  1292 	return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
/* Advance 'it' to the next occurrence of 'chr' at or after its current
 * position.  On success, returns the absolute position of the match (also
 * stored in it->pos) and leaves the iterator pointing at it; returns -1 if
 * the character does not occur. */
static inline ev_ssize_t
evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
	struct evbuffer_chain *chain = it->_internal.chain;
	size_t i = it->_internal.pos_in_chain;
	while (chain != NULL) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		char *cp = memchr(buffer+i, chr, chain->off-i);
		if (cp) {
			/* Found: park the iterator on the match. */
			it->_internal.chain = chain;
			it->_internal.pos_in_chain = cp - buffer;
			it->pos += (cp - buffer - i);
			return it->pos;
		/* Not in this chain; account for the bytes we skipped. */
		it->pos += chain->off - i;
		i = 0;
		chain = chain->next;
	return (-1);
  1317 static inline char *
  1318 find_eol_char(char *s, size_t len)
  1320 #define CHUNK_SZ 128
  1321 	/* Lots of benchmarking found this approach to be faster in practice
  1322 	 * than doing two memchrs over the whole buffer, doin a memchr on each
  1323 	 * char of the buffer, or trying to emulate memchr by hand. */
  1324 	char *s_end, *cr, *lf;
  1325 	s_end = s+len;
  1326 	while (s < s_end) {
  1327 		size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s);
  1328 		cr = memchr(s, '\r', chunk);
  1329 		lf = memchr(s, '\n', chunk);
  1330 		if (cr) {
  1331 			if (lf && lf < cr)
  1332 				return lf;
  1333 			return cr;
  1334 		} else if (lf) {
  1335 			return lf;
  1337 		s += CHUNK_SZ;
  1340 	return NULL;
  1341 #undef CHUNK_SZ
/* Like evbuffer_strchr(), but advance 'it' to the next '\r' or '\n'
 * (whichever comes first).  Returns the absolute position of the match, or
 * -1 if no EOL character remains. */
static ev_ssize_t
evbuffer_find_eol_char(struct evbuffer_ptr *it)
	struct evbuffer_chain *chain = it->_internal.chain;
	size_t i = it->_internal.pos_in_chain;
	while (chain != NULL) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		char *cp = find_eol_char(buffer+i, chain->off-i);
		if (cp) {
			/* Found: park the iterator on the EOL byte. */
			it->_internal.chain = chain;
			it->_internal.pos_in_chain = cp - buffer;
			it->pos += (cp - buffer) - i;
			return it->pos;
		/* Not in this chain; account for the bytes we skipped. */
		it->pos += chain->off - i;
		i = 0;
		chain = chain->next;
	return (-1);
/* Advance 'ptr' past every leading byte that occurs in the NUL-terminated
 * set 'chrset'.  Returns the number of bytes skipped, or -1 if the
 * iterator has no chain. */
static inline int
evbuffer_strspn(
	struct evbuffer_ptr *ptr, const char *chrset)
	int count = 0;
	struct evbuffer_chain *chain = ptr->_internal.chain;
	size_t i = ptr->_internal.pos_in_chain;
	if (!chain)
		return -1;
	while (1) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		for (; i < chain->off; ++i) {
			const char *p = chrset;
			while (*p) {
				if (buffer[i] == *p++)
					goto next;
			/* buffer[i] is not in chrset: stop here. */
			ptr->_internal.chain = chain;
			ptr->_internal.pos_in_chain = i;
			ptr->pos += count;
			return count;
		next:
			++count;
		i = 0;
		/* Ran off the end of the buffer: park the iterator there. */
		if (! chain->next) {
			ptr->_internal.chain = chain;
			ptr->_internal.pos_in_chain = i;
			ptr->pos += count;
			return count;
		chain = chain->next;
  1406 static inline char
  1407 evbuffer_getchr(struct evbuffer_ptr *it)
  1409 	struct evbuffer_chain *chain = it->_internal.chain;
  1410 	size_t off = it->_internal.pos_in_chain;
  1412 	return chain->buffer[chain->misalign + off];
/* Locate the first end-of-line marker of style 'eol_style' at or after
 * '*start' (or the buffer's start if 'start' is NULL).  On success the
 * returned evbuffer_ptr points at the EOL and '*eol_len_out' (if non-NULL)
 * receives its length in bytes; on failure the returned position is -1. */
struct evbuffer_ptr
evbuffer_search_eol(struct evbuffer *buffer,
    struct evbuffer_ptr *start, size_t *eol_len_out,
    enum evbuffer_eol_style eol_style)
	struct evbuffer_ptr it, it2;
	size_t extra_drain = 0;
	int ok = 0;
	EVBUFFER_LOCK(buffer);
	if (start) {
		memcpy(&it, start, sizeof(it));
	} else {
		it.pos = 0;
		it._internal.chain = buffer->first;
		it._internal.pos_in_chain = 0;
	/* the eol_style determines our first stop character and how many
	 * characters we are going to drain afterwards. */
	switch (eol_style) {
	case EVBUFFER_EOL_ANY:
		/* Any maximal run of CR/LF bytes counts as one EOL. */
		if (evbuffer_find_eol_char(&it) < 0)
			goto done;
		memcpy(&it2, &it, sizeof(it));
		extra_drain = evbuffer_strspn(&it2, "\r\n");
		break;
	case EVBUFFER_EOL_CRLF_STRICT: {
		it = evbuffer_search(buffer, "\r\n", 2, &it);
		if (it.pos < 0)
			goto done;
		extra_drain = 2;
		break;
	case EVBUFFER_EOL_CRLF:
		/* Accept either a bare LF or a full CRLF. */
		while (1) {
			if (evbuffer_find_eol_char(&it) < 0)
				goto done;
			if (evbuffer_getchr(&it) == '\n') {
				extra_drain = 1;
				break;
			} else if (!evbuffer_ptr_memcmp(
				    buffer, &it, "\r\n", 2)) {
				extra_drain = 2;
				break;
			} else {
				/* Lone CR: skip it and keep scanning. */
				if (evbuffer_ptr_set(buffer, &it, 1,
					EVBUFFER_PTR_ADD)<0)
					goto done;
		break;
	case EVBUFFER_EOL_LF:
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		break;
	default:
		goto done;
	ok = 1;
done:
	EVBUFFER_UNLOCK(buffer);
	/* On failure, flag the returned pointer as invalid. */
	if (!ok) {
		it.pos = -1;
	if (eol_len_out)
		*eol_len_out = extra_drain;
	return it;
/* Extract one line (without its EOL marker, per 'eol_style') from 'buffer'
 * into a freshly allocated NUL-terminated string; both the line and its
 * EOL are drained.  Returns NULL if no complete line is available, the
 * buffer's front is frozen, or allocation fails.  '*n_read_out' (if
 * non-NULL) receives the line length, or 0 on failure. */
char *
evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
		enum evbuffer_eol_style eol_style)
	struct evbuffer_ptr it;
	char *line;
	size_t n_to_copy=0, extra_drain=0;
	char *result = NULL;
	EVBUFFER_LOCK(buffer);
	if (buffer->freeze_start) {
		goto done;
	it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
	if (it.pos < 0)
		goto done;
	n_to_copy = it.pos;
	if ((line = mm_malloc(n_to_copy+1)) == NULL) {
		event_warn("%s: out of memory", __func__);
		goto done;
	/* Copy-and-drain the line itself, then drain the EOL bytes. */
	evbuffer_remove(buffer, line, n_to_copy);
	line[n_to_copy] = '\0';
	evbuffer_drain(buffer, extra_drain);
	result = line;
done:
	EVBUFFER_UNLOCK(buffer);
	if (n_read_out)
		*n_read_out = result ? n_to_copy : 0;
	return result;
  1529 #define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096
/* Adds data to an event buffer */
/* Append 'datlen' bytes from 'data_in' to the end of 'buf'.  Returns 0 on
 * success, -1 if the buffer's end is frozen or allocation fails. */
int
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
	struct evbuffer_chain *chain, *tmp;
	const unsigned char *data = data_in;
	size_t remain, to_alloc;
	int result = -1;
	EVBUFFER_LOCK(buf);
	if (buf->freeze_end) {
		goto done;
	chain = buf->last;
	/* If there are no chains allocated for this buffer, allocate one
	 * big enough to hold all the data. */
	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* Writable tail space left in the last chain. */
		remain = (size_t)(chain->buffer_len - chain->misalign - chain->off);
		if (remain >= datlen) {
			/* there's enough space to hold all the data in the
			 * current last chain */
			memcpy(chain->buffer + chain->misalign + chain->off,
			    data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (!CHAIN_PINNED(chain) &&
		    evbuffer_chain_should_realign(chain, datlen)) {
			/* we can fit the data into the misalignment */
			evbuffer_chain_align(chain);
			memcpy(chain->buffer + chain->off, data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
	} else {
		/* we cannot write any data to the last chain */
		remain = 0;
	/* we need to add another chain */
	/* Grow geometrically (up to a cap) so repeated small adds stay
	 * cheap, but never allocate less than the data requires. */
	to_alloc = chain->buffer_len;
	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
		to_alloc <<= 1;
	if (datlen > to_alloc)
		to_alloc = datlen;
	tmp = evbuffer_chain_new(to_alloc);
	if (tmp == NULL)
		goto done;
	/* First fill whatever tail space the old last chain still has. */
	if (remain) {
		memcpy(chain->buffer + chain->misalign + chain->off,
		    data, remain);
		chain->off += remain;
		buf->total_len += remain;
		buf->n_add_for_cb += remain;
	data += remain;
	datlen -= remain;
	memcpy(tmp->buffer, data, datlen);
	tmp->off = datlen;
	evbuffer_chain_insert(buf, tmp);
	buf->n_add_for_cb += datlen;
out:
	evbuffer_invoke_callbacks(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
  1619 int
  1620 evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
  1622 	struct evbuffer_chain *chain, *tmp;
  1623 	int result = -1;
  1625 	EVBUFFER_LOCK(buf);
  1627 	if (buf->freeze_start) {
  1628 		goto done;
  1631 	chain = buf->first;
  1633 	if (chain == NULL) {
  1634 		chain = evbuffer_chain_new(datlen);
  1635 		if (!chain)
  1636 			goto done;
  1637 		evbuffer_chain_insert(buf, chain);
  1640 	/* we cannot touch immutable buffers */
  1641 	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
  1642 		/* If this chain is empty, we can treat it as
  1643 		 * 'empty at the beginning' rather than 'empty at the end' */
  1644 		if (chain->off == 0)
  1645 			chain->misalign = chain->buffer_len;
  1647 		if ((size_t)chain->misalign >= datlen) {
  1648 			/* we have enough space to fit everything */
  1649 			memcpy(chain->buffer + chain->misalign - datlen,
  1650 			    data, datlen);
  1651 			chain->off += datlen;
  1652 			chain->misalign -= datlen;
  1653 			buf->total_len += datlen;
  1654 			buf->n_add_for_cb += datlen;
  1655 			goto out;
  1656 		} else if (chain->misalign) {
  1657 			/* we can only fit some of the data. */
  1658 			memcpy(chain->buffer,
  1659 			    (char*)data + datlen - chain->misalign,
  1660 			    (size_t)chain->misalign);
  1661 			chain->off += (size_t)chain->misalign;
  1662 			buf->total_len += (size_t)chain->misalign;
  1663 			buf->n_add_for_cb += (size_t)chain->misalign;
  1664 			datlen -= (size_t)chain->misalign;
  1665 			chain->misalign = 0;
  1669 	/* we need to add another chain */
  1670 	if ((tmp = evbuffer_chain_new(datlen)) == NULL)
  1671 		goto done;
  1672 	buf->first = tmp;
  1673 	if (buf->last_with_datap == &buf->first)
  1674 		buf->last_with_datap = &tmp->next;
  1676 	tmp->next = chain;
  1678 	tmp->off = datlen;
  1679 	tmp->misalign = tmp->buffer_len - datlen;
  1681 	memcpy(tmp->buffer + tmp->misalign, data, datlen);
  1682 	buf->total_len += datlen;
  1683 	buf->n_add_for_cb += (size_t)chain->misalign;
  1685 out:
  1686 	evbuffer_invoke_callbacks(buf);
  1687 	result = 0;
  1688 done:
  1689 	EVBUFFER_UNLOCK(buf);
  1690 	return result;
  1693 /** Helper: realigns the memory in chain->buffer so that misalign is 0. */
  1694 static void
  1695 evbuffer_chain_align(struct evbuffer_chain *chain)
  1697 	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
  1698 	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
  1699 	memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
  1700 	chain->misalign = 0;
  1703 #define MAX_TO_COPY_IN_EXPAND 4096
  1704 #define MAX_TO_REALIGN_IN_EXPAND 2048
  1706 /** Helper: return true iff we should realign chain to fit datalen bytes of
  1707     data in it. */
  1708 static int
  1709 evbuffer_chain_should_realign(struct evbuffer_chain *chain,
  1710     size_t datlen)
  1712 	return chain->buffer_len - chain->off >= datlen &&
  1713 	    (chain->off < chain->buffer_len / 2) &&
  1714 	    (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
/* Expands the available space in the event buffer to at least datlen, all in
 * a single chunk.  Return that chunk. */
/* Returns the chain holding the writable space, or NULL on allocation
 * failure or if no suitable chain can be produced. */
static struct evbuffer_chain *
evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
	struct evbuffer_chain *chain, **chainp;
	struct evbuffer_chain *result = NULL;
	ASSERT_EVBUFFER_LOCKED(buf);
	chainp = buf->last_with_datap;
	/* XXX If *chainp is no longer writeable, but has enough space in its
	 * misalign, this might be a bad idea: we could still use *chainp, not
	 * (*chainp)->next. */
	if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
		chainp = &(*chainp)->next;
	/* 'chain' now points to the first chain with writable space (if any)
	 * We will either use it, realign it, replace it, or resize it. */
	chain = *chainp;
	if (chain == NULL ||
	    (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
		/* We can't use the last_with_data chain at all.  Just add a
		 * new one that's big enough. */
		goto insert_new;
	/* If we can fit all the data, then we don't have to do anything */
	if (CHAIN_SPACE_LEN(chain) >= datlen) {
		result = chain;
		goto ok;
	/* If the chain is completely empty, just replace it by adding a new
	 * empty chain. */
	if (chain->off == 0) {
		goto insert_new;
	/* If the misalignment plus the remaining space fulfills our data
	 * needs, we could just force an alignment to happen.  Afterwards, we
	 * have enough space.  But only do this if we're saving a lot of space
	 * and not moving too much data.  Otherwise the space savings are
	 * probably offset by the time lost in copying.
	 */
	if (evbuffer_chain_should_realign(chain, datlen)) {
		evbuffer_chain_align(chain);
		result = chain;
		goto ok;
	/* At this point, we can either resize the last chunk with space in
	 * it, use the next chunk after it, or   If we add a new chunk, we waste
	 * CHAIN_SPACE_LEN(chain) bytes in the former last chunk.  If we
	 * resize, we have to copy chain->off bytes.
	 */
	/* Would expanding this chunk be affordable and worthwhile? */
	if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
	    chain->off > MAX_TO_COPY_IN_EXPAND) {
		/* It's not worth resizing this chain. Can the next one be
		 * used? */
		if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
			/* Yes, we can just use the next chain (which should
			 * be empty. */
			result = chain->next;
			goto ok;
		} else {
			/* No; append a new chain (which will free all
			 * terminal empty chains.) */
			goto insert_new;
	} else {
		/* Okay, we're going to try to resize this chain: Not doing so
		 * would waste at least 1/8 of its current allocation, and we
		 * can do so without having to copy more than
		 * MAX_TO_COPY_IN_EXPAND bytes. */
		/* figure out how much space we need */
		size_t length = chain->off + datlen;
		struct evbuffer_chain *tmp = evbuffer_chain_new(length);
		if (tmp == NULL)
			goto err;
		/* copy the data over that we had so far */
		tmp->off = chain->off;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
		    chain->off);
		/* fix up the list */
		EVUTIL_ASSERT(*chainp == chain);
		result = *chainp = tmp;
		if (buf->last == chain)
			buf->last = tmp;
		tmp->next = chain->next;
		evbuffer_chain_free(chain);
		goto ok;
insert_new:
	result = evbuffer_chain_insert_new(buf, datlen);
	if (!result)
		goto err;
ok:
	EVUTIL_ASSERT(result);
	EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
	/* note: 'ok' deliberately falls through to 'err'; 'result' is NULL
	 * only on the failure paths. */
err:
	return result;
/* Make sure that datlen bytes are available for writing in the last n
 * chains.  Never copies or moves data. */
/* Returns 0 on success, -1 on allocation failure. */
int
_evbuffer_expand_fast(struct evbuffer *buf, size_t datlen, int n)
	struct evbuffer_chain *chain = buf->last, *tmp, *next;
	size_t avail;
	int used;
	ASSERT_EVBUFFER_LOCKED(buf);
	EVUTIL_ASSERT(n >= 2);
	if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
		/* There is no last chunk, or we can't touch the last chunk.
		 * Just add a new chunk. */
		chain = evbuffer_chain_new(datlen);
		if (chain == NULL)
			return (-1);
		evbuffer_chain_insert(buf, chain);
		return (0);
	used = 0; /* number of chains we're using space in. */
	avail = 0; /* how much space they have. */
	/* How many bytes can we stick at the end of buffer as it is?  Iterate
	 * over the chains at the end of the buffer, tring to see how much
	 * space we have in the first n. */
	for (chain = *buf->last_with_datap; chain; chain = chain->next) {
		if (chain->off) {
			size_t space = (size_t) CHAIN_SPACE_LEN(chain);
			/* Only the last chain with data may hold any bytes. */
			EVUTIL_ASSERT(chain == *buf->last_with_datap);
			if (space) {
				avail += space;
				++used;
		} else {
			/* No data in chain; realign it. */
			chain->misalign = 0;
			avail += chain->buffer_len;
			++used;
		if (avail >= datlen) {
			/* There is already enough space.  Just return */
			return (0);
		if (used == n)
			break;
	/* There wasn't enough space in the first n chains with space in
	 * them. Either add a new chain with enough space, or replace all
	 * empty chains with one that has enough space, depending on n. */
	if (used < n) {
		/* The loop ran off the end of the chains before it hit n
		 * chains; we can add another. */
		EVUTIL_ASSERT(chain == NULL);
		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL)
			return (-1);
		buf->last->next = tmp;
		buf->last = tmp;
		/* (we would only set last_with_data if we added the first
		 * chain. But if the buffer had no chains, we would have
		 * just allocated a new chain earlier) */
		return (0);
	} else {
		/* Nuke _all_ the empty chains. */
		int rmv_all = 0; /* True iff we removed last_with_data. */
		chain = *buf->last_with_datap;
		if (!chain->off) {
			EVUTIL_ASSERT(chain == buf->first);
			rmv_all = 1;
			avail = 0;
		} else {
			/* Keep the last chain with data; free what follows. */
			avail = (size_t) CHAIN_SPACE_LEN(chain);
			chain = chain->next;
		for (; chain; chain = next) {
			next = chain->next;
			EVUTIL_ASSERT(chain->off == 0);
			evbuffer_chain_free(chain);
		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL) {
			/* Allocation failed: restore a consistent list before
			 * reporting the error. */
			if (rmv_all) {
				ZERO_CHAIN(buf);
			} else {
				buf->last = *buf->last_with_datap;
				(*buf->last_with_datap)->next = NULL;
			return (-1);
		if (rmv_all) {
			buf->first = buf->last = tmp;
			buf->last_with_datap = &buf->first;
		} else {
			(*buf->last_with_datap)->next = tmp;
			buf->last = tmp;
		return (0);
  1937 int
  1938 evbuffer_expand(struct evbuffer *buf, size_t datlen)
  1940 	struct evbuffer_chain *chain;
  1942 	EVBUFFER_LOCK(buf);
  1943 	chain = evbuffer_expand_singlechain(buf, datlen);
  1944 	EVBUFFER_UNLOCK(buf);
  1945 	return chain ? 0 : -1;
  1948 /*
  1949  * Reads data from a file descriptor into a buffer.
  1950  */
  1952 #if defined(_EVENT_HAVE_SYS_UIO_H) || defined(WIN32)
  1953 #define USE_IOVEC_IMPL
  1954 #endif
  1956 #ifdef USE_IOVEC_IMPL
  1958 #ifdef _EVENT_HAVE_SYS_UIO_H
  1959 /* number of iovec we use for writev, fragmentation is going to determine
  1960  * how much we end up writing */
  1962 #define DEFAULT_WRITE_IOVEC 128
  1964 #if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
  1965 #define NUM_WRITE_IOVEC UIO_MAXIOV
  1966 #elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
  1967 #define NUM_WRITE_IOVEC IOV_MAX
  1968 #else
  1969 #define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
  1970 #endif
  1972 #define IOV_TYPE struct iovec
  1973 #define IOV_PTR_FIELD iov_base
  1974 #define IOV_LEN_FIELD iov_len
  1975 #define IOV_LEN_TYPE size_t
  1976 #else
  1977 #define NUM_WRITE_IOVEC 16
  1978 #define IOV_TYPE WSABUF
  1979 #define IOV_PTR_FIELD buf
  1980 #define IOV_LEN_FIELD len
  1981 #define IOV_LEN_TYPE unsigned long
  1982 #endif
  1983 #endif
  1984 #define NUM_READ_IOVEC 4
  1986 #define EVBUFFER_MAX_READ	4096
  1988 /** Helper function to figure out which space to use for reading data into
  1989     an evbuffer.  Internal use only.
  1991     @param buf The buffer to read into
  1992     @param howmuch How much we want to read.
  1993     @param vecs An array of two or more iovecs or WSABUFs.
  1994     @param n_vecs_avail The length of vecs
  1995     @param chainp A pointer to a variable to hold the first chain we're
  1996       reading into.
  1997     @param exact Boolean: if true, we do not provide more than 'howmuch'
  1998       space in the vectors, even if more space is available.
  1999     @return The number of buffers we're using.
  2000  */
/* Fill 'vecs' with the writable space at the tail of 'buf', starting at
 * the first chain with free room; see the doxygen comment above for the
 * full contract.  Returns the number of vecs used, or -1 if howmuch < 0. */
int
_evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
    struct evbuffer_iovec *vecs, int n_vecs_avail,
    struct evbuffer_chain ***chainp, int exact)
	struct evbuffer_chain *chain;
	struct evbuffer_chain **firstchainp;
	size_t so_far;
	int i;
	ASSERT_EVBUFFER_LOCKED(buf);
	if (howmuch < 0)
		return -1;
	so_far = 0;
	/* Let firstchain be the first chain with any space on it */
	firstchainp = buf->last_with_datap;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	chain = *firstchainp;
	for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
		size_t avail = (size_t) CHAIN_SPACE_LEN(chain);
		/* With 'exact' set, never hand out more than requested. */
		if (avail > (howmuch - so_far) && exact)
			avail = howmuch - so_far;
		vecs[i].iov_base = CHAIN_SPACE_PTR(chain);
		vecs[i].iov_len = avail;
		so_far += avail;
		chain = chain->next;
	*chainp = firstchainp;
	return i;
/* Return how many bytes can be read from 'fd' without blocking, as
 * reported by FIONREAD; -1 on ioctl failure, or EVBUFFER_MAX_READ when
 * the platform gives no way to ask. */
static int
get_n_bytes_readable_on_socket(evutil_socket_t fd)
#if defined(FIONREAD) && defined(WIN32)
	unsigned long lng = EVBUFFER_MAX_READ;
	if (ioctlsocket(fd, FIONREAD, &lng) < 0)
		return -1;
	return (int)lng;
#elif defined(FIONREAD)
	int n = EVBUFFER_MAX_READ;
	if (ioctl(fd, FIONREAD, &n) < 0)
		return -1;
	return n;
#else
	return EVBUFFER_MAX_READ;
#endif
/* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
 * as howmuch? */
/* Read up to 'howmuch' bytes (or a probed/default amount if howmuch < 0)
 * from socket 'fd' and append them to 'buf'.  Returns the number of bytes
 * read, 0 on EOF/connection-close, or -1 on error or a frozen end. */
int
evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
	struct evbuffer_chain **chainp;
	int n;
	int result;
#ifdef USE_IOVEC_IMPL
	int nvecs, i, remaining;
#else
	struct evbuffer_chain *chain;
	unsigned char *p;
#endif
	EVBUFFER_LOCK(buf);
	if (buf->freeze_end) {
		result = -1;
		goto done;
	/* Ask the kernel how much is pending, and clamp the request. */
	n = get_n_bytes_readable_on_socket(fd);
	if (n <= 0 || n > EVBUFFER_MAX_READ)
		n = EVBUFFER_MAX_READ;
	if (howmuch < 0 || howmuch > n)
		howmuch = n;
#ifdef USE_IOVEC_IMPL
	/* Since we can use iovecs, we're willing to use the last
	 * NUM_READ_IOVEC chains. */
	if (_evbuffer_expand_fast(buf, howmuch, NUM_READ_IOVEC) == -1) {
		result = -1;
		goto done;
	} else {
		IOV_TYPE vecs[NUM_READ_IOVEC];
#ifdef _EVBUFFER_IOVEC_IS_NATIVE
		nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs,
		    NUM_READ_IOVEC, &chainp, 1);
#else
		/* We aren't using the native struct iovec.  Therefore,
		   we are on win32. */
		struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
		nvecs = _evbuffer_read_setup_vecs(buf, howmuch, ev_vecs, 2,
		    &chainp, 1);
		for (i=0; i < nvecs; ++i)
			WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
#endif
#ifdef WIN32
			DWORD bytesRead;
			DWORD flags=0;
			if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
				/* The read failed. It might be a close,
				 * or it might be an error. */
				if (WSAGetLastError() == WSAECONNABORTED)
					n = 0;
				else
					n = -1;
			} else
				n = bytesRead;
#else
		n = readv(fd, vecs, nvecs);
#endif
#else /*!USE_IOVEC_IMPL*/
	/* If we don't have FIONREAD, we might waste some space here */
	/* XXX we _will_ waste some space here if there is any space left
	 * over on buf->last. */
	if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
		result = -1;
		goto done;
	/* We can append new data at this point */
	p = chain->buffer + chain->misalign + chain->off;
#ifndef WIN32
	n = read(fd, p, howmuch);
#else
	n = recv(fd, p, howmuch, 0);
#endif
#endif /* USE_IOVEC_IMPL */
	if (n == -1) {
		result = -1;
		goto done;
	if (n == 0) {
		result = 0;
		goto done;
#ifdef USE_IOVEC_IMPL
	/* Distribute the 'n' bytes we read across the chains we filled,
	 * updating each chain's length and last_with_datap. */
	remaining = n;
	for (i=0; i < nvecs; ++i) {
		ev_ssize_t space = (ev_ssize_t) CHAIN_SPACE_LEN(*chainp);
		if (space < remaining) {
			(*chainp)->off += space;
			remaining -= (int)space;
		} else {
			(*chainp)->off += remaining;
			buf->last_with_datap = chainp;
			break;
		chainp = &(*chainp)->next;
#else
	chain->off += n;
	advance_last_with_data(buf);
#endif
	buf->total_len += n;
	buf->n_add_for_cb += n;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks(buf);
	result = n;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
  2182 #ifdef WIN32
  2183 static int
  2184 evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd, ev_ssize_t howmuch)
  2186 	int result;
  2187 	int nchains, n;
  2188 	struct evbuffer_iovec v[2];
  2190 	EVBUFFER_LOCK(buf);
  2192 	if (buf->freeze_end) {
  2193 		result = -1;
  2194 		goto done;
  2197 	if (howmuch < 0)
  2198 		howmuch = 16384;
  2201 	/* XXX we _will_ waste some space here if there is any space left
  2202 	 * over on buf->last. */
  2203 	nchains = evbuffer_reserve_space(buf, howmuch, v, 2);
  2204 	if (nchains < 1 || nchains > 2) {
  2205 		result = -1;
  2206 		goto done;
  2208 	n = read((int)fd, v[0].iov_base, (unsigned int)v[0].iov_len);
  2209 	if (n <= 0) {
  2210 		result = n;
  2211 		goto done;
  2213 	v[0].iov_len = (IOV_LEN_TYPE) n; /* XXXX another problem with big n.*/
  2214 	if (nchains > 1) {
  2215 		n = read((int)fd, v[1].iov_base, (unsigned int)v[1].iov_len);
  2216 		if (n <= 0) {
  2217 			result = (unsigned long) v[0].iov_len;
  2218 			evbuffer_commit_space(buf, v, 1);
  2219 			goto done;
  2221 		v[1].iov_len = n;
  2223 	evbuffer_commit_space(buf, v, nchains);
  2225 	result = n;
  2226 done:
  2227 	EVBUFFER_UNLOCK(buf);
  2228 	return result;
  2230 #endif
  2232 #ifdef USE_IOVEC_IMPL
/* Write up to 'howmuch' bytes from 'buffer' to socket 'fd' using
 * scatter/gather I/O (writev on Unix, WSASend on win32) so several chains
 * can be flushed without linearizing the buffer first.  Returns the byte
 * count written or -1 on error.  Caller must hold the buffer lock.
 * NOTE(review): some closing braces in this copy appear lost in extraction
 * (embedded line numbers jump); compare with upstream before building. */
static inline int
evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
	IOV_TYPE iov[NUM_WRITE_IOVEC];
	struct evbuffer_chain *chain = buffer->first;
	int n, i = 0;
	if (howmuch < 0)
		return -1;
	ASSERT_EVBUFFER_LOCKED(buffer);
	/* XXX make this top out at some maximal data length?  if the
	 * buffer has (say) 1MB in it, split over 128 chains, there's
	 * no way it all gets written in one go. */
	while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
#ifdef USE_SENDFILE
		/* we cannot write the file info via writev */
		if (chain->flags & EVBUFFER_SENDFILE)
			break;
#endif
		/* Point this iovec at the readable region of the chain. */
		iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
		if ((size_t)howmuch >= chain->off) {
			/* Whole chain fits in the remaining budget. */
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
			howmuch -= chain->off;
		} else {
			/* Partial chain: this is the last iovec. */
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
			break;
		chain = chain->next;
	if (! i)
		return 0;
#ifdef WIN32
		DWORD bytesSent;
		if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
			n = -1;
		else
			n = bytesSent;
#else
	n = writev(fd, iov, i);
#endif
	return (n);
  2281 #endif
  2283 #ifdef USE_SENDFILE
/* Flush the first chain of 'buffer' — an EVBUFFER_SENDFILE chain backed by
 * the file descriptor stored in its evbuffer_chain_fd extra — to socket
 * 'fd' using the platform's zero-copy sendfile primitive.  Each OS has a
 * different calling convention, hence the #ifdef ladder.  Returns bytes
 * sent, 0 on retriable errors (where detectable), or -1.  Caller holds
 * the buffer lock.  'howmuch' is currently unused on these paths: each
 * variant sends up to chain->off bytes. */
static inline int
evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
	struct evbuffer_chain *chain = buffer->first;
	struct evbuffer_chain_fd *info =
	    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
	int res;
	/* in/out parameter: bytes requested in, bytes actually sent out */
	off_t len = chain->off;
#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
	ev_ssize_t res;
	off_t offset = chain->misalign;
#endif
	ASSERT_EVBUFFER_LOCKED(buffer);
#if defined(SENDFILE_IS_MACOSX)
	res = sendfile(info->fd, fd, chain->misalign, &len, NULL, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);
	/* 'len' was updated by the kernel to the count actually sent. */
	return (len);
#elif defined(SENDFILE_IS_FREEBSD)
	res = sendfile(info->fd, fd, chain->misalign, chain->off, NULL, &len, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);
	return (len);
#elif defined(SENDFILE_IS_LINUX)
	/* TODO(niels): implement splice */
	res = sendfile(fd, info->fd, &offset, chain->off);
	if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
		/* if this is EAGAIN or EINTR return 0; otherwise, -1 */
		return (0);
	return (res);
#elif defined(SENDFILE_IS_SOLARIS)
		const off_t offset_orig = offset;
		res = sendfile(fd, info->fd, &offset, chain->off);
		if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
			/* Partial progress before EAGAIN/EINTR still counts. */
			if (offset - offset_orig)
				return offset - offset_orig;
			/* if this is EAGAIN or EINTR and no bytes were
			 * written, return 0 */
			return (0);
		return (res);
#endif
  2336 #endif
/* Write at most 'howmuch' bytes (everything when negative or larger than
 * the buffer) from 'buffer' to 'fd', then drain the bytes that were
 * written.  Picks sendfile, iovec, or pullup+write depending on build and
 * on the first chain's flags.  Returns bytes written or -1; fails
 * immediately when the drain side of the buffer is frozen. */
int
evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
	int n = -1;
	EVBUFFER_LOCK(buffer);
	if (buffer->freeze_start) {
		goto done;
	/* Clamp the request to what is actually in the buffer. */
	if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
		howmuch = buffer->total_len;
	if (howmuch > 0) {
#ifdef USE_SENDFILE
		/* A sendfile chain must go out via the sendfile path. */
		struct evbuffer_chain *chain = buffer->first;
		if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
			n = evbuffer_write_sendfile(buffer, fd, howmuch);
		else {
#endif
#ifdef USE_IOVEC_IMPL
		n = evbuffer_write_iovec(buffer, fd, howmuch);
#elif defined(WIN32)
		/* XXX(nickm) Don't disable this code until we know if
		 * the WSARecv code above works. */
		void *p = evbuffer_pullup(buffer, howmuch);
		n = send(fd, p, howmuch, 0);
#else
		void *p = evbuffer_pullup(buffer, howmuch);
		n = write(fd, p, howmuch);
#endif
#ifdef USE_SENDFILE
#endif
	/* Remove whatever we managed to write from the buffer. */
	if (n > 0)
		evbuffer_drain(buffer, n);
done:
	EVBUFFER_UNLOCK(buffer);
	return (n);
  2384 int
  2385 evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
  2387 	return evbuffer_write_atmost(buffer, fd, -1);
  2390 unsigned char *
  2391 evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
  2393 	unsigned char *search;
  2394 	struct evbuffer_ptr ptr;
  2396 	EVBUFFER_LOCK(buffer);
  2398 	ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
  2399 	if (ptr.pos < 0) {
  2400 		search = NULL;
  2401 	} else {
  2402 		search = evbuffer_pullup(buffer, ptr.pos + len);
  2403 		if (search)
  2404 			search += ptr.pos;
  2406 	EVBUFFER_UNLOCK(buffer);
  2407 	return search;
  2410 int
  2411 evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
  2412     size_t position, enum evbuffer_ptr_how how)
  2414 	size_t left = position;
  2415 	struct evbuffer_chain *chain = NULL;
  2417 	EVBUFFER_LOCK(buf);
  2419 	switch (how) {
  2420 	case EVBUFFER_PTR_SET:
  2421 		chain = buf->first;
  2422 		pos->pos = position;
  2423 		position = 0;
  2424 		break;
  2425 	case EVBUFFER_PTR_ADD:
  2426 		/* this avoids iterating over all previous chains if
  2427 		   we just want to advance the position */
  2428 		chain = pos->_internal.chain;
  2429 		pos->pos += position;
  2430 		position = pos->_internal.pos_in_chain;
  2431 		break;
  2434 	while (chain && position + left >= chain->off) {
  2435 		left -= chain->off - position;
  2436 		chain = chain->next;
  2437 		position = 0;
  2439 	if (chain) {
  2440 		pos->_internal.chain = chain;
  2441 		pos->_internal.pos_in_chain = position + left;
  2442 	} else {
  2443 		pos->_internal.chain = NULL;
  2444 		pos->pos = -1;
  2447 	EVBUFFER_UNLOCK(buf);
  2449 	return chain != NULL ? 0 : -1;
  2452 /**
  2453    Compare the bytes in buf at position pos to the len bytes in mem.  Return
  2454    less than 0, 0, or greater than 0 as memcmp.
  2455  */
  2456 static int
  2457 evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
  2458     const char *mem, size_t len)
  2460 	struct evbuffer_chain *chain;
  2461 	size_t position;
  2462 	int r;
  2464 	ASSERT_EVBUFFER_LOCKED(buf);
  2466 	if (pos->pos + len > buf->total_len)
  2467 		return -1;
  2469 	chain = pos->_internal.chain;
  2470 	position = pos->_internal.pos_in_chain;
  2471 	while (len && chain) {
  2472 		size_t n_comparable;
  2473 		if (len + position > chain->off)
  2474 			n_comparable = chain->off - position;
  2475 		else
  2476 			n_comparable = len;
  2477 		r = memcmp(chain->buffer + chain->misalign + position, mem,
  2478 		    n_comparable);
  2479 		if (r)
  2480 			return r;
  2481 		mem += n_comparable;
  2482 		len -= n_comparable;
  2483 		position = 0;
  2484 		chain = chain->next;
  2487 	return 0;
  2490 struct evbuffer_ptr
  2491 evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
  2493 	return evbuffer_search_range(buffer, what, len, start, NULL);
/* Search 'buffer' for the first occurrence of the 'len'-byte needle
 * 'what', scanning from 'start' (or the beginning when NULL) and refusing
 * matches that end past 'end' (when non-NULL).  Returns an evbuffer_ptr
 * whose pos is the match offset, or pos == -1 when not found or when len
 * is 0 or absurdly large. */
struct evbuffer_ptr
evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
	struct evbuffer_ptr pos;
	struct evbuffer_chain *chain, *last_chain = NULL;
	const unsigned char *p;
	char first;
	EVBUFFER_LOCK(buffer);
	if (start) {
		memcpy(&pos, start, sizeof(pos));
		chain = pos._internal.chain;
	} else {
		pos.pos = 0;
		chain = pos._internal.chain = buffer->first;
		pos._internal.pos_in_chain = 0;
	if (end)
		last_chain = end->_internal.chain;
	if (!len || len > EV_SSIZE_MAX)
		goto done;
	first = what[0];
	/* Per chain: memchr for the needle's first byte, then verify the
	 * whole needle (which may span chains) with evbuffer_ptr_memcmp. */
	while (chain) {
		const unsigned char *start_at =
		    chain->buffer + chain->misalign +
		    pos._internal.pos_in_chain;
		p = memchr(start_at, first,
		    chain->off - pos._internal.pos_in_chain);
		if (p) {
			pos.pos += p - start_at;
			pos._internal.pos_in_chain += p - start_at;
			if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
				/* Full match; reject if it runs past 'end'. */
				if (end && pos.pos + (ev_ssize_t)len > end->pos)
					goto not_found;
				else
					goto done;
			/* False positive: advance one byte and keep looking. */
			++pos.pos;
			++pos._internal.pos_in_chain;
			if (pos._internal.pos_in_chain == chain->off) {
				chain = pos._internal.chain = chain->next;
				pos._internal.pos_in_chain = 0;
		} else {
			if (chain == last_chain)
				goto not_found;
			/* First byte absent here: skip to the next chain. */
			pos.pos += chain->off - pos._internal.pos_in_chain;
			chain = pos._internal.chain = chain->next;
			pos._internal.pos_in_chain = 0;
not_found:
	pos.pos = -1;
	pos._internal.chain = NULL;
done:
	EVBUFFER_UNLOCK(buffer);
	return pos;
/* Expose up to 'n_vec' iovecs in 'vec' pointing at buffer data — at least
 * 'len' bytes, or all data when len < 0 — starting at 'start_at' (first
 * chain when NULL).  Nothing is copied or drained.  Returns the number of
 * iovecs needed, which may exceed n_vec. */
int
evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
    struct evbuffer_ptr *start_at,
    struct evbuffer_iovec *vec, int n_vec)
	struct evbuffer_chain *chain;
	int idx = 0;
	ev_ssize_t len_so_far = 0;
	EVBUFFER_LOCK(buffer);
	if (start_at) {
		/* The first vector covers the tail of the starting chain. */
		chain = start_at->_internal.chain;
		len_so_far = chain->off
		    - start_at->_internal.pos_in_chain;
		idx = 1;
		if (n_vec > 0) {
			vec[0].iov_base = chain->buffer + chain->misalign
			    + start_at->_internal.pos_in_chain;
			vec[0].iov_len = len_so_far;
		chain = chain->next;
	} else {
		chain = buffer->first;
	if (n_vec == 0 && len < 0) {
		/* If no vectors are provided and they asked for "everything",
		 * pretend they asked for the actual available amount. */
		len = buffer->total_len - len_so_far;
	/* Walk the remaining chains, filling vectors while they last and
	 * counting how many would be required in total. */
	while (chain) {
		if (len >= 0 && len_so_far >= len)
			break;
		if (idx<n_vec) {
			vec[idx].iov_base = chain->buffer + chain->misalign;
			vec[idx].iov_len = chain->off;
		} else if (len<0) {
			/* "Everything" was requested but vectors ran out. */
			break;
		++idx;
		len_so_far += chain->off;
		chain = chain->next;
	EVBUFFER_UNLOCK(buffer);
	return idx;
/* Append the formatted output of vsnprintf(fmt, ap) to 'buf'.  Tries the
 * free space in the last chain first; when vsnprintf reports truncation,
 * grows the buffer to the reported size and retries.  Returns the number
 * of bytes appended, or -1 on error or when the append end is frozen. */
int
evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
	char *buffer;
	size_t space;
	int sz, result = -1;
	va_list aq;
	struct evbuffer_chain *chain;
	EVBUFFER_LOCK(buf);
	if (buf->freeze_end) {
		goto done;
	/* make sure that at least some space is available */
	if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)
		goto done;
	for (;;) {
#if 0
		size_t used = chain->misalign + chain->off;
		buffer = (char *)chain->buffer + chain->misalign + chain->off;
		EVUTIL_ASSERT(chain->buffer_len >= used);
		space = chain->buffer_len - used;
#endif
		buffer = (char*) CHAIN_SPACE_PTR(chain);
		space = (size_t) CHAIN_SPACE_LEN(chain);
#ifndef va_copy
#define	va_copy(dst, src)	memcpy(&(dst), &(src), sizeof(va_list))
#endif
		/* Copy the arg list: we may need to format more than once. */
		va_copy(aq, ap);
		sz = evutil_vsnprintf(buffer, space, fmt, aq);
		va_end(aq);
		if (sz < 0)
			goto done;
		if ((size_t)sz < space) {
			/* Output fit: commit the bytes and fire callbacks. */
			chain->off += sz;
			buf->total_len += sz;
			buf->n_add_for_cb += sz;
			advance_last_with_data(buf);
			evbuffer_invoke_callbacks(buf);
			result = sz;
			goto done;
		/* Truncated: reserve sz+1 bytes (output + NUL) and retry. */
		if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
			goto done;
	/* NOTREACHED */
done:
	EVBUFFER_UNLOCK(buf);
	return result;
  2674 int
  2675 evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
  2677 	int res = -1;
  2678 	va_list ap;
  2680 	va_start(ap, fmt);
  2681 	res = evbuffer_add_vprintf(buf, fmt, ap);
  2682 	va_end(ap);
  2684 	return (res);
  2687 int
  2688 evbuffer_add_reference(struct evbuffer *outbuf,
  2689     const void *data, size_t datlen,
  2690     evbuffer_ref_cleanup_cb cleanupfn, void *extra)
  2692 	struct evbuffer_chain *chain;
  2693 	struct evbuffer_chain_reference *info;
  2694 	int result = -1;
  2696 	chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
  2697 	if (!chain)
  2698 		return (-1);
  2699 	chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
  2700 	chain->buffer = (u_char *)data;
  2701 	chain->buffer_len = datlen;
  2702 	chain->off = datlen;
  2704 	info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
  2705 	info->cleanupfn = cleanupfn;
  2706 	info->extra = extra;
  2708 	EVBUFFER_LOCK(outbuf);
  2709 	if (outbuf->freeze_end) {
  2710 		/* don't call chain_free; we do not want to actually invoke
  2711 		 * the cleanup function */
  2712 		mm_free(chain);
  2713 		goto done;
  2715 	evbuffer_chain_insert(outbuf, chain);
  2716 	outbuf->n_add_for_cb += datlen;
  2718 	evbuffer_invoke_callbacks(outbuf);
  2720 	result = 0;
  2721 done:
  2722 	EVBUFFER_UNLOCK(outbuf);
  2724 	return result;
  2727 /* TODO(niels): maybe we don't want to own the fd, however, in that
  2728  * case, we should dup it - dup is cheap.  Perhaps, we should use a
  2729  * callback instead?
  2730  */
  2731 /* TODO(niels): we may want to add to automagically convert to mmap, in
  2732  * case evbuffer_remove() or evbuffer_pullup() are being used.
  2733  */
/* Append 'length' bytes of the file behind 'fd', starting at 'offset', to
 * 'outbuf'.  Strategy: a sendfile chain when enabled and the buffer drains
 * to an fd; else an mmap-backed chain when available; else a plain read
 * loop through a temporary buffer.  The sendfile/mmap paths keep 'fd' open
 * and owned by the chain; the linear fallback closes it after copying.
 * Returns 0 on success, -1 on failure. */
int
evbuffer_add_file(struct evbuffer *outbuf, int fd,
    ev_off_t offset, ev_off_t length)
#if defined(USE_SENDFILE) || defined(_EVENT_HAVE_MMAP)
	struct evbuffer_chain *chain;
	struct evbuffer_chain_fd *info;
#endif
#if defined(USE_SENDFILE)
	int sendfile_okay = 1;
#endif
	int ok = 1;
#if defined(USE_SENDFILE)
	if (use_sendfile) {
		/* sendfile only pays off when the buffer drains to an fd. */
		EVBUFFER_LOCK(outbuf);
		sendfile_okay = outbuf->flags & EVBUFFER_FLAG_DRAINS_TO_FD;
		EVBUFFER_UNLOCK(outbuf);
	if (use_sendfile && sendfile_okay) {
		chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
		if (chain == NULL) {
			event_warn("%s: out of memory", __func__);
			return (-1);
		chain->flags |= EVBUFFER_SENDFILE | EVBUFFER_IMMUTABLE;
		chain->buffer = NULL;	/* no reading possible */
		chain->buffer_len = length + offset;
		chain->off = length;
		chain->misalign = offset;
		info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
		info->fd = fd;
		EVBUFFER_LOCK(outbuf);
		if (outbuf->freeze_end) {
			mm_free(chain);
			ok = 0;
		} else {
			outbuf->n_add_for_cb += length;
			evbuffer_chain_insert(outbuf, chain);
	} else
#endif
#if defined(_EVENT_HAVE_MMAP)
	if (use_mmap) {
		void *mapped = mmap(NULL, length + offset, PROT_READ,
#ifdef MAP_NOCACHE
		    MAP_NOCACHE |
#endif
#ifdef MAP_FILE
		    MAP_FILE |
#endif
		    MAP_PRIVATE,
		    fd, 0);
		/* some mmap implementations require offset to be a multiple of
		 * the page size.  most users of this api, are likely to use 0
		 * so mapping everything is not likely to be a problem.
		 * TODO(niels): determine page size and round offset to that
		 * page size to avoid mapping too much memory.
		 */
		if (mapped == MAP_FAILED) {
			event_warn("%s: mmap(%d, %d, %zu) failed",
			    __func__, fd, 0, (size_t)(offset + length));
			return (-1);
		chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
		if (chain == NULL) {
			event_warn("%s: out of memory", __func__);
			munmap(mapped, length);
			return (-1);
		chain->flags |= EVBUFFER_MMAP | EVBUFFER_IMMUTABLE;
		chain->buffer = mapped;
		chain->buffer_len = length + offset;
		chain->off = length + offset;
		info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
		info->fd = fd;
		EVBUFFER_LOCK(outbuf);
		if (outbuf->freeze_end) {
			/* presumably cleared so chain_free does not close the
			 * caller's fd — TODO confirm against chain_free */
			info->fd = -1;
			evbuffer_chain_free(chain);
			ok = 0;
		} else {
			outbuf->n_add_for_cb += length;
			evbuffer_chain_insert(outbuf, chain);
			/* we need to subtract whatever we don't need */
			evbuffer_drain(outbuf, offset);
	} else
#endif
		/* the default implementation */
		struct evbuffer *tmp = evbuffer_new();
		ev_ssize_t read;
		if (tmp == NULL)
			return (-1);
#ifdef WIN32
#define lseek _lseeki64
#endif
		if (lseek(fd, offset, SEEK_SET) == -1) {
			evbuffer_free(tmp);
			return (-1);
		/* we add everything to a temporary buffer, so that we
		 * can abort without side effects if the read fails.
		 */
		while (length) {
			read = evbuffer_readfile(tmp, fd, (ev_ssize_t)length);
			if (read == -1) {
				evbuffer_free(tmp);
				return (-1);
			length -= read;
		EVBUFFER_LOCK(outbuf);
		if (outbuf->freeze_end) {
			evbuffer_free(tmp);
			ok = 0;
		} else {
			evbuffer_add_buffer(outbuf, tmp);
			evbuffer_free(tmp);
#ifdef WIN32
#define close _close
#endif
			close(fd);
	if (ok)
		evbuffer_invoke_callbacks(outbuf);
	EVBUFFER_UNLOCK(outbuf);
	return ok ? 0 : -1;
  2884 void
  2885 evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
  2887 	EVBUFFER_LOCK(buffer);
  2889 	if (!TAILQ_EMPTY(&buffer->callbacks))
  2890 		evbuffer_remove_all_callbacks(buffer);
  2892 	if (cb) {
  2893 		struct evbuffer_cb_entry *ent =
  2894 		    evbuffer_add_cb(buffer, NULL, cbarg);
  2895 		ent->cb.cb_obsolete = cb;
  2896 		ent->flags |= EVBUFFER_CB_OBSOLETE;
  2898 	EVBUFFER_UNLOCK(buffer);
  2901 struct evbuffer_cb_entry *
  2902 evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
  2904 	struct evbuffer_cb_entry *e;
  2905 	if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
  2906 		return NULL;
  2907 	EVBUFFER_LOCK(buffer);
  2908 	e->cb.cb_func = cb;
  2909 	e->cbarg = cbarg;
  2910 	e->flags = EVBUFFER_CB_ENABLED;
  2911 	TAILQ_INSERT_HEAD(&buffer->callbacks, e, next);
  2912 	EVBUFFER_UNLOCK(buffer);
  2913 	return e;
  2916 int
  2917 evbuffer_remove_cb_entry(struct evbuffer *buffer,
  2918 			 struct evbuffer_cb_entry *ent)
  2920 	EVBUFFER_LOCK(buffer);
  2921 	TAILQ_REMOVE(&buffer->callbacks, ent, next);
  2922 	EVBUFFER_UNLOCK(buffer);
  2923 	mm_free(ent);
  2924 	return 0;
  2927 int
  2928 evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
  2930 	struct evbuffer_cb_entry *cbent;
  2931 	int result = -1;
  2932 	EVBUFFER_LOCK(buffer);
  2933 	TAILQ_FOREACH(cbent, &buffer->callbacks, next) {
  2934 		if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
  2935 			result = evbuffer_remove_cb_entry(buffer, cbent);
  2936 			goto done;
  2939 done:
  2940 	EVBUFFER_UNLOCK(buffer);
  2941 	return result;
  2944 int
  2945 evbuffer_cb_set_flags(struct evbuffer *buffer,
  2946 		      struct evbuffer_cb_entry *cb, ev_uint32_t flags)
  2948 	/* the user isn't allowed to mess with these. */
  2949 	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
  2950 	EVBUFFER_LOCK(buffer);
  2951 	cb->flags |= flags;
  2952 	EVBUFFER_UNLOCK(buffer);
  2953 	return 0;
  2956 int
  2957 evbuffer_cb_clear_flags(struct evbuffer *buffer,
  2958 		      struct evbuffer_cb_entry *cb, ev_uint32_t flags)
  2960 	/* the user isn't allowed to mess with these. */
  2961 	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
  2962 	EVBUFFER_LOCK(buffer);
  2963 	cb->flags &= ~flags;
  2964 	EVBUFFER_UNLOCK(buffer);
  2965 	return 0;
  2968 int
  2969 evbuffer_freeze(struct evbuffer *buffer, int start)
  2971 	EVBUFFER_LOCK(buffer);
  2972 	if (start)
  2973 		buffer->freeze_start = 1;
  2974 	else
  2975 		buffer->freeze_end = 1;
  2976 	EVBUFFER_UNLOCK(buffer);
  2977 	return 0;
  2980 int
  2981 evbuffer_unfreeze(struct evbuffer *buffer, int start)
  2983 	EVBUFFER_LOCK(buffer);
  2984 	if (start)
  2985 		buffer->freeze_start = 0;
  2986 	else
  2987 		buffer->freeze_end = 0;
  2988 	EVBUFFER_UNLOCK(buffer);
  2989 	return 0;
#if 0
/* Disabled draft API: pause invocation of callback 'cb', remembering the
 * buffer length at suspend time so a later deferred invocation can report
 * the correct "previous size". */
void
evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
	if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) {
		cb->size_before_suspend = evbuffer_get_length(buffer);
		cb->flags |= EVBUFFER_CB_SUSPENDED;
/* Disabled draft API: resume a suspended callback, invoking it once with
 * the pre-suspend size if an invocation was requested while suspended and
 * the callback is still enabled. */
void
evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
	if ((cb->flags & EVBUFFER_CB_SUSPENDED)) {
		unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);
		size_t sz = cb->size_before_suspend;
		cb->flags &= ~(EVBUFFER_CB_SUSPENDED|
			       EVBUFFER_CB_CALL_ON_UNSUSPEND);
		cb->size_before_suspend = 0;
		if (call && (cb->flags & EVBUFFER_CB_ENABLED)) {
			cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg);
#endif
  3018 /* These hooks are exposed so that the unit tests can temporarily disable
  3019  * sendfile support in order to test mmap, or both to test linear
  3020  * access. Don't use it; if we need to add a way to disable sendfile support
  3021  * in the future, it will probably be via an alternate version of
  3022  * evbuffer_add_file() with a 'flags' argument.
  3023  */
  3024 int _evbuffer_testing_use_sendfile(void);
  3025 int _evbuffer_testing_use_mmap(void);
  3026 int _evbuffer_testing_use_linear_file_access(void);
  3028 int
  3029 _evbuffer_testing_use_sendfile(void)
  3031 	int ok = 0;
  3032 #ifdef USE_SENDFILE
  3033 	use_sendfile = 1;
  3034 	ok = 1;
  3035 #endif
  3036 #ifdef _EVENT_HAVE_MMAP
  3037 	use_mmap = 0;
  3038 #endif
  3039 	return ok;
  3041 int
  3042 _evbuffer_testing_use_mmap(void)
  3044 	int ok = 0;
  3045 #ifdef USE_SENDFILE
  3046 	use_sendfile = 0;
  3047 #endif
  3048 #ifdef _EVENT_HAVE_MMAP
  3049 	use_mmap = 1;
  3050 	ok = 1;
  3051 #endif
  3052 	return ok;
  3054 int
  3055 _evbuffer_testing_use_linear_file_access(void)
  3057 #ifdef USE_SENDFILE
  3058 	use_sendfile = 0;
  3059 #endif
  3060 #ifdef _EVENT_HAVE_MMAP
  3061 	use_mmap = 0;
  3062 #endif
  3063 	return 1;

mercurial