ipc/chromium/src/third_party/libevent/buffer.c

changeset 6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/ipc/chromium/src/third_party/libevent/buffer.c	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,3064 @@
     1.4 +/*
     1.5 + * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
     1.6 + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
     1.7 + *
     1.8 + * Redistribution and use in source and binary forms, with or without
     1.9 + * modification, are permitted provided that the following conditions
    1.10 + * are met:
    1.11 + * 1. Redistributions of source code must retain the above copyright
    1.12 + *    notice, this list of conditions and the following disclaimer.
    1.13 + * 2. Redistributions in binary form must reproduce the above copyright
    1.14 + *    notice, this list of conditions and the following disclaimer in the
    1.15 + *    documentation and/or other materials provided with the distribution.
    1.16 + * 3. The name of the author may not be used to endorse or promote products
    1.17 + *    derived from this software without specific prior written permission.
    1.18 + *
    1.19 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
    1.20 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
    1.21 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
    1.22 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
    1.23 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
    1.24 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    1.25 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    1.26 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    1.27 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
    1.28 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    1.29 + */
    1.30 +
    1.31 +#include "event2/event-config.h"
    1.32 +
    1.33 +#ifdef WIN32
    1.34 +#include <winsock2.h>
    1.35 +#include <windows.h>
    1.36 +#include <io.h>
    1.37 +#endif
    1.38 +
    1.39 +#ifdef _EVENT_HAVE_VASPRINTF
    1.40 +/* If we have vasprintf, we need to define this before we include stdio.h. */
    1.41 +#define _GNU_SOURCE
    1.42 +#endif
    1.43 +
    1.44 +#include <sys/types.h>
    1.45 +
    1.46 +#ifdef _EVENT_HAVE_SYS_TIME_H
    1.47 +#include <sys/time.h>
    1.48 +#endif
    1.49 +
    1.50 +#ifdef _EVENT_HAVE_SYS_SOCKET_H
    1.51 +#include <sys/socket.h>
    1.52 +#endif
    1.53 +
    1.54 +#ifdef _EVENT_HAVE_SYS_UIO_H
    1.55 +#include <sys/uio.h>
    1.56 +#endif
    1.57 +
    1.58 +#ifdef _EVENT_HAVE_SYS_IOCTL_H
    1.59 +#include <sys/ioctl.h>
    1.60 +#endif
    1.61 +
    1.62 +#ifdef _EVENT_HAVE_SYS_MMAN_H
    1.63 +#include <sys/mman.h>
    1.64 +#endif
    1.65 +
    1.66 +#ifdef _EVENT_HAVE_SYS_SENDFILE_H
    1.67 +#include <sys/sendfile.h>
    1.68 +#endif
    1.69 +
    1.70 +#include <errno.h>
    1.71 +#include <stdio.h>
    1.72 +#include <stdlib.h>
    1.73 +#include <string.h>
    1.74 +#ifdef _EVENT_HAVE_STDARG_H
    1.75 +#include <stdarg.h>
    1.76 +#endif
    1.77 +#ifdef _EVENT_HAVE_UNISTD_H
    1.78 +#include <unistd.h>
    1.79 +#endif
    1.80 +#include <limits.h>
    1.81 +
    1.82 +#include "event2/event.h"
    1.83 +#include "event2/buffer.h"
    1.84 +#include "event2/buffer_compat.h"
    1.85 +#include "event2/bufferevent.h"
    1.86 +#include "event2/bufferevent_compat.h"
    1.87 +#include "event2/bufferevent_struct.h"
    1.88 +#include "event2/thread.h"
    1.89 +#include "event2/event-config.h"
    1.90 +#include "log-internal.h"
    1.91 +#include "mm-internal.h"
    1.92 +#include "util-internal.h"
    1.93 +#include "evthread-internal.h"
    1.94 +#include "evbuffer-internal.h"
    1.95 +#include "bufferevent-internal.h"
    1.96 +
    1.97 +/* some systems do not have MAP_FAILED */
    1.98 +#ifndef MAP_FAILED
    1.99 +#define MAP_FAILED	((void *)-1)
   1.100 +#endif
   1.101 +
   1.102 +/* send file support */
   1.103 +#if defined(_EVENT_HAVE_SYS_SENDFILE_H) && defined(_EVENT_HAVE_SENDFILE) && defined(__linux__)
   1.104 +#define USE_SENDFILE		1
   1.105 +#define SENDFILE_IS_LINUX	1
   1.106 +#elif defined(_EVENT_HAVE_SENDFILE) && defined(__FreeBSD__)
   1.107 +#define USE_SENDFILE		1
   1.108 +#define SENDFILE_IS_FREEBSD	1
   1.109 +#elif defined(_EVENT_HAVE_SENDFILE) && defined(__APPLE__)
   1.110 +#define USE_SENDFILE		1
   1.111 +#define SENDFILE_IS_MACOSX	1
   1.112 +#elif defined(_EVENT_HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
   1.113 +#define USE_SENDFILE		1
   1.114 +#define SENDFILE_IS_SOLARIS	1
   1.115 +#endif
   1.116 +
   1.117 +#ifdef USE_SENDFILE
   1.118 +static int use_sendfile = 1;
   1.119 +#endif
   1.120 +#ifdef _EVENT_HAVE_MMAP
   1.121 +static int use_mmap = 1;
   1.122 +#endif
   1.123 +
   1.124 +
   1.125 +/* Mask of user-selectable callback flags. */
   1.126 +#define EVBUFFER_CB_USER_FLAGS	    0xffff
   1.127 +/* Mask of all internal-use-only flags. */
   1.128 +#define EVBUFFER_CB_INTERNAL_FLAGS  0xffff0000
   1.129 +
   1.130 +/* Flag set if the callback is using the cb_obsolete function pointer  */
   1.131 +#define EVBUFFER_CB_OBSOLETE	       0x00040000
   1.132 +
   1.133 +/* evbuffer_chain support */
   1.134 +#define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
   1.135 +#define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
   1.136 +	    0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
   1.137 +
   1.138 +#define CHAIN_PINNED(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
   1.139 +#define CHAIN_PINNED_R(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)
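          +/* Each chain normally stores its payload in the same allocation as the
          + * struct itself: `buffer` points just past the header, `misalign` bytes
          + * of already-consumed space come first, followed by `off` bytes of live
          + * data.  CHAIN_SPACE_PTR/CHAIN_SPACE_LEN describe the writable tail after
          + * that data (zero for immutable chains).  Pinned chains are still in use
          + * by some pending operation and must not be freed or moved. */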
   1.140 +
   1.141 +static void evbuffer_chain_align(struct evbuffer_chain *chain);
   1.142 +static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
   1.143 +    size_t datalen);
   1.144 +static void evbuffer_deferred_callback(struct deferred_cb *cb, void *arg);
   1.145 +static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
   1.146 +    const struct evbuffer_ptr *pos, const char *mem, size_t len);
   1.147 +static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
   1.148 +    size_t datlen);
   1.149 +
   1.150 +#ifdef WIN32
   1.151 +static int evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd,
   1.152 +    ev_ssize_t howmuch);
   1.153 +#else
   1.154 +#define evbuffer_readfile evbuffer_read
   1.155 +#endif
   1.156 +
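          +/* Allocate a new chain able to hold at least `size` payload bytes.  The
          + * header and the payload share one mm_malloc() block, rounded up to the
          + * next power of two no smaller than MIN_BUFFER_SIZE, so small requests
          + * still leave room to grow.  Returns NULL on allocation failure. */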
   1.157 +static struct evbuffer_chain *
   1.158 +evbuffer_chain_new(size_t size)
   1.159 +{
   1.160 +	struct evbuffer_chain *chain;
   1.161 +	size_t to_alloc;
   1.162 +
   1.163 +	size += EVBUFFER_CHAIN_SIZE;
   1.164 +
    1.165 +	/* round up to the next power of two that can hold the buffer */
   1.166 +	to_alloc = MIN_BUFFER_SIZE;
   1.167 +	while (to_alloc < size)
   1.168 +		to_alloc <<= 1;
   1.169 +
   1.170 +	/* we get everything in one chunk */
   1.171 +	if ((chain = mm_malloc(to_alloc)) == NULL)
   1.172 +		return (NULL);
   1.173 +
   1.174 +	memset(chain, 0, EVBUFFER_CHAIN_SIZE);
   1.175 +
   1.176 +	chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;
   1.177 +
    1.178 +	/* this way we can later point the buffer at a different address,
    1.179 +	 * which is required for mmap for example.
    1.180 +	 */
   1.181 +	chain->buffer = EVBUFFER_CHAIN_EXTRA(u_char, chain);
   1.182 +
   1.183 +	return (chain);
   1.184 +}
   1.185 +
   1.186 +static inline void
   1.187 +evbuffer_chain_free(struct evbuffer_chain *chain)
   1.188 +{
   1.189 +	if (CHAIN_PINNED(chain)) {
   1.190 +		chain->flags |= EVBUFFER_DANGLING;
   1.191 +		return;
   1.192 +	}
   1.193 +	if (chain->flags & (EVBUFFER_MMAP|EVBUFFER_SENDFILE|
   1.194 +		EVBUFFER_REFERENCE)) {
   1.195 +		if (chain->flags & EVBUFFER_REFERENCE) {
   1.196 +			struct evbuffer_chain_reference *info =
   1.197 +			    EVBUFFER_CHAIN_EXTRA(
   1.198 +				    struct evbuffer_chain_reference,
   1.199 +				    chain);
   1.200 +			if (info->cleanupfn)
   1.201 +				(*info->cleanupfn)(chain->buffer,
   1.202 +				    chain->buffer_len,
   1.203 +				    info->extra);
   1.204 +		}
   1.205 +#ifdef _EVENT_HAVE_MMAP
   1.206 +		if (chain->flags & EVBUFFER_MMAP) {
   1.207 +			struct evbuffer_chain_fd *info =
   1.208 +			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
   1.209 +				chain);
   1.210 +			if (munmap(chain->buffer, chain->buffer_len) == -1)
   1.211 +				event_warn("%s: munmap failed", __func__);
   1.212 +			if (close(info->fd) == -1)
   1.213 +				event_warn("%s: close(%d) failed",
   1.214 +				    __func__, info->fd);
   1.215 +		}
   1.216 +#endif
   1.217 +#ifdef USE_SENDFILE
   1.218 +		if (chain->flags & EVBUFFER_SENDFILE) {
   1.219 +			struct evbuffer_chain_fd *info =
   1.220 +			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
   1.221 +				chain);
   1.222 +			if (close(info->fd) == -1)
   1.223 +				event_warn("%s: close(%d) failed",
   1.224 +				    __func__, info->fd);
   1.225 +		}
   1.226 +#endif
   1.227 +	}
   1.228 +
   1.229 +	mm_free(chain);
   1.230 +}
   1.231 +
   1.232 +static void
   1.233 +evbuffer_free_all_chains(struct evbuffer_chain *chain)
   1.234 +{
   1.235 +	struct evbuffer_chain *next;
   1.236 +	for (; chain; chain = next) {
   1.237 +		next = chain->next;
   1.238 +		evbuffer_chain_free(chain);
   1.239 +	}
   1.240 +}
   1.241 +
   1.242 +#ifndef NDEBUG
   1.243 +static int
   1.244 +evbuffer_chains_all_empty(struct evbuffer_chain *chain)
   1.245 +{
   1.246 +	for (; chain; chain = chain->next) {
   1.247 +		if (chain->off)
   1.248 +			return 0;
   1.249 +	}
   1.250 +	return 1;
   1.251 +}
   1.252 +#else
    1.253 +/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
    1.254 + * "unused variable" warnings. */
   1.255 +static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
   1.256 +	return 1;
   1.257 +}
   1.258 +#endif
   1.259 +
   1.260 +/* Free all trailing chains in 'buf' that are neither pinned nor empty, prior
   1.261 + * to replacing them all with a new chain.  Return a pointer to the place
   1.262 + * where the new chain will go.
   1.263 + *
   1.264 + * Internal; requires lock.  The caller must fix up buf->last and buf->first
   1.265 + * as needed; they might have been freed.
   1.266 + */
   1.267 +static struct evbuffer_chain **
   1.268 +evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
   1.269 +{
   1.270 +	struct evbuffer_chain **ch = buf->last_with_datap;
   1.271 +	/* Find the first victim chain.  It might be *last_with_datap */
   1.272 +	while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
   1.273 +		ch = &(*ch)->next;
   1.274 +	if (*ch) {
   1.275 +		EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
   1.276 +		evbuffer_free_all_chains(*ch);
   1.277 +		*ch = NULL;
   1.278 +	}
   1.279 +	return ch;
   1.280 +}
   1.281 +
   1.282 +/* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
   1.283 + * chains as necessary.  Requires lock.  Does not schedule callbacks.
   1.284 + */
   1.285 +static void
   1.286 +evbuffer_chain_insert(struct evbuffer *buf,
   1.287 +    struct evbuffer_chain *chain)
   1.288 +{
   1.289 +	ASSERT_EVBUFFER_LOCKED(buf);
   1.290 +	if (*buf->last_with_datap == NULL) {
    1.291 +		/* There are no chains with data on the buffer at all. */
   1.292 +		EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
   1.293 +		EVUTIL_ASSERT(buf->first == NULL);
   1.294 +		buf->first = buf->last = chain;
   1.295 +	} else {
   1.296 +		struct evbuffer_chain **ch = buf->last_with_datap;
   1.297 +		/* Find the first victim chain.  It might be *last_with_datap */
   1.298 +		while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
   1.299 +			ch = &(*ch)->next;
   1.300 +		if (*ch == NULL) {
   1.301 +			/* There is no victim; just append this new chain. */
   1.302 +			buf->last->next = chain;
   1.303 +			if (chain->off)
   1.304 +				buf->last_with_datap = &buf->last->next;
   1.305 +		} else {
   1.306 +			/* Replace all victim chains with this chain. */
   1.307 +			EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
   1.308 +			evbuffer_free_all_chains(*ch);
   1.309 +			*ch = chain;
   1.310 +		}
   1.311 +		buf->last = chain;
   1.312 +	}
   1.313 +	buf->total_len += chain->off;
   1.314 +}
   1.315 +
   1.316 +static inline struct evbuffer_chain *
   1.317 +evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
   1.318 +{
   1.319 +	struct evbuffer_chain *chain;
   1.320 +	if ((chain = evbuffer_chain_new(datlen)) == NULL)
   1.321 +		return NULL;
   1.322 +	evbuffer_chain_insert(buf, chain);
   1.323 +	return chain;
   1.324 +}
   1.325 +
   1.326 +void
   1.327 +_evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag)
   1.328 +{
   1.329 +	EVUTIL_ASSERT((chain->flags & flag) == 0);
   1.330 +	chain->flags |= flag;
   1.331 +}
   1.332 +
   1.333 +void
   1.334 +_evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag)
   1.335 +{
   1.336 +	EVUTIL_ASSERT((chain->flags & flag) != 0);
   1.337 +	chain->flags &= ~flag;
   1.338 +	if (chain->flags & EVBUFFER_DANGLING)
   1.339 +		evbuffer_chain_free(chain);
   1.340 +}
   1.341 +
   1.342 +struct evbuffer *
   1.343 +evbuffer_new(void)
   1.344 +{
   1.345 +	struct evbuffer *buffer;
   1.346 +
   1.347 +	buffer = mm_calloc(1, sizeof(struct evbuffer));
   1.348 +	if (buffer == NULL)
   1.349 +		return (NULL);
   1.350 +
   1.351 +	TAILQ_INIT(&buffer->callbacks);
   1.352 +	buffer->refcnt = 1;
   1.353 +	buffer->last_with_datap = &buffer->first;
   1.354 +
   1.355 +	return (buffer);
   1.356 +}
   1.357 +
   1.358 +int
   1.359 +evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
   1.360 +{
   1.361 +	EVBUFFER_LOCK(buf);
   1.362 +	buf->flags |= (ev_uint32_t)flags;
   1.363 +	EVBUFFER_UNLOCK(buf);
   1.364 +	return 0;
   1.365 +}
   1.366 +
   1.367 +int
   1.368 +evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
   1.369 +{
   1.370 +	EVBUFFER_LOCK(buf);
   1.371 +	buf->flags &= ~(ev_uint32_t)flags;
   1.372 +	EVBUFFER_UNLOCK(buf);
   1.373 +	return 0;
   1.374 +}
   1.375 +
   1.376 +void
   1.377 +_evbuffer_incref(struct evbuffer *buf)
   1.378 +{
   1.379 +	EVBUFFER_LOCK(buf);
   1.380 +	++buf->refcnt;
   1.381 +	EVBUFFER_UNLOCK(buf);
   1.382 +}
   1.383 +
   1.384 +void
   1.385 +_evbuffer_incref_and_lock(struct evbuffer *buf)
   1.386 +{
   1.387 +	EVBUFFER_LOCK(buf);
   1.388 +	++buf->refcnt;
   1.389 +}
   1.390 +
   1.391 +int
   1.392 +evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
   1.393 +{
   1.394 +	EVBUFFER_LOCK(buffer);
   1.395 +	buffer->cb_queue = event_base_get_deferred_cb_queue(base);
   1.396 +	buffer->deferred_cbs = 1;
   1.397 +	event_deferred_cb_init(&buffer->deferred,
   1.398 +	    evbuffer_deferred_callback, buffer);
   1.399 +	EVBUFFER_UNLOCK(buffer);
   1.400 +	return 0;
   1.401 +}
   1.402 +
   1.403 +int
   1.404 +evbuffer_enable_locking(struct evbuffer *buf, void *lock)
   1.405 +{
   1.406 +#ifdef _EVENT_DISABLE_THREAD_SUPPORT
   1.407 +	return -1;
   1.408 +#else
   1.409 +	if (buf->lock)
   1.410 +		return -1;
   1.411 +
   1.412 +	if (!lock) {
   1.413 +		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
   1.414 +		if (!lock)
   1.415 +			return -1;
   1.416 +		buf->lock = lock;
   1.417 +		buf->own_lock = 1;
   1.418 +	} else {
   1.419 +		buf->lock = lock;
   1.420 +		buf->own_lock = 0;
   1.421 +	}
   1.422 +
   1.423 +	return 0;
   1.424 +#endif
   1.425 +}
   1.426 +
   1.427 +void
   1.428 +evbuffer_set_parent(struct evbuffer *buf, struct bufferevent *bev)
   1.429 +{
   1.430 +	EVBUFFER_LOCK(buf);
   1.431 +	buf->parent = bev;
   1.432 +	EVBUFFER_UNLOCK(buf);
   1.433 +}
   1.434 +
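          +/* Run the callbacks that apply to this call.  In the deferred pass
          + * (running_deferred) run the enabled callbacks that are not marked
          + * EVBUFFER_CB_NODEFER; when callbacks are deferred but this is the
          + * immediate invocation, run only the NODEFER ones and keep the add/del
          + * counters for the deferred pass; otherwise run every enabled callback.
          + * Requires the buffer lock. */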
   1.435 +static void
   1.436 +evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
   1.437 +{
   1.438 +	struct evbuffer_cb_entry *cbent, *next;
   1.439 +	struct evbuffer_cb_info info;
   1.440 +	size_t new_size;
   1.441 +	ev_uint32_t mask, masked_val;
   1.442 +	int clear = 1;
   1.443 +
   1.444 +	if (running_deferred) {
   1.445 +		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
   1.446 +		masked_val = EVBUFFER_CB_ENABLED;
   1.447 +	} else if (buffer->deferred_cbs) {
   1.448 +		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
   1.449 +		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
   1.450 +		/* Don't zero-out n_add/n_del, since the deferred callbacks
   1.451 +		   will want to see them. */
   1.452 +		clear = 0;
   1.453 +	} else {
   1.454 +		mask = EVBUFFER_CB_ENABLED;
   1.455 +		masked_val = EVBUFFER_CB_ENABLED;
   1.456 +	}
   1.457 +
   1.458 +	ASSERT_EVBUFFER_LOCKED(buffer);
   1.459 +
   1.460 +	if (TAILQ_EMPTY(&buffer->callbacks)) {
   1.461 +		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
   1.462 +		return;
   1.463 +	}
   1.464 +	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
   1.465 +		return;
   1.466 +
   1.467 +	new_size = buffer->total_len;
   1.468 +	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
   1.469 +	info.n_added = buffer->n_add_for_cb;
   1.470 +	info.n_deleted = buffer->n_del_for_cb;
   1.471 +	if (clear) {
   1.472 +		buffer->n_add_for_cb = 0;
   1.473 +		buffer->n_del_for_cb = 0;
   1.474 +	}
   1.475 +	for (cbent = TAILQ_FIRST(&buffer->callbacks);
   1.476 +	     cbent != NULL;
   1.477 +	     cbent = next) {
   1.478 +		/* Get the 'next' pointer now in case this callback decides
   1.479 +		 * to remove itself or something. */
   1.480 +		next = TAILQ_NEXT(cbent, next);
   1.481 +
   1.482 +		if ((cbent->flags & mask) != masked_val)
   1.483 +			continue;
   1.484 +
   1.485 +		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
   1.486 +			cbent->cb.cb_obsolete(buffer,
   1.487 +			    info.orig_size, new_size, cbent->cbarg);
   1.488 +		else
   1.489 +			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
   1.490 +	}
   1.491 +}
   1.492 +
   1.493 +void
   1.494 +evbuffer_invoke_callbacks(struct evbuffer *buffer)
   1.495 +{
   1.496 +	if (TAILQ_EMPTY(&buffer->callbacks)) {
   1.497 +		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
   1.498 +		return;
   1.499 +	}
   1.500 +
   1.501 +	if (buffer->deferred_cbs) {
   1.502 +		if (buffer->deferred.queued)
   1.503 +			return;
   1.504 +		_evbuffer_incref_and_lock(buffer);
   1.505 +		if (buffer->parent)
   1.506 +			bufferevent_incref(buffer->parent);
   1.507 +		EVBUFFER_UNLOCK(buffer);
   1.508 +		event_deferred_cb_schedule(buffer->cb_queue, &buffer->deferred);
   1.509 +	}
   1.510 +
   1.511 +	evbuffer_run_callbacks(buffer, 0);
   1.512 +}
   1.513 +
   1.514 +static void
   1.515 +evbuffer_deferred_callback(struct deferred_cb *cb, void *arg)
   1.516 +{
   1.517 +	struct bufferevent *parent = NULL;
   1.518 +	struct evbuffer *buffer = arg;
   1.519 +
   1.520 +	/* XXXX It would be better to run these callbacks without holding the
   1.521 +	 * lock */
   1.522 +	EVBUFFER_LOCK(buffer);
   1.523 +	parent = buffer->parent;
   1.524 +	evbuffer_run_callbacks(buffer, 1);
   1.525 +	_evbuffer_decref_and_unlock(buffer);
   1.526 +	if (parent)
   1.527 +		bufferevent_decref(parent);
   1.528 +}
   1.529 +
   1.530 +static void
   1.531 +evbuffer_remove_all_callbacks(struct evbuffer *buffer)
   1.532 +{
   1.533 +	struct evbuffer_cb_entry *cbent;
   1.534 +
   1.535 +	while ((cbent = TAILQ_FIRST(&buffer->callbacks))) {
   1.536 +	    TAILQ_REMOVE(&buffer->callbacks, cbent, next);
   1.537 +	    mm_free(cbent);
   1.538 +	}
   1.539 +}
   1.540 +
   1.541 +void
   1.542 +_evbuffer_decref_and_unlock(struct evbuffer *buffer)
   1.543 +{
   1.544 +	struct evbuffer_chain *chain, *next;
   1.545 +	ASSERT_EVBUFFER_LOCKED(buffer);
   1.546 +
   1.547 +	EVUTIL_ASSERT(buffer->refcnt > 0);
   1.548 +
   1.549 +	if (--buffer->refcnt > 0) {
   1.550 +		EVBUFFER_UNLOCK(buffer);
   1.551 +		return;
   1.552 +	}
   1.553 +
   1.554 +	for (chain = buffer->first; chain != NULL; chain = next) {
   1.555 +		next = chain->next;
   1.556 +		evbuffer_chain_free(chain);
   1.557 +	}
   1.558 +	evbuffer_remove_all_callbacks(buffer);
   1.559 +	if (buffer->deferred_cbs)
   1.560 +		event_deferred_cb_cancel(buffer->cb_queue, &buffer->deferred);
   1.561 +
   1.562 +	EVBUFFER_UNLOCK(buffer);
   1.563 +	if (buffer->own_lock)
   1.564 +		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
   1.565 +	mm_free(buffer);
   1.566 +}
   1.567 +
   1.568 +void
   1.569 +evbuffer_free(struct evbuffer *buffer)
   1.570 +{
   1.571 +	EVBUFFER_LOCK(buffer);
   1.572 +	_evbuffer_decref_and_unlock(buffer);
   1.573 +}
   1.574 +
   1.575 +void
   1.576 +evbuffer_lock(struct evbuffer *buf)
   1.577 +{
   1.578 +	EVBUFFER_LOCK(buf);
   1.579 +}
   1.580 +
   1.581 +void
   1.582 +evbuffer_unlock(struct evbuffer *buf)
   1.583 +{
   1.584 +	EVBUFFER_UNLOCK(buf);
   1.585 +}
   1.586 +
   1.587 +size_t
   1.588 +evbuffer_get_length(const struct evbuffer *buffer)
   1.589 +{
   1.590 +	size_t result;
   1.591 +
   1.592 +	EVBUFFER_LOCK(buffer);
   1.593 +
   1.594 +	result = (buffer->total_len);
   1.595 +
   1.596 +	EVBUFFER_UNLOCK(buffer);
   1.597 +
   1.598 +	return result;
   1.599 +}
   1.600 +
   1.601 +size_t
   1.602 +evbuffer_get_contiguous_space(const struct evbuffer *buf)
   1.603 +{
   1.604 +	struct evbuffer_chain *chain;
   1.605 +	size_t result;
   1.606 +
   1.607 +	EVBUFFER_LOCK(buf);
   1.608 +	chain = buf->first;
   1.609 +	result = (chain != NULL ? chain->off : 0);
   1.610 +	EVBUFFER_UNLOCK(buf);
   1.611 +
   1.612 +	return result;
   1.613 +}
   1.614 +
   1.615 +int
   1.616 +evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
   1.617 +    struct evbuffer_iovec *vec, int n_vecs)
   1.618 +{
   1.619 +	struct evbuffer_chain *chain, **chainp;
   1.620 +	int n = -1;
   1.621 +
   1.622 +	EVBUFFER_LOCK(buf);
   1.623 +	if (buf->freeze_end)
   1.624 +		goto done;
   1.625 +	if (n_vecs < 1)
   1.626 +		goto done;
   1.627 +	if (n_vecs == 1) {
   1.628 +		if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
   1.629 +			goto done;
   1.630 +
   1.631 +		vec[0].iov_base = CHAIN_SPACE_PTR(chain);
   1.632 +		vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain);
   1.633 +		EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
   1.634 +		n = 1;
   1.635 +	} else {
   1.636 +		if (_evbuffer_expand_fast(buf, size, n_vecs)<0)
   1.637 +			goto done;
   1.638 +		n = _evbuffer_read_setup_vecs(buf, size, vec, n_vecs,
   1.639 +				&chainp, 0);
   1.640 +	}
   1.641 +
   1.642 +done:
   1.643 +	EVBUFFER_UNLOCK(buf);
   1.644 +	return n;
   1.645 +
   1.646 +}
   1.647 +
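          +/* Advance buf->last_with_datap until it points at the last chain that
          + * actually holds data; returns the number of chains skipped.  Used after
          + * data has been committed past the old last-with-data position. */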
   1.648 +static int
   1.649 +advance_last_with_data(struct evbuffer *buf)
   1.650 +{
   1.651 +	int n = 0;
   1.652 +	ASSERT_EVBUFFER_LOCKED(buf);
   1.653 +
   1.654 +	if (!*buf->last_with_datap)
   1.655 +		return 0;
   1.656 +
   1.657 +	while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
   1.658 +		buf->last_with_datap = &(*buf->last_with_datap)->next;
   1.659 +		++n;
   1.660 +	}
   1.661 +	return n;
   1.662 +}
   1.663 +
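          +/* Commit space previously handed out by evbuffer_reserve_space().  The
          + * vectors must still describe those chains, in order, and must not claim
          + * more room than each chain has free; pass 1 below validates this before
          + * pass 2 adjusts anything, so a bad commit leaves the buffer unchanged. */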
   1.664 +int
   1.665 +evbuffer_commit_space(struct evbuffer *buf,
   1.666 +    struct evbuffer_iovec *vec, int n_vecs)
   1.667 +{
   1.668 +	struct evbuffer_chain *chain, **firstchainp, **chainp;
   1.669 +	int result = -1;
   1.670 +	size_t added = 0;
   1.671 +	int i;
   1.672 +
   1.673 +	EVBUFFER_LOCK(buf);
   1.674 +
   1.675 +	if (buf->freeze_end)
   1.676 +		goto done;
   1.677 +	if (n_vecs == 0) {
   1.678 +		result = 0;
   1.679 +		goto done;
   1.680 +	} else if (n_vecs == 1 &&
   1.681 +	    (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) {
   1.682 +		/* The user only got or used one chain; it might not
   1.683 +		 * be the first one with space in it. */
   1.684 +		if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
   1.685 +			goto done;
   1.686 +		buf->last->off += vec[0].iov_len;
   1.687 +		added = vec[0].iov_len;
   1.688 +		if (added)
   1.689 +			advance_last_with_data(buf);
   1.690 +		goto okay;
   1.691 +	}
   1.692 +
    1.693 +	/* Advance 'firstchainp' to the first chain with space in it. */
   1.694 +	firstchainp = buf->last_with_datap;
   1.695 +	if (!*firstchainp)
   1.696 +		goto done;
   1.697 +	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
   1.698 +		firstchainp = &(*firstchainp)->next;
   1.699 +	}
   1.700 +
   1.701 +	chain = *firstchainp;
   1.702 +	/* pass 1: make sure that the pointers and lengths of vecs[] are in
   1.703 +	 * bounds before we try to commit anything. */
   1.704 +	for (i=0; i<n_vecs; ++i) {
   1.705 +		if (!chain)
   1.706 +			goto done;
   1.707 +		if (vec[i].iov_base != (void*)CHAIN_SPACE_PTR(chain) ||
   1.708 +		    (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
   1.709 +			goto done;
   1.710 +		chain = chain->next;
   1.711 +	}
   1.712 +	/* pass 2: actually adjust all the chains. */
   1.713 +	chainp = firstchainp;
   1.714 +	for (i=0; i<n_vecs; ++i) {
   1.715 +		(*chainp)->off += vec[i].iov_len;
   1.716 +		added += vec[i].iov_len;
   1.717 +		if (vec[i].iov_len) {
   1.718 +			buf->last_with_datap = chainp;
   1.719 +		}
   1.720 +		chainp = &(*chainp)->next;
   1.721 +	}
   1.722 +
   1.723 +okay:
   1.724 +	buf->total_len += added;
   1.725 +	buf->n_add_for_cb += added;
   1.726 +	result = 0;
   1.727 +	evbuffer_invoke_callbacks(buf);
   1.728 +
   1.729 +done:
   1.730 +	EVBUFFER_UNLOCK(buf);
   1.731 +	return result;
   1.732 +}
   1.733 +
   1.734 +static inline int
   1.735 +HAS_PINNED_R(struct evbuffer *buf)
   1.736 +{
   1.737 +	return (buf->last && CHAIN_PINNED_R(buf->last));
   1.738 +}
   1.739 +
   1.740 +static inline void
   1.741 +ZERO_CHAIN(struct evbuffer *dst)
   1.742 +{
   1.743 +	ASSERT_EVBUFFER_LOCKED(dst);
   1.744 +	dst->first = NULL;
   1.745 +	dst->last = NULL;
   1.746 +	dst->last_with_datap = &(dst)->first;
   1.747 +	dst->total_len = 0;
   1.748 +}
   1.749 +
   1.750 +/* Prepares the contents of src to be moved to another buffer by removing
   1.751 + * read-pinned chains. The first pinned chain is saved in first, and the
   1.752 + * last in last. If src has no read-pinned chains, first and last are set
   1.753 + * to NULL. */
   1.754 +static int
   1.755 +PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
   1.756 +		struct evbuffer_chain **last)
   1.757 +{
   1.758 +	struct evbuffer_chain *chain, **pinned;
   1.759 +
   1.760 +	ASSERT_EVBUFFER_LOCKED(src);
   1.761 +
   1.762 +	if (!HAS_PINNED_R(src)) {
   1.763 +		*first = *last = NULL;
   1.764 +		return 0;
   1.765 +	}
   1.766 +
   1.767 +	pinned = src->last_with_datap;
   1.768 +	if (!CHAIN_PINNED_R(*pinned))
   1.769 +		pinned = &(*pinned)->next;
   1.770 +	EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
   1.771 +	chain = *first = *pinned;
   1.772 +	*last = src->last;
   1.773 +
   1.774 +	/* If there's data in the first pinned chain, we need to allocate
   1.775 +	 * a new chain and copy the data over. */
   1.776 +	if (chain->off) {
   1.777 +		struct evbuffer_chain *tmp;
   1.778 +
   1.779 +		EVUTIL_ASSERT(pinned == src->last_with_datap);
   1.780 +		tmp = evbuffer_chain_new(chain->off);
   1.781 +		if (!tmp)
   1.782 +			return -1;
   1.783 +		memcpy(tmp->buffer, chain->buffer + chain->misalign,
   1.784 +			chain->off);
   1.785 +		tmp->off = chain->off;
   1.786 +		*src->last_with_datap = tmp;
   1.787 +		src->last = tmp;
   1.788 +		chain->misalign += chain->off;
   1.789 +		chain->off = 0;
   1.790 +	} else {
   1.791 +		src->last = *src->last_with_datap;
   1.792 +		*pinned = NULL;
   1.793 +	}
   1.794 +
   1.795 +	return 0;
   1.796 +}
   1.797 +
   1.798 +static inline void
   1.799 +RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
   1.800 +		struct evbuffer_chain *last)
   1.801 +{
   1.802 +	ASSERT_EVBUFFER_LOCKED(src);
   1.803 +
   1.804 +	if (!pinned) {
   1.805 +		ZERO_CHAIN(src);
   1.806 +		return;
   1.807 +	}
   1.808 +
   1.809 +	src->first = pinned;
   1.810 +	src->last = last;
   1.811 +	src->last_with_datap = &src->first;
   1.812 +	src->total_len = 0;
   1.813 +}
   1.814 +
   1.815 +static inline void
   1.816 +COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
   1.817 +{
   1.818 +	ASSERT_EVBUFFER_LOCKED(dst);
   1.819 +	ASSERT_EVBUFFER_LOCKED(src);
   1.820 +	dst->first = src->first;
   1.821 +	if (src->last_with_datap == &src->first)
   1.822 +		dst->last_with_datap = &dst->first;
   1.823 +	else
   1.824 +		dst->last_with_datap = src->last_with_datap;
   1.825 +	dst->last = src->last;
   1.826 +	dst->total_len = src->total_len;
   1.827 +}
   1.828 +
   1.829 +static void
   1.830 +APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
   1.831 +{
   1.832 +	ASSERT_EVBUFFER_LOCKED(dst);
   1.833 +	ASSERT_EVBUFFER_LOCKED(src);
   1.834 +	dst->last->next = src->first;
   1.835 +	if (src->last_with_datap == &src->first)
   1.836 +		dst->last_with_datap = &dst->last->next;
   1.837 +	else
   1.838 +		dst->last_with_datap = src->last_with_datap;
   1.839 +	dst->last = src->last;
   1.840 +	dst->total_len += src->total_len;
   1.841 +}
   1.842 +
   1.843 +static void
   1.844 +PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
   1.845 +{
   1.846 +	ASSERT_EVBUFFER_LOCKED(dst);
   1.847 +	ASSERT_EVBUFFER_LOCKED(src);
   1.848 +	src->last->next = dst->first;
   1.849 +	dst->first = src->first;
   1.850 +	dst->total_len += src->total_len;
   1.851 +	if (*dst->last_with_datap == NULL) {
   1.852 +		if (src->last_with_datap == &(src)->first)
   1.853 +			dst->last_with_datap = &dst->first;
   1.854 +		else
   1.855 +			dst->last_with_datap = src->last_with_datap;
   1.856 +	} else if (dst->last_with_datap == &dst->first) {
   1.857 +		dst->last_with_datap = &src->last->next;
   1.858 +	}
   1.859 +}
   1.860 +
   1.861 +int
   1.862 +evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
   1.863 +{
   1.864 +	struct evbuffer_chain *pinned, *last;
   1.865 +	size_t in_total_len, out_total_len;
   1.866 +	int result = 0;
   1.867 +
   1.868 +	EVBUFFER_LOCK2(inbuf, outbuf);
   1.869 +	in_total_len = inbuf->total_len;
   1.870 +	out_total_len = outbuf->total_len;
   1.871 +
   1.872 +	if (in_total_len == 0 || outbuf == inbuf)
   1.873 +		goto done;
   1.874 +
   1.875 +	if (outbuf->freeze_end || inbuf->freeze_start) {
   1.876 +		result = -1;
   1.877 +		goto done;
   1.878 +	}
   1.879 +
   1.880 +	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
   1.881 +		result = -1;
   1.882 +		goto done;
   1.883 +	}
   1.884 +
   1.885 +	if (out_total_len == 0) {
   1.886 +		/* There might be an empty chain at the start of outbuf; free
   1.887 +		 * it. */
   1.888 +		evbuffer_free_all_chains(outbuf->first);
   1.889 +		COPY_CHAIN(outbuf, inbuf);
   1.890 +	} else {
   1.891 +		APPEND_CHAIN(outbuf, inbuf);
   1.892 +	}
   1.893 +
   1.894 +	RESTORE_PINNED(inbuf, pinned, last);
   1.895 +
   1.896 +	inbuf->n_del_for_cb += in_total_len;
   1.897 +	outbuf->n_add_for_cb += in_total_len;
   1.898 +
   1.899 +	evbuffer_invoke_callbacks(inbuf);
   1.900 +	evbuffer_invoke_callbacks(outbuf);
   1.901 +
   1.902 +done:
   1.903 +	EVBUFFER_UNLOCK2(inbuf, outbuf);
   1.904 +	return result;
   1.905 +}
   1.906 +
   1.907 +int
   1.908 +evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
   1.909 +{
   1.910 +	struct evbuffer_chain *pinned, *last;
   1.911 +	size_t in_total_len, out_total_len;
   1.912 +	int result = 0;
   1.913 +
   1.914 +	EVBUFFER_LOCK2(inbuf, outbuf);
   1.915 +
   1.916 +	in_total_len = inbuf->total_len;
   1.917 +	out_total_len = outbuf->total_len;
   1.918 +
   1.919 +	if (!in_total_len || inbuf == outbuf)
   1.920 +		goto done;
   1.921 +
   1.922 +	if (outbuf->freeze_start || inbuf->freeze_start) {
   1.923 +		result = -1;
   1.924 +		goto done;
   1.925 +	}
   1.926 +
   1.927 +	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
   1.928 +		result = -1;
   1.929 +		goto done;
   1.930 +	}
   1.931 +
   1.932 +	if (out_total_len == 0) {
   1.933 +		/* There might be an empty chain at the start of outbuf; free
   1.934 +		 * it. */
   1.935 +		evbuffer_free_all_chains(outbuf->first);
   1.936 +		COPY_CHAIN(outbuf, inbuf);
   1.937 +	} else {
   1.938 +		PREPEND_CHAIN(outbuf, inbuf);
   1.939 +	}
   1.940 +
   1.941 +	RESTORE_PINNED(inbuf, pinned, last);
   1.942 +
   1.943 +	inbuf->n_del_for_cb += in_total_len;
   1.944 +	outbuf->n_add_for_cb += in_total_len;
   1.945 +
   1.946 +	evbuffer_invoke_callbacks(inbuf);
   1.947 +	evbuffer_invoke_callbacks(outbuf);
   1.948 +done:
   1.949 +	EVBUFFER_UNLOCK2(inbuf, outbuf);
   1.950 +	return result;
   1.951 +}
   1.952 +
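          +/* Remove the first len bytes from the front of the buffer.  Whole chains
          + * are freed where possible (read-pinned chains are emptied but kept), and
          + * a partially drained chain just has its misalign advanced.  Draining
          + * more than the buffer holds simply empties it. */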
   1.953 +int
   1.954 +evbuffer_drain(struct evbuffer *buf, size_t len)
   1.955 +{
   1.956 +	struct evbuffer_chain *chain, *next;
   1.957 +	size_t remaining, old_len;
   1.958 +	int result = 0;
   1.959 +
   1.960 +	EVBUFFER_LOCK(buf);
   1.961 +	old_len = buf->total_len;
   1.962 +
   1.963 +	if (old_len == 0)
   1.964 +		goto done;
   1.965 +
   1.966 +	if (buf->freeze_start) {
   1.967 +		result = -1;
   1.968 +		goto done;
   1.969 +	}
   1.970 +
   1.971 +	if (len >= old_len && !HAS_PINNED_R(buf)) {
   1.972 +		len = old_len;
   1.973 +		for (chain = buf->first; chain != NULL; chain = next) {
   1.974 +			next = chain->next;
   1.975 +			evbuffer_chain_free(chain);
   1.976 +		}
   1.977 +
   1.978 +		ZERO_CHAIN(buf);
   1.979 +	} else {
   1.980 +		if (len >= old_len)
   1.981 +			len = old_len;
   1.982 +
   1.983 +		buf->total_len -= len;
   1.984 +		remaining = len;
   1.985 +		for (chain = buf->first;
   1.986 +		     remaining >= chain->off;
   1.987 +		     chain = next) {
   1.988 +			next = chain->next;
   1.989 +			remaining -= chain->off;
   1.990 +
   1.991 +			if (chain == *buf->last_with_datap) {
   1.992 +				buf->last_with_datap = &buf->first;
   1.993 +			}
   1.994 +			if (&chain->next == buf->last_with_datap)
   1.995 +				buf->last_with_datap = &buf->first;
   1.996 +
   1.997 +			if (CHAIN_PINNED_R(chain)) {
   1.998 +				EVUTIL_ASSERT(remaining == 0);
   1.999 +				chain->misalign += chain->off;
  1.1000 +				chain->off = 0;
  1.1001 +				break;
  1.1002 +			} else
  1.1003 +				evbuffer_chain_free(chain);
  1.1004 +		}
  1.1005 +
  1.1006 +		buf->first = chain;
  1.1007 +		if (chain) {
  1.1008 +			chain->misalign += remaining;
  1.1009 +			chain->off -= remaining;
  1.1010 +		}
  1.1011 +	}
  1.1012 +
  1.1013 +	buf->n_del_for_cb += len;
  1.1014 +	/* Tell someone about changes in this buffer */
  1.1015 +	evbuffer_invoke_callbacks(buf);
  1.1016 +
  1.1017 +done:
  1.1018 +	EVBUFFER_UNLOCK(buf);
  1.1019 +	return result;
  1.1020 +}
  1.1021 +
  1.1022 +/* Reads data from an event buffer and drains the bytes read */
  1.1023 +int
  1.1024 +evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
  1.1025 +{
  1.1026 +	ev_ssize_t n;
  1.1027 +	EVBUFFER_LOCK(buf);
  1.1028 +	n = evbuffer_copyout(buf, data_out, datlen);
  1.1029 +	if (n > 0) {
  1.1030 +		if (evbuffer_drain(buf, n)<0)
  1.1031 +			n = -1;
  1.1032 +	}
  1.1033 +	EVBUFFER_UNLOCK(buf);
  1.1034 +	return (int)n;
  1.1035 +}
  1.1036 +
  1.1037 +ev_ssize_t
  1.1038 +evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
  1.1039 +{
  1.1040 +	/*XXX fails badly on sendfile case. */
  1.1041 +	struct evbuffer_chain *chain;
  1.1042 +	char *data = data_out;
  1.1043 +	size_t nread;
  1.1044 +	ev_ssize_t result = 0;
  1.1045 +
  1.1046 +	EVBUFFER_LOCK(buf);
  1.1047 +
  1.1048 +	chain = buf->first;
  1.1049 +
  1.1050 +	if (datlen >= buf->total_len)
  1.1051 +		datlen = buf->total_len;
  1.1052 +
  1.1053 +	if (datlen == 0)
  1.1054 +		goto done;
  1.1055 +
  1.1056 +	if (buf->freeze_start) {
  1.1057 +		result = -1;
  1.1058 +		goto done;
  1.1059 +	}
  1.1060 +
  1.1061 +	nread = datlen;
  1.1062 +
  1.1063 +	while (datlen && datlen >= chain->off) {
  1.1064 +		memcpy(data, chain->buffer + chain->misalign, chain->off);
  1.1065 +		data += chain->off;
  1.1066 +		datlen -= chain->off;
  1.1067 +
  1.1068 +		chain = chain->next;
  1.1069 +		EVUTIL_ASSERT(chain || datlen==0);
  1.1070 +	}
  1.1071 +
  1.1072 +	if (datlen) {
  1.1073 +		EVUTIL_ASSERT(chain);
  1.1074 +		memcpy(data, chain->buffer + chain->misalign, datlen);
  1.1075 +	}
  1.1076 +
  1.1077 +	result = nread;
  1.1078 +done:
  1.1079 +	EVBUFFER_UNLOCK(buf);
  1.1080 +	return result;
  1.1081 +}
  1.1082 +
   1.1083 +/* reads data from the src buffer into the dst buffer, avoiding memcpy
   1.1084 + * where possible. */
  1.1085 +/*  XXXX should return ev_ssize_t */
  1.1086 +int
  1.1087 +evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
  1.1088 +    size_t datlen)
  1.1089 +{
  1.1090 +	/*XXX We should have an option to force this to be zero-copy.*/
  1.1091 +
  1.1092 +	/*XXX can fail badly on sendfile case. */
  1.1093 +	struct evbuffer_chain *chain, *previous;
  1.1094 +	size_t nread = 0;
  1.1095 +	int result;
  1.1096 +
  1.1097 +	EVBUFFER_LOCK2(src, dst);
  1.1098 +
  1.1099 +	chain = previous = src->first;
  1.1100 +
  1.1101 +	if (datlen == 0 || dst == src) {
  1.1102 +		result = 0;
  1.1103 +		goto done;
  1.1104 +	}
  1.1105 +
  1.1106 +	if (dst->freeze_end || src->freeze_start) {
  1.1107 +		result = -1;
  1.1108 +		goto done;
  1.1109 +	}
  1.1110 +
   1.1111 +	/* short-cut if we are asked for all (or more than) the buffered data */
  1.1112 +	if (datlen >= src->total_len) {
  1.1113 +		datlen = src->total_len;
  1.1114 +		evbuffer_add_buffer(dst, src);
  1.1115 +		result = (int)datlen; /*XXXX should return ev_ssize_t*/
  1.1116 +		goto done;
  1.1117 +	}
  1.1118 +
  1.1119 +	/* removes chains if possible */
  1.1120 +	while (chain->off <= datlen) {
   1.1121 +		/* We can't remove the last chain with data from src unless we
   1.1122 +		 * remove all chains, in which case we would have taken the if
   1.1123 +		 * block above. */
  1.1124 +		EVUTIL_ASSERT(chain != *src->last_with_datap);
  1.1125 +		nread += chain->off;
  1.1126 +		datlen -= chain->off;
  1.1127 +		previous = chain;
  1.1128 +		if (src->last_with_datap == &chain->next)
  1.1129 +			src->last_with_datap = &src->first;
  1.1130 +		chain = chain->next;
  1.1131 +	}
  1.1132 +
  1.1133 +	if (nread) {
  1.1134 +		/* we can remove the chain */
  1.1135 +		struct evbuffer_chain **chp;
  1.1136 +		chp = evbuffer_free_trailing_empty_chains(dst);
  1.1137 +
  1.1138 +		if (dst->first == NULL) {
  1.1139 +			dst->first = src->first;
  1.1140 +		} else {
  1.1141 +			*chp = src->first;
  1.1142 +		}
  1.1143 +		dst->last = previous;
  1.1144 +		previous->next = NULL;
  1.1145 +		src->first = chain;
  1.1146 +		advance_last_with_data(dst);
  1.1147 +
  1.1148 +		dst->total_len += nread;
  1.1149 +		dst->n_add_for_cb += nread;
  1.1150 +	}
  1.1151 +
  1.1152 +	/* we know that there is more data in the src buffer than
  1.1153 +	 * we want to read, so we manually drain the chain */
  1.1154 +	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
  1.1155 +	chain->misalign += datlen;
  1.1156 +	chain->off -= datlen;
  1.1157 +	nread += datlen;
  1.1158 +
  1.1159 +	/* You might think we would want to increment dst->n_add_for_cb
  1.1160 +	 * here too.  But evbuffer_add above already took care of that.
  1.1161 +	 */
  1.1162 +	src->total_len -= nread;
  1.1163 +	src->n_del_for_cb += nread;
  1.1164 +
  1.1165 +	if (nread) {
  1.1166 +		evbuffer_invoke_callbacks(dst);
  1.1167 +		evbuffer_invoke_callbacks(src);
  1.1168 +	}
  1.1169 +	result = (int)nread;/*XXXX should change return type */
  1.1170 +
  1.1171 +done:
  1.1172 +	EVBUFFER_UNLOCK2(src, dst);
  1.1173 +	return result;
  1.1174 +}
  1.1175 +
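          +/* Linearize the first `size` bytes (the whole buffer if size is negative)
          + * into one contiguous chain and return a pointer to them, or NULL if the
          + * buffer is too short or one of the needed chains is pinned.  This may
          + * copy data and free chains, so it can be expensive for large sizes. */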
  1.1176 +unsigned char *
  1.1177 +evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
  1.1178 +{
  1.1179 +	struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
  1.1180 +	unsigned char *buffer, *result = NULL;
  1.1181 +	ev_ssize_t remaining;
  1.1182 +	int removed_last_with_data = 0;
  1.1183 +	int removed_last_with_datap = 0;
  1.1184 +
  1.1185 +	EVBUFFER_LOCK(buf);
  1.1186 +
  1.1187 +	chain = buf->first;
  1.1188 +
  1.1189 +	if (size < 0)
  1.1190 +		size = buf->total_len;
  1.1191 +	/* if size > buf->total_len, we cannot guarantee to the user that she
  1.1192 +	 * is going to have a long enough buffer afterwards; so we return
  1.1193 +	 * NULL */
  1.1194 +	if (size == 0 || (size_t)size > buf->total_len)
  1.1195 +		goto done;
  1.1196 +
  1.1197 +	/* No need to pull up anything; the first size bytes are
  1.1198 +	 * already here. */
  1.1199 +	if (chain->off >= (size_t)size) {
  1.1200 +		result = chain->buffer + chain->misalign;
  1.1201 +		goto done;
  1.1202 +	}
  1.1203 +
  1.1204 +	/* Make sure that none of the chains we need to copy from is pinned. */
  1.1205 +	remaining = size - chain->off;
  1.1206 +	EVUTIL_ASSERT(remaining >= 0);
  1.1207 +	for (tmp=chain->next; tmp; tmp=tmp->next) {
  1.1208 +		if (CHAIN_PINNED(tmp))
  1.1209 +			goto done;
  1.1210 +		if (tmp->off >= (size_t)remaining)
  1.1211 +			break;
  1.1212 +		remaining -= tmp->off;
  1.1213 +	}
  1.1214 +
  1.1215 +	if (CHAIN_PINNED(chain)) {
  1.1216 +		size_t old_off = chain->off;
  1.1217 +		if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
  1.1218 +			/* not enough room at end of chunk. */
  1.1219 +			goto done;
  1.1220 +		}
  1.1221 +		buffer = CHAIN_SPACE_PTR(chain);
  1.1222 +		tmp = chain;
  1.1223 +		tmp->off = size;
  1.1224 +		size -= old_off;
  1.1225 +		chain = chain->next;
  1.1226 +	} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
  1.1227 +		/* already have enough space in the first chain */
  1.1228 +		size_t old_off = chain->off;
  1.1229 +		buffer = chain->buffer + chain->misalign + chain->off;
  1.1230 +		tmp = chain;
  1.1231 +		tmp->off = size;
  1.1232 +		size -= old_off;
  1.1233 +		chain = chain->next;
  1.1234 +	} else {
  1.1235 +		if ((tmp = evbuffer_chain_new(size)) == NULL) {
  1.1236 +			event_warn("%s: out of memory", __func__);
  1.1237 +			goto done;
  1.1238 +		}
  1.1239 +		buffer = tmp->buffer;
  1.1240 +		tmp->off = size;
  1.1241 +		buf->first = tmp;
  1.1242 +	}
  1.1243 +
  1.1244 +	/* TODO(niels): deal with buffers that point to NULL like sendfile */
  1.1245 +
  1.1246 +	/* Copy and free every chunk that will be entirely pulled into tmp */
  1.1247 +	last_with_data = *buf->last_with_datap;
  1.1248 +	for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
  1.1249 +		next = chain->next;
  1.1250 +
  1.1251 +		memcpy(buffer, chain->buffer + chain->misalign, chain->off);
  1.1252 +		size -= chain->off;
  1.1253 +		buffer += chain->off;
  1.1254 +		if (chain == last_with_data)
  1.1255 +			removed_last_with_data = 1;
  1.1256 +		if (&chain->next == buf->last_with_datap)
  1.1257 +			removed_last_with_datap = 1;
  1.1258 +
  1.1259 +		evbuffer_chain_free(chain);
  1.1260 +	}
  1.1261 +
  1.1262 +	if (chain != NULL) {
  1.1263 +		memcpy(buffer, chain->buffer + chain->misalign, size);
  1.1264 +		chain->misalign += size;
  1.1265 +		chain->off -= size;
  1.1266 +	} else {
  1.1267 +		buf->last = tmp;
  1.1268 +	}
  1.1269 +
  1.1270 +	tmp->next = chain;
  1.1271 +
  1.1272 +	if (removed_last_with_data) {
  1.1273 +		buf->last_with_datap = &buf->first;
  1.1274 +	} else if (removed_last_with_datap) {
  1.1275 +		if (buf->first->next && buf->first->next->off)
  1.1276 +			buf->last_with_datap = &buf->first->next;
  1.1277 +		else
  1.1278 +			buf->last_with_datap = &buf->first;
  1.1279 +	}
  1.1280 +
  1.1281 +	result = (tmp->buffer + tmp->misalign);
  1.1282 +
  1.1283 +done:
  1.1284 +	EVBUFFER_UNLOCK(buf);
  1.1285 +	return result;
  1.1286 +}
  1.1287 +
  1.1288 +/*
   1.1289 + * Reads a line terminated by '\r\n', '\n\r', '\r', or '\n'.
   1.1290 + * The returned buffer needs to be freed by the caller.
  1.1291 + */
  1.1292 +char *
  1.1293 +evbuffer_readline(struct evbuffer *buffer)
  1.1294 +{
  1.1295 +	return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
  1.1296 +}
  1.1297 +
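          +/* Search forward from `it` for the first occurrence of chr.  On success,
          + * update the pointer to that position and return its offset within the
          + * buffer; return -1 if the character does not occur. */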
  1.1298 +static inline ev_ssize_t
  1.1299 +evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
  1.1300 +{
  1.1301 +	struct evbuffer_chain *chain = it->_internal.chain;
  1.1302 +	size_t i = it->_internal.pos_in_chain;
  1.1303 +	while (chain != NULL) {
  1.1304 +		char *buffer = (char *)chain->buffer + chain->misalign;
  1.1305 +		char *cp = memchr(buffer+i, chr, chain->off-i);
  1.1306 +		if (cp) {
  1.1307 +			it->_internal.chain = chain;
  1.1308 +			it->_internal.pos_in_chain = cp - buffer;
  1.1309 +			it->pos += (cp - buffer - i);
  1.1310 +			return it->pos;
  1.1311 +		}
  1.1312 +		it->pos += chain->off - i;
  1.1313 +		i = 0;
  1.1314 +		chain = chain->next;
  1.1315 +	}
  1.1316 +
  1.1317 +	return (-1);
  1.1318 +}
  1.1319 +
  1.1320 +static inline char *
  1.1321 +find_eol_char(char *s, size_t len)
  1.1322 +{
  1.1323 +#define CHUNK_SZ 128
  1.1324 +	/* Lots of benchmarking found this approach to be faster in practice
   1.1325 +	 * than doing two memchrs over the whole buffer, doing a memchr on each
  1.1326 +	 * char of the buffer, or trying to emulate memchr by hand. */
  1.1327 +	char *s_end, *cr, *lf;
  1.1328 +	s_end = s+len;
  1.1329 +	while (s < s_end) {
  1.1330 +		size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s);
  1.1331 +		cr = memchr(s, '\r', chunk);
  1.1332 +		lf = memchr(s, '\n', chunk);
  1.1333 +		if (cr) {
  1.1334 +			if (lf && lf < cr)
  1.1335 +				return lf;
  1.1336 +			return cr;
  1.1337 +		} else if (lf) {
  1.1338 +			return lf;
  1.1339 +		}
  1.1340 +		s += CHUNK_SZ;
  1.1341 +	}
  1.1342 +
  1.1343 +	return NULL;
  1.1344 +#undef CHUNK_SZ
  1.1345 +}
  1.1346 +
  1.1347 +static ev_ssize_t
  1.1348 +evbuffer_find_eol_char(struct evbuffer_ptr *it)
  1.1349 +{
  1.1350 +	struct evbuffer_chain *chain = it->_internal.chain;
  1.1351 +	size_t i = it->_internal.pos_in_chain;
  1.1352 +	while (chain != NULL) {
  1.1353 +		char *buffer = (char *)chain->buffer + chain->misalign;
  1.1354 +		char *cp = find_eol_char(buffer+i, chain->off-i);
  1.1355 +		if (cp) {
  1.1356 +			it->_internal.chain = chain;
  1.1357 +			it->_internal.pos_in_chain = cp - buffer;
  1.1358 +			it->pos += (cp - buffer) - i;
  1.1359 +			return it->pos;
  1.1360 +		}
  1.1361 +		it->pos += chain->off - i;
  1.1362 +		i = 0;
  1.1363 +		chain = chain->next;
  1.1364 +	}
  1.1365 +
  1.1366 +	return (-1);
  1.1367 +}
  1.1368 +
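          +/* Count how many consecutive bytes, starting at *ptr, are members of
          + * chrset, and advance *ptr past them (an evbuffer analogue of strspn()). */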
  1.1369 +static inline int
  1.1370 +evbuffer_strspn(
  1.1371 +	struct evbuffer_ptr *ptr, const char *chrset)
  1.1372 +{
  1.1373 +	int count = 0;
  1.1374 +	struct evbuffer_chain *chain = ptr->_internal.chain;
  1.1375 +	size_t i = ptr->_internal.pos_in_chain;
  1.1376 +
  1.1377 +	if (!chain)
  1.1378 +		return -1;
  1.1379 +
  1.1380 +	while (1) {
  1.1381 +		char *buffer = (char *)chain->buffer + chain->misalign;
  1.1382 +		for (; i < chain->off; ++i) {
  1.1383 +			const char *p = chrset;
  1.1384 +			while (*p) {
  1.1385 +				if (buffer[i] == *p++)
  1.1386 +					goto next;
  1.1387 +			}
  1.1388 +			ptr->_internal.chain = chain;
  1.1389 +			ptr->_internal.pos_in_chain = i;
  1.1390 +			ptr->pos += count;
  1.1391 +			return count;
  1.1392 +		next:
  1.1393 +			++count;
  1.1394 +		}
  1.1395 +		i = 0;
  1.1396 +
  1.1397 +		if (! chain->next) {
  1.1398 +			ptr->_internal.chain = chain;
  1.1399 +			ptr->_internal.pos_in_chain = i;
  1.1400 +			ptr->pos += count;
  1.1401 +			return count;
  1.1402 +		}
  1.1403 +
  1.1404 +		chain = chain->next;
  1.1405 +	}
  1.1406 +}
  1.1407 +
  1.1408 +
  1.1409 +static inline char
  1.1410 +evbuffer_getchr(struct evbuffer_ptr *it)
  1.1411 +{
  1.1412 +	struct evbuffer_chain *chain = it->_internal.chain;
  1.1413 +	size_t off = it->_internal.pos_in_chain;
  1.1414 +
  1.1415 +	return chain->buffer[chain->misalign + off];
  1.1416 +}
  1.1417 +
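          +/* Find the next end-of-line according to eol_style, starting at `start`
          + * (or at the beginning of the buffer when start is NULL).  The returned
          + * position has pos == -1 if no EOL was found; *eol_len_out is set to the
          + * number of EOL bytes to drain after the line itself. */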
  1.1418 +struct evbuffer_ptr
  1.1419 +evbuffer_search_eol(struct evbuffer *buffer,
  1.1420 +    struct evbuffer_ptr *start, size_t *eol_len_out,
  1.1421 +    enum evbuffer_eol_style eol_style)
  1.1422 +{
  1.1423 +	struct evbuffer_ptr it, it2;
  1.1424 +	size_t extra_drain = 0;
  1.1425 +	int ok = 0;
  1.1426 +
  1.1427 +	EVBUFFER_LOCK(buffer);
  1.1428 +
  1.1429 +	if (start) {
  1.1430 +		memcpy(&it, start, sizeof(it));
  1.1431 +	} else {
  1.1432 +		it.pos = 0;
  1.1433 +		it._internal.chain = buffer->first;
  1.1434 +		it._internal.pos_in_chain = 0;
  1.1435 +	}
  1.1436 +
  1.1437 +	/* the eol_style determines our first stop character and how many
  1.1438 +	 * characters we are going to drain afterwards. */
  1.1439 +	switch (eol_style) {
  1.1440 +	case EVBUFFER_EOL_ANY:
  1.1441 +		if (evbuffer_find_eol_char(&it) < 0)
  1.1442 +			goto done;
  1.1443 +		memcpy(&it2, &it, sizeof(it));
  1.1444 +		extra_drain = evbuffer_strspn(&it2, "\r\n");
  1.1445 +		break;
  1.1446 +	case EVBUFFER_EOL_CRLF_STRICT: {
  1.1447 +		it = evbuffer_search(buffer, "\r\n", 2, &it);
  1.1448 +		if (it.pos < 0)
  1.1449 +			goto done;
  1.1450 +		extra_drain = 2;
  1.1451 +		break;
  1.1452 +	}
  1.1453 +	case EVBUFFER_EOL_CRLF:
  1.1454 +		while (1) {
  1.1455 +			if (evbuffer_find_eol_char(&it) < 0)
  1.1456 +				goto done;
  1.1457 +			if (evbuffer_getchr(&it) == '\n') {
  1.1458 +				extra_drain = 1;
  1.1459 +				break;
  1.1460 +			} else if (!evbuffer_ptr_memcmp(
  1.1461 +				    buffer, &it, "\r\n", 2)) {
  1.1462 +				extra_drain = 2;
  1.1463 +				break;
  1.1464 +			} else {
  1.1465 +				if (evbuffer_ptr_set(buffer, &it, 1,
  1.1466 +					EVBUFFER_PTR_ADD)<0)
  1.1467 +					goto done;
  1.1468 +			}
  1.1469 +		}
  1.1470 +		break;
  1.1471 +	case EVBUFFER_EOL_LF:
  1.1472 +		if (evbuffer_strchr(&it, '\n') < 0)
  1.1473 +			goto done;
  1.1474 +		extra_drain = 1;
  1.1475 +		break;
  1.1476 +	default:
  1.1477 +		goto done;
  1.1478 +	}
  1.1479 +
  1.1480 +	ok = 1;
  1.1481 +done:
  1.1482 +	EVBUFFER_UNLOCK(buffer);
  1.1483 +
  1.1484 +	if (!ok) {
  1.1485 +		it.pos = -1;
  1.1486 +	}
  1.1487 +	if (eol_len_out)
  1.1488 +		*eol_len_out = extra_drain;
  1.1489 +
  1.1490 +	return it;
  1.1491 +}
  1.1492 +
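          +/* Read and remove a single line, returning a freshly allocated,
          + * NUL-terminated copy that the caller must free, or NULL if no complete
          + * line is available (or the front of the buffer is frozen).  The EOL
          + * bytes themselves are drained but not included in the result. */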
  1.1493 +char *
  1.1494 +evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
  1.1495 +		enum evbuffer_eol_style eol_style)
  1.1496 +{
  1.1497 +	struct evbuffer_ptr it;
  1.1498 +	char *line;
  1.1499 +	size_t n_to_copy=0, extra_drain=0;
  1.1500 +	char *result = NULL;
  1.1501 +
  1.1502 +	EVBUFFER_LOCK(buffer);
  1.1503 +
  1.1504 +	if (buffer->freeze_start) {
  1.1505 +		goto done;
  1.1506 +	}
  1.1507 +
  1.1508 +	it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
  1.1509 +	if (it.pos < 0)
  1.1510 +		goto done;
  1.1511 +	n_to_copy = it.pos;
  1.1512 +
  1.1513 +	if ((line = mm_malloc(n_to_copy+1)) == NULL) {
  1.1514 +		event_warn("%s: out of memory", __func__);
  1.1515 +		goto done;
  1.1516 +	}
  1.1517 +
  1.1518 +	evbuffer_remove(buffer, line, n_to_copy);
  1.1519 +	line[n_to_copy] = '\0';
  1.1520 +
  1.1521 +	evbuffer_drain(buffer, extra_drain);
  1.1522 +	result = line;
  1.1523 +done:
  1.1524 +	EVBUFFER_UNLOCK(buffer);
  1.1525 +
  1.1526 +	if (n_read_out)
  1.1527 +		*n_read_out = result ? n_to_copy : 0;
  1.1528 +
  1.1529 +	return result;
  1.1530 +}
  1.1531 +
  1.1532 +#define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096
  1.1533 +
  1.1534 +/* Adds data to an event buffer */
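          +/* If the last chain has room (or can be realigned cheaply), the bytes are
          + * copied there; otherwise a new chain is appended, sized to roughly twice
          + * the previous chain (doubling stops at EVBUFFER_CHAIN_MAX_AUTO_SIZE) or
          + * to datlen if that is larger. */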
  1.1535 +
  1.1536 +int
  1.1537 +evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
  1.1538 +{
  1.1539 +	struct evbuffer_chain *chain, *tmp;
  1.1540 +	const unsigned char *data = data_in;
  1.1541 +	size_t remain, to_alloc;
  1.1542 +	int result = -1;
  1.1543 +
  1.1544 +	EVBUFFER_LOCK(buf);
  1.1545 +
  1.1546 +	if (buf->freeze_end) {
  1.1547 +		goto done;
  1.1548 +	}
  1.1549 +
  1.1550 +	chain = buf->last;
  1.1551 +
  1.1552 +	/* If there are no chains allocated for this buffer, allocate one
  1.1553 +	 * big enough to hold all the data. */
  1.1554 +	if (chain == NULL) {
  1.1555 +		chain = evbuffer_chain_new(datlen);
  1.1556 +		if (!chain)
  1.1557 +			goto done;
  1.1558 +		evbuffer_chain_insert(buf, chain);
  1.1559 +	}
  1.1560 +
  1.1561 +	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
  1.1562 +		remain = (size_t)(chain->buffer_len - chain->misalign - chain->off);
  1.1563 +		if (remain >= datlen) {
  1.1564 +			/* there's enough space to hold all the data in the
  1.1565 +			 * current last chain */
  1.1566 +			memcpy(chain->buffer + chain->misalign + chain->off,
  1.1567 +			    data, datlen);
  1.1568 +			chain->off += datlen;
  1.1569 +			buf->total_len += datlen;
  1.1570 +			buf->n_add_for_cb += datlen;
  1.1571 +			goto out;
  1.1572 +		} else if (!CHAIN_PINNED(chain) &&
  1.1573 +		    evbuffer_chain_should_realign(chain, datlen)) {
  1.1574 +			/* we can fit the data into the misalignment */
  1.1575 +			evbuffer_chain_align(chain);
  1.1576 +
  1.1577 +			memcpy(chain->buffer + chain->off, data, datlen);
  1.1578 +			chain->off += datlen;
  1.1579 +			buf->total_len += datlen;
  1.1580 +			buf->n_add_for_cb += datlen;
  1.1581 +			goto out;
  1.1582 +		}
  1.1583 +	} else {
  1.1584 +		/* we cannot write any data to the last chain */
  1.1585 +		remain = 0;
  1.1586 +	}
  1.1587 +
  1.1588 +	/* we need to add another chain */
  1.1589 +	to_alloc = chain->buffer_len;
  1.1590 +	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
  1.1591 +		to_alloc <<= 1;
  1.1592 +	if (datlen > to_alloc)
  1.1593 +		to_alloc = datlen;
  1.1594 +	tmp = evbuffer_chain_new(to_alloc);
  1.1595 +	if (tmp == NULL)
  1.1596 +		goto done;
  1.1597 +
  1.1598 +	if (remain) {
  1.1599 +		memcpy(chain->buffer + chain->misalign + chain->off,
  1.1600 +		    data, remain);
  1.1601 +		chain->off += remain;
  1.1602 +		buf->total_len += remain;
  1.1603 +		buf->n_add_for_cb += remain;
  1.1604 +	}
  1.1605 +
  1.1606 +	data += remain;
  1.1607 +	datlen -= remain;
  1.1608 +
  1.1609 +	memcpy(tmp->buffer, data, datlen);
  1.1610 +	tmp->off = datlen;
  1.1611 +	evbuffer_chain_insert(buf, tmp);
  1.1612 +	buf->n_add_for_cb += datlen;
  1.1613 +
  1.1614 +out:
  1.1615 +	evbuffer_invoke_callbacks(buf);
  1.1616 +	result = 0;
  1.1617 +done:
  1.1618 +	EVBUFFER_UNLOCK(buf);
  1.1619 +	return result;
  1.1620 +}
  1.1621 +
  1.1622 +int
  1.1623 +evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
  1.1624 +{
  1.1625 +	struct evbuffer_chain *chain, *tmp;
  1.1626 +	int result = -1;
  1.1627 +
  1.1628 +	EVBUFFER_LOCK(buf);
  1.1629 +
  1.1630 +	if (buf->freeze_start) {
  1.1631 +		goto done;
  1.1632 +	}
  1.1633 +
  1.1634 +	chain = buf->first;
  1.1635 +
  1.1636 +	if (chain == NULL) {
  1.1637 +		chain = evbuffer_chain_new(datlen);
  1.1638 +		if (!chain)
  1.1639 +			goto done;
  1.1640 +		evbuffer_chain_insert(buf, chain);
  1.1641 +	}
  1.1642 +
  1.1643 +	/* we cannot touch immutable buffers */
  1.1644 +	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
  1.1645 +		/* If this chain is empty, we can treat it as
  1.1646 +		 * 'empty at the beginning' rather than 'empty at the end' */
  1.1647 +		if (chain->off == 0)
  1.1648 +			chain->misalign = chain->buffer_len;
  1.1649 +
  1.1650 +		if ((size_t)chain->misalign >= datlen) {
  1.1651 +			/* we have enough space to fit everything */
  1.1652 +			memcpy(chain->buffer + chain->misalign - datlen,
  1.1653 +			    data, datlen);
  1.1654 +			chain->off += datlen;
  1.1655 +			chain->misalign -= datlen;
  1.1656 +			buf->total_len += datlen;
  1.1657 +			buf->n_add_for_cb += datlen;
  1.1658 +			goto out;
  1.1659 +		} else if (chain->misalign) {
  1.1660 +			/* we can only fit some of the data. */
  1.1661 +			memcpy(chain->buffer,
  1.1662 +			    (char*)data + datlen - chain->misalign,
  1.1663 +			    (size_t)chain->misalign);
  1.1664 +			chain->off += (size_t)chain->misalign;
  1.1665 +			buf->total_len += (size_t)chain->misalign;
  1.1666 +			buf->n_add_for_cb += (size_t)chain->misalign;
  1.1667 +			datlen -= (size_t)chain->misalign;
  1.1668 +			chain->misalign = 0;
  1.1669 +		}
  1.1670 +	}
  1.1671 +
  1.1672 +	/* we need to add another chain */
  1.1673 +	if ((tmp = evbuffer_chain_new(datlen)) == NULL)
  1.1674 +		goto done;
  1.1675 +	buf->first = tmp;
  1.1676 +	if (buf->last_with_datap == &buf->first)
  1.1677 +		buf->last_with_datap = &tmp->next;
  1.1678 +
  1.1679 +	tmp->next = chain;
  1.1680 +
  1.1681 +	tmp->off = datlen;
  1.1682 +	tmp->misalign = tmp->buffer_len - datlen;
  1.1683 +
  1.1684 +	memcpy(tmp->buffer + tmp->misalign, data, datlen);
  1.1685 +	buf->total_len += datlen;
   1.1686 +	buf->n_add_for_cb += datlen;
  1.1687 +
  1.1688 +out:
  1.1689 +	evbuffer_invoke_callbacks(buf);
  1.1690 +	result = 0;
  1.1691 +done:
  1.1692 +	EVBUFFER_UNLOCK(buf);
  1.1693 +	return result;
  1.1694 +}
  1.1695 +
  1.1696 +/** Helper: realigns the memory in chain->buffer so that misalign is 0. */
  1.1697 +static void
  1.1698 +evbuffer_chain_align(struct evbuffer_chain *chain)
  1.1699 +{
  1.1700 +	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
  1.1701 +	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
  1.1702 +	memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
  1.1703 +	chain->misalign = 0;
  1.1704 +}
  1.1705 +
  1.1706 +#define MAX_TO_COPY_IN_EXPAND 4096
  1.1707 +#define MAX_TO_REALIGN_IN_EXPAND 2048
  1.1708 +
  1.1709 +/** Helper: return true iff we should realign chain to fit datalen bytes of
  1.1710 +    data in it. */
  1.1711 +static int
  1.1712 +evbuffer_chain_should_realign(struct evbuffer_chain *chain,
  1.1713 +    size_t datlen)
  1.1714 +{
  1.1715 +	return chain->buffer_len - chain->off >= datlen &&
  1.1716 +	    (chain->off < chain->buffer_len / 2) &&
  1.1717 +	    (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
  1.1718 +}
  1.1719 +
  1.1720 +/* Expands the available space in the event buffer to at least datlen, all in
  1.1721 + * a single chunk.  Return that chunk. */
  1.1722 +static struct evbuffer_chain *
  1.1723 +evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
  1.1724 +{
  1.1725 +	struct evbuffer_chain *chain, **chainp;
  1.1726 +	struct evbuffer_chain *result = NULL;
  1.1727 +	ASSERT_EVBUFFER_LOCKED(buf);
  1.1728 +
  1.1729 +	chainp = buf->last_with_datap;
  1.1730 +
  1.1731 +	/* XXX If *chainp is no longer writeable, but has enough space in its
  1.1732 +	 * misalign, this might be a bad idea: we could still use *chainp, not
  1.1733 +	 * (*chainp)->next. */
  1.1734 +	if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
  1.1735 +		chainp = &(*chainp)->next;
  1.1736 +
  1.1737 +	/* 'chain' now points to the first chain with writable space (if any).
  1.1738 +	 * We will either use it, realign it, replace it, or resize it. */
  1.1739 +	chain = *chainp;
  1.1740 +
  1.1741 +	if (chain == NULL ||
  1.1742 +	    (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
  1.1743 +		/* We can't use the last_with_data chain at all.  Just add a
  1.1744 +		 * new one that's big enough. */
  1.1745 +		goto insert_new;
  1.1746 +	}
  1.1747 +
  1.1748 +	/* If we can fit all the data, then we don't have to do anything */
  1.1749 +	if (CHAIN_SPACE_LEN(chain) >= datlen) {
  1.1750 +		result = chain;
  1.1751 +		goto ok;
  1.1752 +	}
  1.1753 +
  1.1754 +	/* If the chain is completely empty, just replace it by adding a new
  1.1755 +	 * empty chain. */
  1.1756 +	if (chain->off == 0) {
  1.1757 +		goto insert_new;
  1.1758 +	}
  1.1759 +
  1.1760 +	/* If the misalignment plus the remaining space fulfills our data
  1.1761 +	 * needs, we could just force an alignment to happen.  Afterwards, we
  1.1762 +	 * have enough space.  But only do this if we're saving a lot of space
  1.1763 +	 * and not moving too much data.  Otherwise the space savings are
  1.1764 +	 * probably offset by the time lost in copying.
  1.1765 +	 */
  1.1766 +	if (evbuffer_chain_should_realign(chain, datlen)) {
  1.1767 +		evbuffer_chain_align(chain);
  1.1768 +		result = chain;
  1.1769 +		goto ok;
  1.1770 +	}
  1.1771 +
  1.1772 +	/* At this point, we can either resize the last chunk with space in
  1.1773 +	 * it, use the next chunk after it, or add a new chunk.  If we add a
  1.1774 +	 * new chunk, we waste CHAIN_SPACE_LEN(chain) bytes in the former last
  1.1775 +	 * chunk.  If we resize, we have to copy chain->off bytes.
  1.1776 +	 */
  1.1777 +
  1.1778 +	/* Would expanding this chunk be affordable and worthwhile? */
  1.1779 +	if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
  1.1780 +	    chain->off > MAX_TO_COPY_IN_EXPAND) {
  1.1781 +		/* It's not worth resizing this chain. Can the next one be
  1.1782 +		 * used? */
  1.1783 +		if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
  1.1784 +			/* Yes, we can just use the next chain (which should
  1.1785 +			 * be empty). */
  1.1786 +			result = chain->next;
  1.1787 +			goto ok;
  1.1788 +		} else {
  1.1789 +			/* No; append a new chain (which will free all
  1.1790 +			 * terminal empty chains.) */
  1.1791 +			goto insert_new;
  1.1792 +		}
  1.1793 +	} else {
  1.1794 +		/* Okay, we're going to try to resize this chain: Not doing so
  1.1795 +		 * would waste at least 1/8 of its current allocation, and we
  1.1796 +		 * can do so without having to copy more than
  1.1797 +		 * MAX_TO_COPY_IN_EXPAND bytes. */
  1.1798 +		/* figure out how much space we need */
  1.1799 +		size_t length = chain->off + datlen;
  1.1800 +		struct evbuffer_chain *tmp = evbuffer_chain_new(length);
  1.1801 +		if (tmp == NULL)
  1.1802 +			goto err;
  1.1803 +
  1.1804 +		/* copy the data over that we had so far */
  1.1805 +		tmp->off = chain->off;
  1.1806 +		memcpy(tmp->buffer, chain->buffer + chain->misalign,
  1.1807 +		    chain->off);
  1.1808 +		/* fix up the list */
  1.1809 +		EVUTIL_ASSERT(*chainp == chain);
  1.1810 +		result = *chainp = tmp;
  1.1811 +
  1.1812 +		if (buf->last == chain)
  1.1813 +			buf->last = tmp;
  1.1814 +
  1.1815 +		tmp->next = chain->next;
  1.1816 +		evbuffer_chain_free(chain);
  1.1817 +		goto ok;
  1.1818 +	}
  1.1819 +
  1.1820 +insert_new:
  1.1821 +	result = evbuffer_chain_insert_new(buf, datlen);
  1.1822 +	if (!result)
  1.1823 +		goto err;
  1.1824 +ok:
  1.1825 +	EVUTIL_ASSERT(result);
  1.1826 +	EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
  1.1827 +err:
  1.1828 +	return result;
  1.1829 +}
  1.1830 +
  1.1831 +/* Make sure that datlen bytes are available for writing in the last n
  1.1832 + * chains.  Never copies or moves data. */
  1.1833 +int
  1.1834 +_evbuffer_expand_fast(struct evbuffer *buf, size_t datlen, int n)
  1.1835 +{
  1.1836 +	struct evbuffer_chain *chain = buf->last, *tmp, *next;
  1.1837 +	size_t avail;
  1.1838 +	int used;
  1.1839 +
  1.1840 +	ASSERT_EVBUFFER_LOCKED(buf);
  1.1841 +	EVUTIL_ASSERT(n >= 2);
  1.1842 +
  1.1843 +	if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
  1.1844 +		/* There is no last chunk, or we can't touch the last chunk.
  1.1845 +		 * Just add a new chunk. */
  1.1846 +		chain = evbuffer_chain_new(datlen);
  1.1847 +		if (chain == NULL)
  1.1848 +			return (-1);
  1.1849 +
  1.1850 +		evbuffer_chain_insert(buf, chain);
  1.1851 +		return (0);
  1.1852 +	}
  1.1853 +
  1.1854 +	used = 0; /* number of chains we're using space in. */
  1.1855 +	avail = 0; /* how much space they have. */
  1.1856 +	/* How many bytes can we stick at the end of the buffer as it is?
  1.1857 +	 * Iterate over the chains at the end of the buffer, trying to see how
  1.1858 +	 * much space we have in the first n. */
  1.1859 +	for (chain = *buf->last_with_datap; chain; chain = chain->next) {
  1.1860 +		if (chain->off) {
  1.1861 +			size_t space = (size_t) CHAIN_SPACE_LEN(chain);
  1.1862 +			EVUTIL_ASSERT(chain == *buf->last_with_datap);
  1.1863 +			if (space) {
  1.1864 +				avail += space;
  1.1865 +				++used;
  1.1866 +			}
  1.1867 +		} else {
  1.1868 +			/* No data in chain; realign it. */
  1.1869 +			chain->misalign = 0;
  1.1870 +			avail += chain->buffer_len;
  1.1871 +			++used;
  1.1872 +		}
  1.1873 +		if (avail >= datlen) {
  1.1874 +			/* There is already enough space.  Just return */
  1.1875 +			return (0);
  1.1876 +		}
  1.1877 +		if (used == n)
  1.1878 +			break;
  1.1879 +	}
  1.1880 +
  1.1881 +	/* There wasn't enough space in the first n chains with space in
  1.1882 +	 * them. Either add a new chain with enough space, or replace all
  1.1883 +	 * empty chains with one that has enough space, depending on n. */
  1.1884 +	if (used < n) {
  1.1885 +		/* The loop ran off the end of the chains before it hit n
  1.1886 +		 * chains; we can add another. */
  1.1887 +		EVUTIL_ASSERT(chain == NULL);
  1.1888 +
  1.1889 +		tmp = evbuffer_chain_new(datlen - avail);
  1.1890 +		if (tmp == NULL)
  1.1891 +			return (-1);
  1.1892 +
  1.1893 +		buf->last->next = tmp;
  1.1894 +		buf->last = tmp;
  1.1895 +		/* (we would only set last_with_data if we added the first
  1.1896 +		 * chain. But if the buffer had no chains, we would have
  1.1897 +		 * just allocated a new chain earlier) */
  1.1898 +		return (0);
  1.1899 +	} else {
  1.1900 +		/* Nuke _all_ the empty chains. */
  1.1901 +		int rmv_all = 0; /* True iff we removed last_with_data. */
  1.1902 +		chain = *buf->last_with_datap;
  1.1903 +		if (!chain->off) {
  1.1904 +			EVUTIL_ASSERT(chain == buf->first);
  1.1905 +			rmv_all = 1;
  1.1906 +			avail = 0;
  1.1907 +		} else {
  1.1908 +			avail = (size_t) CHAIN_SPACE_LEN(chain);
  1.1909 +			chain = chain->next;
  1.1910 +		}
  1.1911 +
  1.1912 +
  1.1913 +		for (; chain; chain = next) {
  1.1914 +			next = chain->next;
  1.1915 +			EVUTIL_ASSERT(chain->off == 0);
  1.1916 +			evbuffer_chain_free(chain);
  1.1917 +		}
  1.1918 +		tmp = evbuffer_chain_new(datlen - avail);
  1.1919 +		if (tmp == NULL) {
  1.1920 +			if (rmv_all) {
  1.1921 +				ZERO_CHAIN(buf);
  1.1922 +			} else {
  1.1923 +				buf->last = *buf->last_with_datap;
  1.1924 +				(*buf->last_with_datap)->next = NULL;
  1.1925 +			}
  1.1926 +			return (-1);
  1.1927 +		}
  1.1928 +
  1.1929 +		if (rmv_all) {
  1.1930 +			buf->first = buf->last = tmp;
  1.1931 +			buf->last_with_datap = &buf->first;
  1.1932 +		} else {
  1.1933 +			(*buf->last_with_datap)->next = tmp;
  1.1934 +			buf->last = tmp;
  1.1935 +		}
  1.1936 +		return (0);
  1.1937 +	}
  1.1938 +}
  1.1939 +
  1.1940 +int
  1.1941 +evbuffer_expand(struct evbuffer *buf, size_t datlen)
  1.1942 +{
  1.1943 +	struct evbuffer_chain *chain;
  1.1944 +
  1.1945 +	EVBUFFER_LOCK(buf);
  1.1946 +	chain = evbuffer_expand_singlechain(buf, datlen);
  1.1947 +	EVBUFFER_UNLOCK(buf);
  1.1948 +	return chain ? 0 : -1;
  1.1949 +}
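
/* Illustrative usage sketch (editor's addition, not upstream libevent code):
 * evbuffer_expand() pre-reserves contiguous space, so a burst of small
 * evbuffer_add() calls afterwards lands in a single chain instead of
 * triggering repeated allocations.  Kept inside #if 0; uses only the public
 * API from event2/buffer.h. */
#if 0
static void
example_expand_then_fill(struct evbuffer *buf)
{
	int i;
	/* Reserve room for all of the small appends up front. */
	if (evbuffer_expand(buf, 1024) == 0) {
		for (i = 0; i < 128; ++i)
			evbuffer_add(buf, "chunk\r\n", 7);
	}
}
#endif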
  1.1950 +
  1.1951 +/*
  1.1952 + * Reads data from a file descriptor into a buffer.
  1.1953 + */
  1.1954 +
  1.1955 +#if defined(_EVENT_HAVE_SYS_UIO_H) || defined(WIN32)
  1.1956 +#define USE_IOVEC_IMPL
  1.1957 +#endif
  1.1958 +
  1.1959 +#ifdef USE_IOVEC_IMPL
  1.1960 +
  1.1961 +#ifdef _EVENT_HAVE_SYS_UIO_H
  1.1962 +/* number of iovecs we use for writev; fragmentation determines
  1.1963 + * how much we end up writing */
  1.1964 +
  1.1965 +#define DEFAULT_WRITE_IOVEC 128
  1.1966 +
  1.1967 +#if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
  1.1968 +#define NUM_WRITE_IOVEC UIO_MAXIOV
  1.1969 +#elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
  1.1970 +#define NUM_WRITE_IOVEC IOV_MAX
  1.1971 +#else
  1.1972 +#define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
  1.1973 +#endif
  1.1974 +
  1.1975 +#define IOV_TYPE struct iovec
  1.1976 +#define IOV_PTR_FIELD iov_base
  1.1977 +#define IOV_LEN_FIELD iov_len
  1.1978 +#define IOV_LEN_TYPE size_t
  1.1979 +#else
  1.1980 +#define NUM_WRITE_IOVEC 16
  1.1981 +#define IOV_TYPE WSABUF
  1.1982 +#define IOV_PTR_FIELD buf
  1.1983 +#define IOV_LEN_FIELD len
  1.1984 +#define IOV_LEN_TYPE unsigned long
  1.1985 +#endif
  1.1986 +#endif
  1.1987 +#define NUM_READ_IOVEC 4
  1.1988 +
  1.1989 +#define EVBUFFER_MAX_READ	4096
  1.1990 +
  1.1991 +/** Helper function to figure out which space to use for reading data into
  1.1992 +    an evbuffer.  Internal use only.
  1.1993 +
  1.1994 +    @param buf The buffer to read into
  1.1995 +    @param howmuch How much we want to read.
  1.1996 +    @param vecs An array of two or more iovecs or WSABUFs.
  1.1997 +    @param n_vecs_avail The length of vecs
  1.1998 +    @param chainp A pointer to a variable to hold the first chain we're
  1.1999 +      reading into.
  1.2000 +    @param exact Boolean: if true, we do not provide more than 'howmuch'
  1.2001 +      space in the vectors, even if more space is available.
  1.2002 +    @return The number of buffers we're using.
  1.2003 + */
  1.2004 +int
  1.2005 +_evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
  1.2006 +    struct evbuffer_iovec *vecs, int n_vecs_avail,
  1.2007 +    struct evbuffer_chain ***chainp, int exact)
  1.2008 +{
  1.2009 +	struct evbuffer_chain *chain;
  1.2010 +	struct evbuffer_chain **firstchainp;
  1.2011 +	size_t so_far;
  1.2012 +	int i;
  1.2013 +	ASSERT_EVBUFFER_LOCKED(buf);
  1.2014 +
  1.2015 +	if (howmuch < 0)
  1.2016 +		return -1;
  1.2017 +
  1.2018 +	so_far = 0;
  1.2019 +	/* Let firstchain be the first chain with any space on it */
  1.2020 +	firstchainp = buf->last_with_datap;
  1.2021 +	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
  1.2022 +		firstchainp = &(*firstchainp)->next;
  1.2023 +	}
  1.2024 +
  1.2025 +	chain = *firstchainp;
  1.2026 +	for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
  1.2027 +		size_t avail = (size_t) CHAIN_SPACE_LEN(chain);
  1.2028 +		if (avail > (howmuch - so_far) && exact)
  1.2029 +			avail = howmuch - so_far;
  1.2030 +		vecs[i].iov_base = CHAIN_SPACE_PTR(chain);
  1.2031 +		vecs[i].iov_len = avail;
  1.2032 +		so_far += avail;
  1.2033 +		chain = chain->next;
  1.2034 +	}
  1.2035 +
  1.2036 +	*chainp = firstchainp;
  1.2037 +	return i;
  1.2038 +}
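
/* Illustrative usage sketch (editor's addition, not upstream libevent code):
 * the public counterpart of this helper is the evbuffer_reserve_space() /
 * evbuffer_commit_space() pair, which hands the caller writable iovecs and
 * then commits only what was actually filled in.  Kept inside #if 0; uses
 * only the public API from event2/buffer.h. */
#if 0
static int
example_reserve_and_commit(struct evbuffer *buf)
{
	struct evbuffer_iovec vec[2];
	int n = evbuffer_reserve_space(buf, 4096, vec, 2);
	if (n <= 0 || vec[0].iov_len < 5)
		return -1;
	/* Fill part of the first extent ... */
	memcpy(vec[0].iov_base, "hello", 5);
	vec[0].iov_len = 5;
	/* ... and commit just that one iovec; the rest of the
	 * reservation is released. */
	return evbuffer_commit_space(buf, vec, 1);
}
#endif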
  1.2039 +
  1.2040 +static int
  1.2041 +get_n_bytes_readable_on_socket(evutil_socket_t fd)
  1.2042 +{
  1.2043 +#if defined(FIONREAD) && defined(WIN32)
  1.2044 +	unsigned long lng = EVBUFFER_MAX_READ;
  1.2045 +	if (ioctlsocket(fd, FIONREAD, &lng) < 0)
  1.2046 +		return -1;
  1.2047 +	return (int)lng;
  1.2048 +#elif defined(FIONREAD)
  1.2049 +	int n = EVBUFFER_MAX_READ;
  1.2050 +	if (ioctl(fd, FIONREAD, &n) < 0)
  1.2051 +		return -1;
  1.2052 +	return n;
  1.2053 +#else
  1.2054 +	return EVBUFFER_MAX_READ;
  1.2055 +#endif
  1.2056 +}
  1.2057 +
  1.2058 +/* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
  1.2059 + * as howmuch? */
  1.2060 +int
  1.2061 +evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
  1.2062 +{
  1.2063 +	struct evbuffer_chain **chainp;
  1.2064 +	int n;
  1.2065 +	int result;
  1.2066 +
  1.2067 +#ifdef USE_IOVEC_IMPL
  1.2068 +	int nvecs, i, remaining;
  1.2069 +#else
  1.2070 +	struct evbuffer_chain *chain;
  1.2071 +	unsigned char *p;
  1.2072 +#endif
  1.2073 +
  1.2074 +	EVBUFFER_LOCK(buf);
  1.2075 +
  1.2076 +	if (buf->freeze_end) {
  1.2077 +		result = -1;
  1.2078 +		goto done;
  1.2079 +	}
  1.2080 +
  1.2081 +	n = get_n_bytes_readable_on_socket(fd);
  1.2082 +	if (n <= 0 || n > EVBUFFER_MAX_READ)
  1.2083 +		n = EVBUFFER_MAX_READ;
  1.2084 +	if (howmuch < 0 || howmuch > n)
  1.2085 +		howmuch = n;
  1.2086 +
  1.2087 +#ifdef USE_IOVEC_IMPL
  1.2088 +	/* Since we can use iovecs, we're willing to use the last
  1.2089 +	 * NUM_READ_IOVEC chains. */
  1.2090 +	if (_evbuffer_expand_fast(buf, howmuch, NUM_READ_IOVEC) == -1) {
  1.2091 +		result = -1;
  1.2092 +		goto done;
  1.2093 +	} else {
  1.2094 +		IOV_TYPE vecs[NUM_READ_IOVEC];
  1.2095 +#ifdef _EVBUFFER_IOVEC_IS_NATIVE
  1.2096 +		nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs,
  1.2097 +		    NUM_READ_IOVEC, &chainp, 1);
  1.2098 +#else
  1.2099 +		/* We aren't using the native struct iovec.  Therefore,
  1.2100 +		   we are on win32. */
  1.2101 +		struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
  1.2102 +		nvecs = _evbuffer_read_setup_vecs(buf, howmuch, ev_vecs, 2,
  1.2103 +		    &chainp, 1);
  1.2104 +
  1.2105 +		for (i=0; i < nvecs; ++i)
  1.2106 +			WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
  1.2107 +#endif
  1.2108 +
  1.2109 +#ifdef WIN32
  1.2110 +		{
  1.2111 +			DWORD bytesRead;
  1.2112 +			DWORD flags=0;
  1.2113 +			if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
  1.2114 +				/* The read failed. It might be a close,
  1.2115 +				 * or it might be an error. */
  1.2116 +				if (WSAGetLastError() == WSAECONNABORTED)
  1.2117 +					n = 0;
  1.2118 +				else
  1.2119 +					n = -1;
  1.2120 +			} else
  1.2121 +				n = bytesRead;
  1.2122 +		}
  1.2123 +#else
  1.2124 +		n = readv(fd, vecs, nvecs);
  1.2125 +#endif
  1.2126 +	}
  1.2127 +
  1.2128 +#else /*!USE_IOVEC_IMPL*/
  1.2129 +	/* If we don't have FIONREAD, we might waste some space here */
  1.2130 +	/* XXX we _will_ waste some space here if there is any space left
  1.2131 +	 * over on buf->last. */
  1.2132 +	if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
  1.2133 +		result = -1;
  1.2134 +		goto done;
  1.2135 +	}
  1.2136 +
  1.2137 +	/* We can append new data at this point */
  1.2138 +	p = chain->buffer + chain->misalign + chain->off;
  1.2139 +
  1.2140 +#ifndef WIN32
  1.2141 +	n = read(fd, p, howmuch);
  1.2142 +#else
  1.2143 +	n = recv(fd, p, howmuch, 0);
  1.2144 +#endif
  1.2145 +#endif /* USE_IOVEC_IMPL */
  1.2146 +
  1.2147 +	if (n == -1) {
  1.2148 +		result = -1;
  1.2149 +		goto done;
  1.2150 +	}
  1.2151 +	if (n == 0) {
  1.2152 +		result = 0;
  1.2153 +		goto done;
  1.2154 +	}
  1.2155 +
  1.2156 +#ifdef USE_IOVEC_IMPL
  1.2157 +	remaining = n;
  1.2158 +	for (i=0; i < nvecs; ++i) {
  1.2159 +		ev_ssize_t space = (ev_ssize_t) CHAIN_SPACE_LEN(*chainp);
  1.2160 +		if (space < remaining) {
  1.2161 +			(*chainp)->off += space;
  1.2162 +			remaining -= (int)space;
  1.2163 +		} else {
  1.2164 +			(*chainp)->off += remaining;
  1.2165 +			buf->last_with_datap = chainp;
  1.2166 +			break;
  1.2167 +		}
  1.2168 +		chainp = &(*chainp)->next;
  1.2169 +	}
  1.2170 +#else
  1.2171 +	chain->off += n;
  1.2172 +	advance_last_with_data(buf);
  1.2173 +#endif
  1.2174 +	buf->total_len += n;
  1.2175 +	buf->n_add_for_cb += n;
  1.2176 +
  1.2177 +	/* Tell someone about changes in this buffer */
  1.2178 +	evbuffer_invoke_callbacks(buf);
  1.2179 +	result = n;
  1.2180 +done:
  1.2181 +	EVBUFFER_UNLOCK(buf);
  1.2182 +	return result;
  1.2183 +}
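
/* Illustrative usage sketch (editor's addition, not upstream libevent code):
 * a typical read path driven by an event callback.  evbuffer_read() returns
 * the number of bytes appended, 0 on EOF, and -1 on error.  Kept inside
 * #if 0; uses only the public API from event2/buffer.h. */
#if 0
static void
example_on_readable(evutil_socket_t fd, struct evbuffer *input)
{
	/* howmuch == -1 means "up to the per-call limit". */
	int n = evbuffer_read(input, fd, -1);
	if (n > 0) {
		/* process evbuffer_get_length(input) buffered bytes ... */
	} else if (n == 0) {
		/* the peer closed the connection */
	} else {
		/* error; consult errno (or WSAGetLastError() on win32) */
	}
}
#endif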
  1.2184 +
  1.2185 +#ifdef WIN32
  1.2186 +static int
  1.2187 +evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd, ev_ssize_t howmuch)
  1.2188 +{
  1.2189 +	int result;
  1.2190 +	int nchains, n;
  1.2191 +	struct evbuffer_iovec v[2];
  1.2192 +
  1.2193 +	EVBUFFER_LOCK(buf);
  1.2194 +
  1.2195 +	if (buf->freeze_end) {
  1.2196 +		result = -1;
  1.2197 +		goto done;
  1.2198 +	}
  1.2199 +
  1.2200 +	if (howmuch < 0)
  1.2201 +		howmuch = 16384;
  1.2202 +
  1.2203 +
  1.2204 +	/* XXX we _will_ waste some space here if there is any space left
  1.2205 +	 * over on buf->last. */
  1.2206 +	nchains = evbuffer_reserve_space(buf, howmuch, v, 2);
  1.2207 +	if (nchains < 1 || nchains > 2) {
  1.2208 +		result = -1;
  1.2209 +		goto done;
  1.2210 +	}
  1.2211 +	n = read((int)fd, v[0].iov_base, (unsigned int)v[0].iov_len);
  1.2212 +	if (n <= 0) {
  1.2213 +		result = n;
  1.2214 +		goto done;
  1.2215 +	}
  1.2216 +	v[0].iov_len = (IOV_LEN_TYPE) n; /* XXXX another problem with big n.*/
  1.2217 +	if (nchains > 1) {
  1.2218 +		n = read((int)fd, v[1].iov_base, (unsigned int)v[1].iov_len);
  1.2219 +		if (n <= 0) {
  1.2220 +			result = (unsigned long) v[0].iov_len;
  1.2221 +			evbuffer_commit_space(buf, v, 1);
  1.2222 +			goto done;
  1.2223 +		}
  1.2224 +		v[1].iov_len = n;
  1.2225 +	}
  1.2226 +	evbuffer_commit_space(buf, v, nchains);
  1.2227 +
  1.2228 +	result = n;
  1.2229 +done:
  1.2230 +	EVBUFFER_UNLOCK(buf);
  1.2231 +	return result;
  1.2232 +}
  1.2233 +#endif
  1.2234 +
  1.2235 +#ifdef USE_IOVEC_IMPL
  1.2236 +static inline int
  1.2237 +evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
  1.2238 +    ev_ssize_t howmuch)
  1.2239 +{
  1.2240 +	IOV_TYPE iov[NUM_WRITE_IOVEC];
  1.2241 +	struct evbuffer_chain *chain = buffer->first;
  1.2242 +	int n, i = 0;
  1.2243 +
  1.2244 +	if (howmuch < 0)
  1.2245 +		return -1;
  1.2246 +
  1.2247 +	ASSERT_EVBUFFER_LOCKED(buffer);
  1.2248 +	/* XXX make this top out at some maximal data length?  if the
  1.2249 +	 * buffer has (say) 1MB in it, split over 128 chains, there's
  1.2250 +	 * no way it all gets written in one go. */
  1.2251 +	while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
  1.2252 +#ifdef USE_SENDFILE
  1.2253 +		/* we cannot write the file info via writev */
  1.2254 +		if (chain->flags & EVBUFFER_SENDFILE)
  1.2255 +			break;
  1.2256 +#endif
  1.2257 +		iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
  1.2258 +		if ((size_t)howmuch >= chain->off) {
  1.2259 +			/* XXX could be problematic when windows supports mmap */
  1.2260 +			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
  1.2261 +			howmuch -= chain->off;
  1.2262 +		} else {
  1.2263 +			/* XXX could be problematic when windows supports mmap */
  1.2264 +			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
  1.2265 +			break;
  1.2266 +		}
  1.2267 +		chain = chain->next;
  1.2268 +	}
  1.2269 +	if (! i)
  1.2270 +		return 0;
  1.2271 +#ifdef WIN32
  1.2272 +	{
  1.2273 +		DWORD bytesSent;
  1.2274 +		if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
  1.2275 +			n = -1;
  1.2276 +		else
  1.2277 +			n = bytesSent;
  1.2278 +	}
  1.2279 +#else
  1.2280 +	n = writev(fd, iov, i);
  1.2281 +#endif
  1.2282 +	return (n);
  1.2283 +}
  1.2284 +#endif
  1.2285 +
  1.2286 +#ifdef USE_SENDFILE
  1.2287 +static inline int
  1.2288 +evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t fd,
  1.2289 +    ev_ssize_t howmuch)
  1.2290 +{
  1.2291 +	struct evbuffer_chain *chain = buffer->first;
  1.2292 +	struct evbuffer_chain_fd *info =
  1.2293 +	    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
  1.2294 +#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
  1.2295 +	int res;
  1.2296 +	off_t len = chain->off;
  1.2297 +#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
  1.2298 +	ev_ssize_t res;
  1.2299 +	off_t offset = chain->misalign;
  1.2300 +#endif
  1.2301 +
  1.2302 +	ASSERT_EVBUFFER_LOCKED(buffer);
  1.2303 +
  1.2304 +#if defined(SENDFILE_IS_MACOSX)
  1.2305 +	res = sendfile(info->fd, fd, chain->misalign, &len, NULL, 0);
  1.2306 +	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
  1.2307 +		return (-1);
  1.2308 +
  1.2309 +	return (len);
  1.2310 +#elif defined(SENDFILE_IS_FREEBSD)
  1.2311 +	res = sendfile(info->fd, fd, chain->misalign, chain->off, NULL, &len, 0);
  1.2312 +	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
  1.2313 +		return (-1);
  1.2314 +
  1.2315 +	return (len);
  1.2316 +#elif defined(SENDFILE_IS_LINUX)
  1.2317 +	/* TODO(niels): implement splice */
  1.2318 +	res = sendfile(fd, info->fd, &offset, chain->off);
  1.2319 +	if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
  1.2320 +		/* if this is EAGAIN or EINTR return 0; otherwise, -1 */
  1.2321 +		return (0);
  1.2322 +	}
  1.2323 +	return (res);
  1.2324 +#elif defined(SENDFILE_IS_SOLARIS)
  1.2325 +	{
  1.2326 +		const off_t offset_orig = offset;
  1.2327 +		res = sendfile(fd, info->fd, &offset, chain->off);
  1.2328 +		if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
  1.2329 +			if (offset - offset_orig)
  1.2330 +				return offset - offset_orig;
  1.2331 +			/* if this is EAGAIN or EINTR and no bytes were
  1.2332 +			 * written, return 0 */
  1.2333 +			return (0);
  1.2334 +		}
  1.2335 +		return (res);
  1.2336 +	}
  1.2337 +#endif
  1.2338 +}
  1.2339 +#endif
  1.2340 +
  1.2341 +int
  1.2342 +evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
  1.2343 +    ev_ssize_t howmuch)
  1.2344 +{
  1.2345 +	int n = -1;
  1.2346 +
  1.2347 +	EVBUFFER_LOCK(buffer);
  1.2348 +
  1.2349 +	if (buffer->freeze_start) {
  1.2350 +		goto done;
  1.2351 +	}
  1.2352 +
  1.2353 +	if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
  1.2354 +		howmuch = buffer->total_len;
  1.2355 +
  1.2356 +	if (howmuch > 0) {
  1.2357 +#ifdef USE_SENDFILE
  1.2358 +		struct evbuffer_chain *chain = buffer->first;
  1.2359 +		if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
  1.2360 +			n = evbuffer_write_sendfile(buffer, fd, howmuch);
  1.2361 +		else {
  1.2362 +#endif
  1.2363 +#ifdef USE_IOVEC_IMPL
  1.2364 +		n = evbuffer_write_iovec(buffer, fd, howmuch);
  1.2365 +#elif defined(WIN32)
  1.2366 +		/* XXX(nickm) Don't disable this code until we know if
  1.2367 +		 * the WSARecv code above works. */
  1.2368 +		void *p = evbuffer_pullup(buffer, howmuch);
  1.2369 +		n = send(fd, p, howmuch, 0);
  1.2370 +#else
  1.2371 +		void *p = evbuffer_pullup(buffer, howmuch);
  1.2372 +		n = write(fd, p, howmuch);
  1.2373 +#endif
  1.2374 +#ifdef USE_SENDFILE
  1.2375 +		}
  1.2376 +#endif
  1.2377 +	}
  1.2378 +
  1.2379 +	if (n > 0)
  1.2380 +		evbuffer_drain(buffer, n);
  1.2381 +
  1.2382 +done:
  1.2383 +	EVBUFFER_UNLOCK(buffer);
  1.2384 +	return (n);
  1.2385 +}
  1.2386 +
  1.2387 +int
  1.2388 +evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
  1.2389 +{
  1.2390 +	return evbuffer_write_atmost(buffer, fd, -1);
  1.2391 +}
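
/* Illustrative usage sketch (editor's addition, not upstream libevent code):
 * draining a buffer to a socket with an explicit cap via
 * evbuffer_write_atmost().  Bytes that were written are removed from the
 * buffer, so the caller only needs to wait for another writable event while
 * data remains.  Kept inside #if 0; uses only the public API from
 * event2/buffer.h. */
#if 0
static void
example_on_writable(evutil_socket_t fd, struct evbuffer *output)
{
	/* Write at most 16 KiB per wakeup so one fd cannot starve others. */
	int n = evbuffer_write_atmost(output, fd, 16384);
	if (n < 0) {
		/* error or would-block; inspect errno */
	} else if (evbuffer_get_length(output) > 0) {
		/* data still queued; re-arm the write event */
	}
}
#endif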
  1.2392 +
  1.2393 +unsigned char *
  1.2394 +evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
  1.2395 +{
  1.2396 +	unsigned char *search;
  1.2397 +	struct evbuffer_ptr ptr;
  1.2398 +
  1.2399 +	EVBUFFER_LOCK(buffer);
  1.2400 +
  1.2401 +	ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
  1.2402 +	if (ptr.pos < 0) {
  1.2403 +		search = NULL;
  1.2404 +	} else {
  1.2405 +		search = evbuffer_pullup(buffer, ptr.pos + len);
  1.2406 +		if (search)
  1.2407 +			search += ptr.pos;
  1.2408 +	}
  1.2409 +	EVBUFFER_UNLOCK(buffer);
  1.2410 +	return search;
  1.2411 +}
  1.2412 +
  1.2413 +int
  1.2414 +evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
  1.2415 +    size_t position, enum evbuffer_ptr_how how)
  1.2416 +{
  1.2417 +	size_t left = position;
  1.2418 +	struct evbuffer_chain *chain = NULL;
  1.2419 +
  1.2420 +	EVBUFFER_LOCK(buf);
  1.2421 +
  1.2422 +	switch (how) {
  1.2423 +	case EVBUFFER_PTR_SET:
  1.2424 +		chain = buf->first;
  1.2425 +		pos->pos = position;
  1.2426 +		position = 0;
  1.2427 +		break;
  1.2428 +	case EVBUFFER_PTR_ADD:
  1.2429 +		/* this avoids iterating over all previous chains if
  1.2430 +		   we just want to advance the position */
  1.2431 +		chain = pos->_internal.chain;
  1.2432 +		pos->pos += position;
  1.2433 +		position = pos->_internal.pos_in_chain;
  1.2434 +		break;
  1.2435 +	}
  1.2436 +
  1.2437 +	while (chain && position + left >= chain->off) {
  1.2438 +		left -= chain->off - position;
  1.2439 +		chain = chain->next;
  1.2440 +		position = 0;
  1.2441 +	}
  1.2442 +	if (chain) {
  1.2443 +		pos->_internal.chain = chain;
  1.2444 +		pos->_internal.pos_in_chain = position + left;
  1.2445 +	} else {
  1.2446 +		pos->_internal.chain = NULL;
  1.2447 +		pos->pos = -1;
  1.2448 +	}
  1.2449 +
  1.2450 +	EVBUFFER_UNLOCK(buf);
  1.2451 +
  1.2452 +	return chain != NULL ? 0 : -1;
  1.2453 +}
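
/* Illustrative usage sketch (editor's addition, not upstream libevent code):
 * positioning an evbuffer_ptr with EVBUFFER_PTR_SET and then advancing it
 * with EVBUFFER_PTR_ADD, which avoids re-walking the chains from the start
 * of the buffer.  Kept inside #if 0; uses only the public API from
 * event2/buffer.h. */
#if 0
static void
example_ptr_walk(struct evbuffer *buf)
{
	struct evbuffer_ptr p;
	/* Jump to absolute offset 10 ... */
	if (evbuffer_ptr_set(buf, &p, 10, EVBUFFER_PTR_SET) == 0) {
		/* ... then move 5 bytes further, relative to p. */
		evbuffer_ptr_set(buf, &p, 5, EVBUFFER_PTR_ADD);
	}
}
#endif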
  1.2454 +
  1.2455 +/**
  1.2456 +   Compare the bytes in buf at position pos to the len bytes in mem.  Return
  1.2457 +   less than 0, 0, or greater than 0, as memcmp does.
  1.2458 + */
  1.2459 +static int
  1.2460 +evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
  1.2461 +    const char *mem, size_t len)
  1.2462 +{
  1.2463 +	struct evbuffer_chain *chain;
  1.2464 +	size_t position;
  1.2465 +	int r;
  1.2466 +
  1.2467 +	ASSERT_EVBUFFER_LOCKED(buf);
  1.2468 +
  1.2469 +	if (pos->pos + len > buf->total_len)
  1.2470 +		return -1;
  1.2471 +
  1.2472 +	chain = pos->_internal.chain;
  1.2473 +	position = pos->_internal.pos_in_chain;
  1.2474 +	while (len && chain) {
  1.2475 +		size_t n_comparable;
  1.2476 +		if (len + position > chain->off)
  1.2477 +			n_comparable = chain->off - position;
  1.2478 +		else
  1.2479 +			n_comparable = len;
  1.2480 +		r = memcmp(chain->buffer + chain->misalign + position, mem,
  1.2481 +		    n_comparable);
  1.2482 +		if (r)
  1.2483 +			return r;
  1.2484 +		mem += n_comparable;
  1.2485 +		len -= n_comparable;
  1.2486 +		position = 0;
  1.2487 +		chain = chain->next;
  1.2488 +	}
  1.2489 +
  1.2490 +	return 0;
  1.2491 +}
  1.2492 +
  1.2493 +struct evbuffer_ptr
  1.2494 +evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
  1.2495 +{
  1.2496 +	return evbuffer_search_range(buffer, what, len, start, NULL);
  1.2497 +}
  1.2498 +
  1.2499 +struct evbuffer_ptr
  1.2500 +evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
  1.2501 +{
  1.2502 +	struct evbuffer_ptr pos;
  1.2503 +	struct evbuffer_chain *chain, *last_chain = NULL;
  1.2504 +	const unsigned char *p;
  1.2505 +	char first;
  1.2506 +
  1.2507 +	EVBUFFER_LOCK(buffer);
  1.2508 +
  1.2509 +	if (start) {
  1.2510 +		memcpy(&pos, start, sizeof(pos));
  1.2511 +		chain = pos._internal.chain;
  1.2512 +	} else {
  1.2513 +		pos.pos = 0;
  1.2514 +		chain = pos._internal.chain = buffer->first;
  1.2515 +		pos._internal.pos_in_chain = 0;
  1.2516 +	}
  1.2517 +
  1.2518 +	if (end)
  1.2519 +		last_chain = end->_internal.chain;
  1.2520 +
  1.2521 +	if (!len || len > EV_SSIZE_MAX)
  1.2522 +		goto done;
  1.2523 +
  1.2524 +	first = what[0];
  1.2525 +
  1.2526 +	while (chain) {
  1.2527 +		const unsigned char *start_at =
  1.2528 +		    chain->buffer + chain->misalign +
  1.2529 +		    pos._internal.pos_in_chain;
  1.2530 +		p = memchr(start_at, first,
  1.2531 +		    chain->off - pos._internal.pos_in_chain);
  1.2532 +		if (p) {
  1.2533 +			pos.pos += p - start_at;
  1.2534 +			pos._internal.pos_in_chain += p - start_at;
  1.2535 +			if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
  1.2536 +				if (end && pos.pos + (ev_ssize_t)len > end->pos)
  1.2537 +					goto not_found;
  1.2538 +				else
  1.2539 +					goto done;
  1.2540 +			}
  1.2541 +			++pos.pos;
  1.2542 +			++pos._internal.pos_in_chain;
  1.2543 +			if (pos._internal.pos_in_chain == chain->off) {
  1.2544 +				chain = pos._internal.chain = chain->next;
  1.2545 +				pos._internal.pos_in_chain = 0;
  1.2546 +			}
  1.2547 +		} else {
  1.2548 +			if (chain == last_chain)
  1.2549 +				goto not_found;
  1.2550 +			pos.pos += chain->off - pos._internal.pos_in_chain;
  1.2551 +			chain = pos._internal.chain = chain->next;
  1.2552 +			pos._internal.pos_in_chain = 0;
  1.2553 +		}
  1.2554 +	}
  1.2555 +
  1.2556 +not_found:
  1.2557 +	pos.pos = -1;
  1.2558 +	pos._internal.chain = NULL;
  1.2559 +done:
  1.2560 +	EVBUFFER_UNLOCK(buffer);
  1.2561 +	return pos;
  1.2562 +}
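
/* Illustrative usage sketch (editor's addition, not upstream libevent code):
 * locating a delimiter with evbuffer_search().  The returned position is -1
 * when the pattern is absent, otherwise the byte offset of the first match.
 * Kept inside #if 0; uses only the public API from event2/buffer.h. */
#if 0
static ev_ssize_t
example_find_header_end(struct evbuffer *buf)
{
	struct evbuffer_ptr hit = evbuffer_search(buf, "\r\n\r\n", 4, NULL);
	return hit.pos; /* -1 if the blank line was not found */
}
#endif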
  1.2563 +
  1.2564 +int
  1.2565 +evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
  1.2566 +    struct evbuffer_ptr *start_at,
  1.2567 +    struct evbuffer_iovec *vec, int n_vec)
  1.2568 +{
  1.2569 +	struct evbuffer_chain *chain;
  1.2570 +	int idx = 0;
  1.2571 +	ev_ssize_t len_so_far = 0;
  1.2572 +
  1.2573 +	EVBUFFER_LOCK(buffer);
  1.2574 +
  1.2575 +	if (start_at) {
  1.2576 +		chain = start_at->_internal.chain;
  1.2577 +		len_so_far = chain->off
  1.2578 +		    - start_at->_internal.pos_in_chain;
  1.2579 +		idx = 1;
  1.2580 +		if (n_vec > 0) {
  1.2581 +			vec[0].iov_base = chain->buffer + chain->misalign
  1.2582 +			    + start_at->_internal.pos_in_chain;
  1.2583 +			vec[0].iov_len = len_so_far;
  1.2584 +		}
  1.2585 +		chain = chain->next;
  1.2586 +	} else {
  1.2587 +		chain = buffer->first;
  1.2588 +	}
  1.2589 +
  1.2590 +	if (n_vec == 0 && len < 0) {
  1.2591 +		/* If no vectors are provided and they asked for "everything",
  1.2592 +		 * pretend they asked for the actual available amount. */
  1.2593 +		len = buffer->total_len - len_so_far;
  1.2594 +	}
  1.2595 +
  1.2596 +	while (chain) {
  1.2597 +		if (len >= 0 && len_so_far >= len)
  1.2598 +			break;
  1.2599 +		if (idx<n_vec) {
  1.2600 +			vec[idx].iov_base = chain->buffer + chain->misalign;
  1.2601 +			vec[idx].iov_len = chain->off;
  1.2602 +		} else if (len<0) {
  1.2603 +			break;
  1.2604 +		}
  1.2605 +		++idx;
  1.2606 +		len_so_far += chain->off;
  1.2607 +		chain = chain->next;
  1.2608 +	}
  1.2609 +
  1.2610 +	EVBUFFER_UNLOCK(buffer);
  1.2611 +
  1.2612 +	return idx;
  1.2613 +}
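
/* Illustrative usage sketch (editor's addition, not upstream libevent code):
 * inspecting buffered data without copying or draining it.  evbuffer_peek()
 * fills at most n_vec iovecs and returns how many extents would be needed
 * to cover the requested length, which can exceed n_vec.  Kept inside #if 0;
 * uses only the public API from event2/buffer.h. */
#if 0
static void
example_peek(struct evbuffer *buf)
{
	struct evbuffer_iovec vec[4];
	int i, n = evbuffer_peek(buf, 256, NULL, vec, 4);
	if (n > 4)
		n = 4; /* more extents exist than we supplied vectors for */
	for (i = 0; i < n; ++i) {
		/* vec[i].iov_base / vec[i].iov_len point into the buffer;
		 * nothing is copied or removed. */
	}
}
#endif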
  1.2614 +
  1.2615 +
  1.2616 +int
  1.2617 +evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
  1.2618 +{
  1.2619 +	char *buffer;
  1.2620 +	size_t space;
  1.2621 +	int sz, result = -1;
  1.2622 +	va_list aq;
  1.2623 +	struct evbuffer_chain *chain;
  1.2624 +
  1.2625 +
  1.2626 +	EVBUFFER_LOCK(buf);
  1.2627 +
  1.2628 +	if (buf->freeze_end) {
  1.2629 +		goto done;
  1.2630 +	}
  1.2631 +
  1.2632 +	/* make sure that at least some space is available */
  1.2633 +	if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)
  1.2634 +		goto done;
  1.2635 +
  1.2636 +	for (;;) {
  1.2637 +#if 0
  1.2638 +		size_t used = chain->misalign + chain->off;
  1.2639 +		buffer = (char *)chain->buffer + chain->misalign + chain->off;
  1.2640 +		EVUTIL_ASSERT(chain->buffer_len >= used);
  1.2641 +		space = chain->buffer_len - used;
  1.2642 +#endif
  1.2643 +		buffer = (char*) CHAIN_SPACE_PTR(chain);
  1.2644 +		space = (size_t) CHAIN_SPACE_LEN(chain);
  1.2645 +
  1.2646 +#ifndef va_copy
  1.2647 +#define	va_copy(dst, src)	memcpy(&(dst), &(src), sizeof(va_list))
  1.2648 +#endif
  1.2649 +		va_copy(aq, ap);
  1.2650 +
  1.2651 +		sz = evutil_vsnprintf(buffer, space, fmt, aq);
  1.2652 +
  1.2653 +		va_end(aq);
  1.2654 +
  1.2655 +		if (sz < 0)
  1.2656 +			goto done;
  1.2657 +		if ((size_t)sz < space) {
  1.2658 +			chain->off += sz;
  1.2659 +			buf->total_len += sz;
  1.2660 +			buf->n_add_for_cb += sz;
  1.2661 +
  1.2662 +			advance_last_with_data(buf);
  1.2663 +			evbuffer_invoke_callbacks(buf);
  1.2664 +			result = sz;
  1.2665 +			goto done;
  1.2666 +		}
  1.2667 +		if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
  1.2668 +			goto done;
  1.2669 +	}
  1.2670 +	/* NOTREACHED */
  1.2671 +
  1.2672 +done:
  1.2673 +	EVBUFFER_UNLOCK(buf);
  1.2674 +	return result;
  1.2675 +}
  1.2676 +
  1.2677 +int
  1.2678 +evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
  1.2679 +{
  1.2680 +	int res = -1;
  1.2681 +	va_list ap;
  1.2682 +
  1.2683 +	va_start(ap, fmt);
  1.2684 +	res = evbuffer_add_vprintf(buf, fmt, ap);
  1.2685 +	va_end(ap);
  1.2686 +
  1.2687 +	return (res);
  1.2688 +}
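
/* Illustrative usage sketch (editor's addition, not upstream libevent code):
 * formatted appends with evbuffer_add_printf(); the return value is the
 * number of bytes appended, or -1 on failure.  Kept inside #if 0; uses only
 * the public API from event2/buffer.h. */
#if 0
static int
example_add_status_line(struct evbuffer *out, int code, const char *reason)
{
	return evbuffer_add_printf(out, "HTTP/1.1 %d %s\r\n", code, reason);
}
#endif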
  1.2689 +
  1.2690 +int
  1.2691 +evbuffer_add_reference(struct evbuffer *outbuf,
  1.2692 +    const void *data, size_t datlen,
  1.2693 +    evbuffer_ref_cleanup_cb cleanupfn, void *extra)
  1.2694 +{
  1.2695 +	struct evbuffer_chain *chain;
  1.2696 +	struct evbuffer_chain_reference *info;
  1.2697 +	int result = -1;
  1.2698 +
  1.2699 +	chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
  1.2700 +	if (!chain)
  1.2701 +		return (-1);
  1.2702 +	chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
  1.2703 +	chain->buffer = (u_char *)data;
  1.2704 +	chain->buffer_len = datlen;
  1.2705 +	chain->off = datlen;
  1.2706 +
  1.2707 +	info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
  1.2708 +	info->cleanupfn = cleanupfn;
  1.2709 +	info->extra = extra;
  1.2710 +
  1.2711 +	EVBUFFER_LOCK(outbuf);
  1.2712 +	if (outbuf->freeze_end) {
  1.2713 +		/* don't call chain_free; we do not want to actually invoke
  1.2714 +		 * the cleanup function */
  1.2715 +		mm_free(chain);
  1.2716 +		goto done;
  1.2717 +	}
  1.2718 +	evbuffer_chain_insert(outbuf, chain);
  1.2719 +	outbuf->n_add_for_cb += datlen;
  1.2720 +
  1.2721 +	evbuffer_invoke_callbacks(outbuf);
  1.2722 +
  1.2723 +	result = 0;
  1.2724 +done:
  1.2725 +	EVBUFFER_UNLOCK(outbuf);
  1.2726 +
  1.2727 +	return result;
  1.2728 +}
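
/* Illustrative usage sketch (editor's addition, not upstream libevent code):
 * adding a caller-owned block by reference.  The buffer never copies the
 * bytes; the cleanup callback fires once the referencing chain is released,
 * which is the point at which freeing the block becomes safe.  Kept inside
 * #if 0; uses only the public API from event2/buffer.h. */
#if 0
static void
example_ref_cleanup(const void *data, size_t datalen, void *extra)
{
	(void)datalen;
	(void)extra;
	free((void *)data);
}

static int
example_add_by_reference(struct evbuffer *out)
{
	char *block = malloc(1024);
	if (block == NULL)
		return -1;
	memset(block, 'x', 1024);
	if (evbuffer_add_reference(out, block, 1024,
	    example_ref_cleanup, NULL) < 0) {
		/* On failure the cleanup callback is not invoked, so the
		 * block is still ours to free. */
		free(block);
		return -1;
	}
	return 0;
}
#endif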
  1.2729 +
  1.2730 +/* TODO(niels): maybe we don't want to own the fd; in that case, we
  1.2731 + * should dup it - dup is cheap.  Perhaps we should use a callback
  1.2732 + * instead?
  1.2733 + */
  1.2734 +/* TODO(niels): we may want to add a way to automagically convert to mmap,
  1.2735 + * in case evbuffer_remove() or evbuffer_pullup() are being used.
  1.2736 + */
  1.2737 +int
  1.2738 +evbuffer_add_file(struct evbuffer *outbuf, int fd,
  1.2739 +    ev_off_t offset, ev_off_t length)
  1.2740 +{
  1.2741 +#if defined(USE_SENDFILE) || defined(_EVENT_HAVE_MMAP)
  1.2742 +	struct evbuffer_chain *chain;
  1.2743 +	struct evbuffer_chain_fd *info;
  1.2744 +#endif
  1.2745 +#if defined(USE_SENDFILE)
  1.2746 +	int sendfile_okay = 1;
  1.2747 +#endif
  1.2748 +	int ok = 1;
  1.2749 +
  1.2750 +#if defined(USE_SENDFILE)
  1.2751 +	if (use_sendfile) {
  1.2752 +		EVBUFFER_LOCK(outbuf);
  1.2753 +		sendfile_okay = outbuf->flags & EVBUFFER_FLAG_DRAINS_TO_FD;
  1.2754 +		EVBUFFER_UNLOCK(outbuf);
  1.2755 +	}
  1.2756 +
  1.2757 +	if (use_sendfile && sendfile_okay) {
  1.2758 +		chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
  1.2759 +		if (chain == NULL) {
  1.2760 +			event_warn("%s: out of memory", __func__);
  1.2761 +			return (-1);
  1.2762 +		}
  1.2763 +
  1.2764 +		chain->flags |= EVBUFFER_SENDFILE | EVBUFFER_IMMUTABLE;
  1.2765 +		chain->buffer = NULL;	/* no reading possible */
  1.2766 +		chain->buffer_len = length + offset;
  1.2767 +		chain->off = length;
  1.2768 +		chain->misalign = offset;
  1.2769 +
  1.2770 +		info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
  1.2771 +		info->fd = fd;
  1.2772 +
  1.2773 +		EVBUFFER_LOCK(outbuf);
  1.2774 +		if (outbuf->freeze_end) {
  1.2775 +			mm_free(chain);
  1.2776 +			ok = 0;
  1.2777 +		} else {
  1.2778 +			outbuf->n_add_for_cb += length;
  1.2779 +			evbuffer_chain_insert(outbuf, chain);
  1.2780 +		}
  1.2781 +	} else
  1.2782 +#endif
  1.2783 +#if defined(_EVENT_HAVE_MMAP)
  1.2784 +	if (use_mmap) {
  1.2785 +		void *mapped = mmap(NULL, length + offset, PROT_READ,
  1.2786 +#ifdef MAP_NOCACHE
  1.2787 +		    MAP_NOCACHE |
  1.2788 +#endif
  1.2789 +#ifdef MAP_FILE
  1.2790 +		    MAP_FILE |
  1.2791 +#endif
  1.2792 +		    MAP_PRIVATE,
  1.2793 +		    fd, 0);
  1.2794 +		/* some mmap implementations require offset to be a multiple of
  1.2795 +		 * the page size.  most users of this api are likely to use 0,
  1.2796 +		 * so mapping everything is not likely to be a problem.
  1.2797 +		 * TODO(niels): determine page size and round offset to that
  1.2798 +		 * page size to avoid mapping too much memory.
  1.2799 +		 */
  1.2800 +		if (mapped == MAP_FAILED) {
  1.2801 +			event_warn("%s: mmap(%d, %d, %zu) failed",
  1.2802 +			    __func__, fd, 0, (size_t)(offset + length));
  1.2803 +			return (-1);
  1.2804 +		}
  1.2805 +		chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
  1.2806 +		if (chain == NULL) {
  1.2807 +			event_warn("%s: out of memory", __func__);
  1.2808 +			munmap(mapped, length);
  1.2809 +			return (-1);
  1.2810 +		}
  1.2811 +
  1.2812 +		chain->flags |= EVBUFFER_MMAP | EVBUFFER_IMMUTABLE;
  1.2813 +		chain->buffer = mapped;
  1.2814 +		chain->buffer_len = length + offset;
  1.2815 +		chain->off = length + offset;
  1.2816 +
  1.2817 +		info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
  1.2818 +		info->fd = fd;
  1.2819 +
  1.2820 +		EVBUFFER_LOCK(outbuf);
  1.2821 +		if (outbuf->freeze_end) {
  1.2822 +			info->fd = -1;
  1.2823 +			evbuffer_chain_free(chain);
  1.2824 +			ok = 0;
  1.2825 +		} else {
  1.2826 +			outbuf->n_add_for_cb += length;
  1.2827 +
  1.2828 +			evbuffer_chain_insert(outbuf, chain);
  1.2829 +
  1.2830 +			/* we need to subtract whatever we don't need */
  1.2831 +			evbuffer_drain(outbuf, offset);
  1.2832 +		}
  1.2833 +	} else
  1.2834 +#endif
  1.2835 +	{
  1.2836 +		/* the default implementation */
  1.2837 +		struct evbuffer *tmp = evbuffer_new();
  1.2838 +		ev_ssize_t read;
  1.2839 +
  1.2840 +		if (tmp == NULL)
  1.2841 +			return (-1);
  1.2842 +
  1.2843 +#ifdef WIN32
  1.2844 +#define lseek _lseeki64
  1.2845 +#endif
  1.2846 +		if (lseek(fd, offset, SEEK_SET) == -1) {
  1.2847 +			evbuffer_free(tmp);
  1.2848 +			return (-1);
  1.2849 +		}
  1.2850 +
  1.2851 +		/* we add everything to a temporary buffer, so that we
  1.2852 +		 * can abort without side effects if the read fails.
  1.2853 +		 */
  1.2854 +		while (length) {
  1.2855 +			read = evbuffer_readfile(tmp, fd, (ev_ssize_t)length);
  1.2856 +			if (read == -1) {
  1.2857 +				evbuffer_free(tmp);
  1.2858 +				return (-1);
  1.2859 +			}
  1.2860 +
  1.2861 +			length -= read;
  1.2862 +		}
  1.2863 +
  1.2864 +		EVBUFFER_LOCK(outbuf);
  1.2865 +		if (outbuf->freeze_end) {
  1.2866 +			evbuffer_free(tmp);
  1.2867 +			ok = 0;
  1.2868 +		} else {
  1.2869 +			evbuffer_add_buffer(outbuf, tmp);
  1.2870 +			evbuffer_free(tmp);
  1.2871 +
  1.2872 +#ifdef WIN32
  1.2873 +#define close _close
  1.2874 +#endif
  1.2875 +			close(fd);
  1.2876 +		}
  1.2877 +	}
  1.2878 +
  1.2879 +	if (ok)
  1.2880 +		evbuffer_invoke_callbacks(outbuf);
  1.2881 +	EVBUFFER_UNLOCK(outbuf);
  1.2882 +
  1.2883 +	return ok ? 0 : -1;
  1.2884 +}
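
/* Illustrative usage sketch (editor's addition, not upstream libevent code):
 * queueing a segment of a file for output.  On success the buffer takes
 * ownership of the descriptor and closes it when the data has been handed
 * off; the bytes travel via sendfile, mmap, or plain reads depending on
 * platform support.  Kept inside #if 0; the open() call assumes a POSIX
 * environment with <fcntl.h> and <unistd.h> available. */
#if 0
static int
example_queue_file(struct evbuffer *out, const char *path)
{
	int fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	/* Queue the first 4096 bytes, starting at offset 0. */
	if (evbuffer_add_file(out, fd, 0, 4096) < 0) {
		/* The sketch assumes the caller still owns fd on failure. */
		close(fd);
		return -1;
	}
	return 0; /* fd now belongs to the buffer */
}
#endif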
  1.2885 +
  1.2886 +
  1.2887 +void
  1.2888 +evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
  1.2889 +{
  1.2890 +	EVBUFFER_LOCK(buffer);
  1.2891 +
  1.2892 +	if (!TAILQ_EMPTY(&buffer->callbacks))
  1.2893 +		evbuffer_remove_all_callbacks(buffer);
  1.2894 +
  1.2895 +	if (cb) {
  1.2896 +		struct evbuffer_cb_entry *ent =
  1.2897 +		    evbuffer_add_cb(buffer, NULL, cbarg);
  1.2898 +		ent->cb.cb_obsolete = cb;
  1.2899 +		ent->flags |= EVBUFFER_CB_OBSOLETE;
  1.2900 +	}
  1.2901 +	EVBUFFER_UNLOCK(buffer);
  1.2902 +}
  1.2903 +
  1.2904 +struct evbuffer_cb_entry *
  1.2905 +evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
  1.2906 +{
  1.2907 +	struct evbuffer_cb_entry *e;
  1.2908 +	if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
  1.2909 +		return NULL;
  1.2910 +	EVBUFFER_LOCK(buffer);
  1.2911 +	e->cb.cb_func = cb;
  1.2912 +	e->cbarg = cbarg;
  1.2913 +	e->flags = EVBUFFER_CB_ENABLED;
  1.2914 +	TAILQ_INSERT_HEAD(&buffer->callbacks, e, next);
  1.2915 +	EVBUFFER_UNLOCK(buffer);
  1.2916 +	return e;
  1.2917 +}
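
/* Illustrative usage sketch (editor's addition, not upstream libevent code):
 * registering a callback that observes how many bytes each change added or
 * drained, via the evbuffer_cb_info structure.  Kept inside #if 0; uses
 * only the public API from event2/buffer.h. */
#if 0
static void
example_count_cb(struct evbuffer *buffer, const struct evbuffer_cb_info *info,
    void *arg)
{
	size_t *total_added = arg;
	(void)buffer;
	*total_added += info->n_added;
}

static struct evbuffer_cb_entry *
example_register_counter(struct evbuffer *buf, size_t *total_added)
{
	return evbuffer_add_cb(buf, example_count_cb, total_added);
}
#endif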
  1.2918 +
  1.2919 +int
  1.2920 +evbuffer_remove_cb_entry(struct evbuffer *buffer,
  1.2921 +			 struct evbuffer_cb_entry *ent)
  1.2922 +{
  1.2923 +	EVBUFFER_LOCK(buffer);
  1.2924 +	TAILQ_REMOVE(&buffer->callbacks, ent, next);
  1.2925 +	EVBUFFER_UNLOCK(buffer);
  1.2926 +	mm_free(ent);
  1.2927 +	return 0;
  1.2928 +}
  1.2929 +
  1.2930 +int
  1.2931 +evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
  1.2932 +{
  1.2933 +	struct evbuffer_cb_entry *cbent;
  1.2934 +	int result = -1;
  1.2935 +	EVBUFFER_LOCK(buffer);
  1.2936 +	TAILQ_FOREACH(cbent, &buffer->callbacks, next) {
  1.2937 +		if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
  1.2938 +			result = evbuffer_remove_cb_entry(buffer, cbent);
  1.2939 +			goto done;
  1.2940 +		}
  1.2941 +	}
  1.2942 +done:
  1.2943 +	EVBUFFER_UNLOCK(buffer);
  1.2944 +	return result;
  1.2945 +}
  1.2946 +
  1.2947 +int
  1.2948 +evbuffer_cb_set_flags(struct evbuffer *buffer,
  1.2949 +		      struct evbuffer_cb_entry *cb, ev_uint32_t flags)
  1.2950 +{
  1.2951 +	/* the user isn't allowed to mess with these. */
  1.2952 +	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
  1.2953 +	EVBUFFER_LOCK(buffer);
  1.2954 +	cb->flags |= flags;
  1.2955 +	EVBUFFER_UNLOCK(buffer);
  1.2956 +	return 0;
  1.2957 +}
  1.2958 +
  1.2959 +int
  1.2960 +evbuffer_cb_clear_flags(struct evbuffer *buffer,
  1.2961 +		      struct evbuffer_cb_entry *cb, ev_uint32_t flags)
  1.2962 +{
  1.2963 +	/* the user isn't allowed to mess with these. */
  1.2964 +	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
  1.2965 +	EVBUFFER_LOCK(buffer);
  1.2966 +	cb->flags &= ~flags;
  1.2967 +	EVBUFFER_UNLOCK(buffer);
  1.2968 +	return 0;
  1.2969 +}
  1.2970 +
  1.2971 +int
  1.2972 +evbuffer_freeze(struct evbuffer *buffer, int start)
  1.2973 +{
  1.2974 +	EVBUFFER_LOCK(buffer);
  1.2975 +	if (start)
  1.2976 +		buffer->freeze_start = 1;
  1.2977 +	else
  1.2978 +		buffer->freeze_end = 1;
  1.2979 +	EVBUFFER_UNLOCK(buffer);
  1.2980 +	return 0;
  1.2981 +}
  1.2982 +
  1.2983 +int
  1.2984 +evbuffer_unfreeze(struct evbuffer *buffer, int start)
  1.2985 +{
  1.2986 +	EVBUFFER_LOCK(buffer);
  1.2987 +	if (start)
  1.2988 +		buffer->freeze_start = 0;
  1.2989 +	else
  1.2990 +		buffer->freeze_end = 0;
  1.2991 +	EVBUFFER_UNLOCK(buffer);
  1.2992 +	return 0;
  1.2993 +}
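
/* Illustrative usage sketch (editor's addition, not upstream libevent code):
 * freezing the front of a buffer so already-queued bytes cannot be drained
 * while another component is still inspecting them, then unfreezing when
 * done.  Kept inside #if 0; uses only the public API from event2/buffer.h. */
#if 0
static void
example_protect_front(struct evbuffer *buf)
{
	evbuffer_freeze(buf, 1);   /* drains from the front now fail */
	/* ... examine the buffered data, e.g. with evbuffer_peek() ... */
	evbuffer_unfreeze(buf, 1); /* draining is allowed again */
}
#endif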
  1.2994 +
  1.2995 +#if 0
  1.2996 +void
  1.2997 +evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
  1.2998 +{
  1.2999 +	if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) {
  1.3000 +		cb->size_before_suspend = evbuffer_get_length(buffer);
  1.3001 +		cb->flags |= EVBUFFER_CB_SUSPENDED;
  1.3002 +	}
  1.3003 +}
  1.3004 +
  1.3005 +void
  1.3006 +evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
  1.3007 +{
  1.3008 +	if ((cb->flags & EVBUFFER_CB_SUSPENDED)) {
  1.3009 +		unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);
  1.3010 +		size_t sz = cb->size_before_suspend;
  1.3011 +		cb->flags &= ~(EVBUFFER_CB_SUSPENDED|
  1.3012 +			       EVBUFFER_CB_CALL_ON_UNSUSPEND);
  1.3013 +		cb->size_before_suspend = 0;
  1.3014 +		if (call && (cb->flags & EVBUFFER_CB_ENABLED)) {
  1.3015 +			cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg);
  1.3016 +		}
  1.3017 +	}
  1.3018 +}
  1.3019 +#endif
  1.3020 +
  1.3021 +/* These hooks are exposed so that the unit tests can temporarily disable
  1.3022 + * sendfile support in order to test mmap, or disable both in order to test
  1.3023 + * linear file access. Don't use them; if we need to add a way to disable
  1.3024 + * sendfile support in the future, it will probably be via an alternate
  1.3025 + * version of evbuffer_add_file() with a 'flags' argument.
  1.3026 + */
  1.3027 +int _evbuffer_testing_use_sendfile(void);
  1.3028 +int _evbuffer_testing_use_mmap(void);
  1.3029 +int _evbuffer_testing_use_linear_file_access(void);
  1.3030 +
  1.3031 +int
  1.3032 +_evbuffer_testing_use_sendfile(void)
  1.3033 +{
  1.3034 +	int ok = 0;
  1.3035 +#ifdef USE_SENDFILE
  1.3036 +	use_sendfile = 1;
  1.3037 +	ok = 1;
  1.3038 +#endif
  1.3039 +#ifdef _EVENT_HAVE_MMAP
  1.3040 +	use_mmap = 0;
  1.3041 +#endif
  1.3042 +	return ok;
  1.3043 +}
  1.3044 +int
  1.3045 +_evbuffer_testing_use_mmap(void)
  1.3046 +{
  1.3047 +	int ok = 0;
  1.3048 +#ifdef USE_SENDFILE
  1.3049 +	use_sendfile = 0;
  1.3050 +#endif
  1.3051 +#ifdef _EVENT_HAVE_MMAP
  1.3052 +	use_mmap = 1;
  1.3053 +	ok = 1;
  1.3054 +#endif
  1.3055 +	return ok;
  1.3056 +}
  1.3057 +int
  1.3058 +_evbuffer_testing_use_linear_file_access(void)
  1.3059 +{
  1.3060 +#ifdef USE_SENDFILE
  1.3061 +	use_sendfile = 0;
  1.3062 +#endif
  1.3063 +#ifdef _EVENT_HAVE_MMAP
  1.3064 +	use_mmap = 0;
  1.3065 +#endif
  1.3066 +	return 1;
  1.3067 +}
