/*
 * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
michael@0: */ michael@0: michael@0: #include "event2/event-config.h" michael@0: michael@0: #ifdef WIN32 michael@0: #include michael@0: #include michael@0: #include michael@0: #endif michael@0: michael@0: #ifdef _EVENT_HAVE_VASPRINTF michael@0: /* If we have vasprintf, we need to define this before we include stdio.h. */ michael@0: #define _GNU_SOURCE michael@0: #endif michael@0: michael@0: #include michael@0: michael@0: #ifdef _EVENT_HAVE_SYS_TIME_H michael@0: #include michael@0: #endif michael@0: michael@0: #ifdef _EVENT_HAVE_SYS_SOCKET_H michael@0: #include michael@0: #endif michael@0: michael@0: #ifdef _EVENT_HAVE_SYS_UIO_H michael@0: #include michael@0: #endif michael@0: michael@0: #ifdef _EVENT_HAVE_SYS_IOCTL_H michael@0: #include michael@0: #endif michael@0: michael@0: #ifdef _EVENT_HAVE_SYS_MMAN_H michael@0: #include michael@0: #endif michael@0: michael@0: #ifdef _EVENT_HAVE_SYS_SENDFILE_H michael@0: #include michael@0: #endif michael@0: michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #ifdef _EVENT_HAVE_STDARG_H michael@0: #include michael@0: #endif michael@0: #ifdef _EVENT_HAVE_UNISTD_H michael@0: #include michael@0: #endif michael@0: #include michael@0: michael@0: #include "event2/event.h" michael@0: #include "event2/buffer.h" michael@0: #include "event2/buffer_compat.h" michael@0: #include "event2/bufferevent.h" michael@0: #include "event2/bufferevent_compat.h" michael@0: #include "event2/bufferevent_struct.h" michael@0: #include "event2/thread.h" michael@0: #include "event2/event-config.h" michael@0: #include "log-internal.h" michael@0: #include "mm-internal.h" michael@0: #include "util-internal.h" michael@0: #include "evthread-internal.h" michael@0: #include "evbuffer-internal.h" michael@0: #include "bufferevent-internal.h" michael@0: michael@0: /* some systems do not have MAP_FAILED */ michael@0: #ifndef MAP_FAILED michael@0: #define MAP_FAILED ((void *)-1) michael@0: #endif michael@0: michael@0: /* send file 
support */ michael@0: #if defined(_EVENT_HAVE_SYS_SENDFILE_H) && defined(_EVENT_HAVE_SENDFILE) && defined(__linux__) michael@0: #define USE_SENDFILE 1 michael@0: #define SENDFILE_IS_LINUX 1 michael@0: #elif defined(_EVENT_HAVE_SENDFILE) && defined(__FreeBSD__) michael@0: #define USE_SENDFILE 1 michael@0: #define SENDFILE_IS_FREEBSD 1 michael@0: #elif defined(_EVENT_HAVE_SENDFILE) && defined(__APPLE__) michael@0: #define USE_SENDFILE 1 michael@0: #define SENDFILE_IS_MACOSX 1 michael@0: #elif defined(_EVENT_HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__) michael@0: #define USE_SENDFILE 1 michael@0: #define SENDFILE_IS_SOLARIS 1 michael@0: #endif michael@0: michael@0: #ifdef USE_SENDFILE michael@0: static int use_sendfile = 1; michael@0: #endif michael@0: #ifdef _EVENT_HAVE_MMAP michael@0: static int use_mmap = 1; michael@0: #endif michael@0: michael@0: michael@0: /* Mask of user-selectable callback flags. */ michael@0: #define EVBUFFER_CB_USER_FLAGS 0xffff michael@0: /* Mask of all internal-use-only flags. */ michael@0: #define EVBUFFER_CB_INTERNAL_FLAGS 0xffff0000 michael@0: michael@0: /* Flag set if the callback is using the cb_obsolete function pointer */ michael@0: #define EVBUFFER_CB_OBSOLETE 0x00040000 michael@0: michael@0: /* evbuffer_chain support */ michael@0: #define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off) michael@0: #define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? 
\ michael@0: 0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off)) michael@0: michael@0: #define CHAIN_PINNED(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0) michael@0: #define CHAIN_PINNED_R(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0) michael@0: michael@0: static void evbuffer_chain_align(struct evbuffer_chain *chain); michael@0: static int evbuffer_chain_should_realign(struct evbuffer_chain *chain, michael@0: size_t datalen); michael@0: static void evbuffer_deferred_callback(struct deferred_cb *cb, void *arg); michael@0: static int evbuffer_ptr_memcmp(const struct evbuffer *buf, michael@0: const struct evbuffer_ptr *pos, const char *mem, size_t len); michael@0: static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf, michael@0: size_t datlen); michael@0: michael@0: #ifdef WIN32 michael@0: static int evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd, michael@0: ev_ssize_t howmuch); michael@0: #else michael@0: #define evbuffer_readfile evbuffer_read michael@0: #endif michael@0: michael@0: static struct evbuffer_chain * michael@0: evbuffer_chain_new(size_t size) michael@0: { michael@0: struct evbuffer_chain *chain; michael@0: size_t to_alloc; michael@0: michael@0: size += EVBUFFER_CHAIN_SIZE; michael@0: michael@0: /* get the next largest memory that can hold the buffer */ michael@0: to_alloc = MIN_BUFFER_SIZE; michael@0: while (to_alloc < size) michael@0: to_alloc <<= 1; michael@0: michael@0: /* we get everything in one chunk */ michael@0: if ((chain = mm_malloc(to_alloc)) == NULL) michael@0: return (NULL); michael@0: michael@0: memset(chain, 0, EVBUFFER_CHAIN_SIZE); michael@0: michael@0: chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE; michael@0: michael@0: /* this way we can manipulate the buffer to different addresses, michael@0: * which is required for mmap for example. 
michael@0: */ michael@0: chain->buffer = EVBUFFER_CHAIN_EXTRA(u_char, chain); michael@0: michael@0: return (chain); michael@0: } michael@0: michael@0: static inline void michael@0: evbuffer_chain_free(struct evbuffer_chain *chain) michael@0: { michael@0: if (CHAIN_PINNED(chain)) { michael@0: chain->flags |= EVBUFFER_DANGLING; michael@0: return; michael@0: } michael@0: if (chain->flags & (EVBUFFER_MMAP|EVBUFFER_SENDFILE| michael@0: EVBUFFER_REFERENCE)) { michael@0: if (chain->flags & EVBUFFER_REFERENCE) { michael@0: struct evbuffer_chain_reference *info = michael@0: EVBUFFER_CHAIN_EXTRA( michael@0: struct evbuffer_chain_reference, michael@0: chain); michael@0: if (info->cleanupfn) michael@0: (*info->cleanupfn)(chain->buffer, michael@0: chain->buffer_len, michael@0: info->extra); michael@0: } michael@0: #ifdef _EVENT_HAVE_MMAP michael@0: if (chain->flags & EVBUFFER_MMAP) { michael@0: struct evbuffer_chain_fd *info = michael@0: EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, michael@0: chain); michael@0: if (munmap(chain->buffer, chain->buffer_len) == -1) michael@0: event_warn("%s: munmap failed", __func__); michael@0: if (close(info->fd) == -1) michael@0: event_warn("%s: close(%d) failed", michael@0: __func__, info->fd); michael@0: } michael@0: #endif michael@0: #ifdef USE_SENDFILE michael@0: if (chain->flags & EVBUFFER_SENDFILE) { michael@0: struct evbuffer_chain_fd *info = michael@0: EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, michael@0: chain); michael@0: if (close(info->fd) == -1) michael@0: event_warn("%s: close(%d) failed", michael@0: __func__, info->fd); michael@0: } michael@0: #endif michael@0: } michael@0: michael@0: mm_free(chain); michael@0: } michael@0: michael@0: static void michael@0: evbuffer_free_all_chains(struct evbuffer_chain *chain) michael@0: { michael@0: struct evbuffer_chain *next; michael@0: for (; chain; chain = next) { michael@0: next = chain->next; michael@0: evbuffer_chain_free(chain); michael@0: } michael@0: } michael@0: michael@0: 
#ifndef NDEBUG michael@0: static int michael@0: evbuffer_chains_all_empty(struct evbuffer_chain *chain) michael@0: { michael@0: for (; chain; chain = chain->next) { michael@0: if (chain->off) michael@0: return 0; michael@0: } michael@0: return 1; michael@0: } michael@0: #else michael@0: /* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid michael@0: "unused variable" warnings. */ michael@0: static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) { michael@0: return 1; michael@0: } michael@0: #endif michael@0: michael@0: /* Free all trailing chains in 'buf' that are neither pinned nor empty, prior michael@0: * to replacing them all with a new chain. Return a pointer to the place michael@0: * where the new chain will go. michael@0: * michael@0: * Internal; requires lock. The caller must fix up buf->last and buf->first michael@0: * as needed; they might have been freed. michael@0: */ michael@0: static struct evbuffer_chain ** michael@0: evbuffer_free_trailing_empty_chains(struct evbuffer *buf) michael@0: { michael@0: struct evbuffer_chain **ch = buf->last_with_datap; michael@0: /* Find the first victim chain. It might be *last_with_datap */ michael@0: while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch))) michael@0: ch = &(*ch)->next; michael@0: if (*ch) { michael@0: EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch)); michael@0: evbuffer_free_all_chains(*ch); michael@0: *ch = NULL; michael@0: } michael@0: return ch; michael@0: } michael@0: michael@0: /* Add a single chain 'chain' to the end of 'buf', freeing trailing empty michael@0: * chains as necessary. Requires lock. Does not schedule callbacks. michael@0: */ michael@0: static void michael@0: evbuffer_chain_insert(struct evbuffer *buf, michael@0: struct evbuffer_chain *chain) michael@0: { michael@0: ASSERT_EVBUFFER_LOCKED(buf); michael@0: if (*buf->last_with_datap == NULL) { michael@0: /* There are no chains data on the buffer at all. 
*/ michael@0: EVUTIL_ASSERT(buf->last_with_datap == &buf->first); michael@0: EVUTIL_ASSERT(buf->first == NULL); michael@0: buf->first = buf->last = chain; michael@0: } else { michael@0: struct evbuffer_chain **ch = buf->last_with_datap; michael@0: /* Find the first victim chain. It might be *last_with_datap */ michael@0: while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch))) michael@0: ch = &(*ch)->next; michael@0: if (*ch == NULL) { michael@0: /* There is no victim; just append this new chain. */ michael@0: buf->last->next = chain; michael@0: if (chain->off) michael@0: buf->last_with_datap = &buf->last->next; michael@0: } else { michael@0: /* Replace all victim chains with this chain. */ michael@0: EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch)); michael@0: evbuffer_free_all_chains(*ch); michael@0: *ch = chain; michael@0: } michael@0: buf->last = chain; michael@0: } michael@0: buf->total_len += chain->off; michael@0: } michael@0: michael@0: static inline struct evbuffer_chain * michael@0: evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen) michael@0: { michael@0: struct evbuffer_chain *chain; michael@0: if ((chain = evbuffer_chain_new(datlen)) == NULL) michael@0: return NULL; michael@0: evbuffer_chain_insert(buf, chain); michael@0: return chain; michael@0: } michael@0: michael@0: void michael@0: _evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag) michael@0: { michael@0: EVUTIL_ASSERT((chain->flags & flag) == 0); michael@0: chain->flags |= flag; michael@0: } michael@0: michael@0: void michael@0: _evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag) michael@0: { michael@0: EVUTIL_ASSERT((chain->flags & flag) != 0); michael@0: chain->flags &= ~flag; michael@0: if (chain->flags & EVBUFFER_DANGLING) michael@0: evbuffer_chain_free(chain); michael@0: } michael@0: michael@0: struct evbuffer * michael@0: evbuffer_new(void) michael@0: { michael@0: struct evbuffer *buffer; michael@0: michael@0: buffer = mm_calloc(1, sizeof(struct 
evbuffer)); michael@0: if (buffer == NULL) michael@0: return (NULL); michael@0: michael@0: TAILQ_INIT(&buffer->callbacks); michael@0: buffer->refcnt = 1; michael@0: buffer->last_with_datap = &buffer->first; michael@0: michael@0: return (buffer); michael@0: } michael@0: michael@0: int michael@0: evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags) michael@0: { michael@0: EVBUFFER_LOCK(buf); michael@0: buf->flags |= (ev_uint32_t)flags; michael@0: EVBUFFER_UNLOCK(buf); michael@0: return 0; michael@0: } michael@0: michael@0: int michael@0: evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags) michael@0: { michael@0: EVBUFFER_LOCK(buf); michael@0: buf->flags &= ~(ev_uint32_t)flags; michael@0: EVBUFFER_UNLOCK(buf); michael@0: return 0; michael@0: } michael@0: michael@0: void michael@0: _evbuffer_incref(struct evbuffer *buf) michael@0: { michael@0: EVBUFFER_LOCK(buf); michael@0: ++buf->refcnt; michael@0: EVBUFFER_UNLOCK(buf); michael@0: } michael@0: michael@0: void michael@0: _evbuffer_incref_and_lock(struct evbuffer *buf) michael@0: { michael@0: EVBUFFER_LOCK(buf); michael@0: ++buf->refcnt; michael@0: } michael@0: michael@0: int michael@0: evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base) michael@0: { michael@0: EVBUFFER_LOCK(buffer); michael@0: buffer->cb_queue = event_base_get_deferred_cb_queue(base); michael@0: buffer->deferred_cbs = 1; michael@0: event_deferred_cb_init(&buffer->deferred, michael@0: evbuffer_deferred_callback, buffer); michael@0: EVBUFFER_UNLOCK(buffer); michael@0: return 0; michael@0: } michael@0: michael@0: int michael@0: evbuffer_enable_locking(struct evbuffer *buf, void *lock) michael@0: { michael@0: #ifdef _EVENT_DISABLE_THREAD_SUPPORT michael@0: return -1; michael@0: #else michael@0: if (buf->lock) michael@0: return -1; michael@0: michael@0: if (!lock) { michael@0: EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE); michael@0: if (!lock) michael@0: return -1; michael@0: buf->lock = lock; michael@0: 
buf->own_lock = 1; michael@0: } else { michael@0: buf->lock = lock; michael@0: buf->own_lock = 0; michael@0: } michael@0: michael@0: return 0; michael@0: #endif michael@0: } michael@0: michael@0: void michael@0: evbuffer_set_parent(struct evbuffer *buf, struct bufferevent *bev) michael@0: { michael@0: EVBUFFER_LOCK(buf); michael@0: buf->parent = bev; michael@0: EVBUFFER_UNLOCK(buf); michael@0: } michael@0: michael@0: static void michael@0: evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred) michael@0: { michael@0: struct evbuffer_cb_entry *cbent, *next; michael@0: struct evbuffer_cb_info info; michael@0: size_t new_size; michael@0: ev_uint32_t mask, masked_val; michael@0: int clear = 1; michael@0: michael@0: if (running_deferred) { michael@0: mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED; michael@0: masked_val = EVBUFFER_CB_ENABLED; michael@0: } else if (buffer->deferred_cbs) { michael@0: mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED; michael@0: masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED; michael@0: /* Don't zero-out n_add/n_del, since the deferred callbacks michael@0: will want to see them. 
*/ michael@0: clear = 0; michael@0: } else { michael@0: mask = EVBUFFER_CB_ENABLED; michael@0: masked_val = EVBUFFER_CB_ENABLED; michael@0: } michael@0: michael@0: ASSERT_EVBUFFER_LOCKED(buffer); michael@0: michael@0: if (TAILQ_EMPTY(&buffer->callbacks)) { michael@0: buffer->n_add_for_cb = buffer->n_del_for_cb = 0; michael@0: return; michael@0: } michael@0: if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0) michael@0: return; michael@0: michael@0: new_size = buffer->total_len; michael@0: info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb; michael@0: info.n_added = buffer->n_add_for_cb; michael@0: info.n_deleted = buffer->n_del_for_cb; michael@0: if (clear) { michael@0: buffer->n_add_for_cb = 0; michael@0: buffer->n_del_for_cb = 0; michael@0: } michael@0: for (cbent = TAILQ_FIRST(&buffer->callbacks); michael@0: cbent != NULL; michael@0: cbent = next) { michael@0: /* Get the 'next' pointer now in case this callback decides michael@0: * to remove itself or something. 
*/ michael@0: next = TAILQ_NEXT(cbent, next); michael@0: michael@0: if ((cbent->flags & mask) != masked_val) michael@0: continue; michael@0: michael@0: if ((cbent->flags & EVBUFFER_CB_OBSOLETE)) michael@0: cbent->cb.cb_obsolete(buffer, michael@0: info.orig_size, new_size, cbent->cbarg); michael@0: else michael@0: cbent->cb.cb_func(buffer, &info, cbent->cbarg); michael@0: } michael@0: } michael@0: michael@0: void michael@0: evbuffer_invoke_callbacks(struct evbuffer *buffer) michael@0: { michael@0: if (TAILQ_EMPTY(&buffer->callbacks)) { michael@0: buffer->n_add_for_cb = buffer->n_del_for_cb = 0; michael@0: return; michael@0: } michael@0: michael@0: if (buffer->deferred_cbs) { michael@0: if (buffer->deferred.queued) michael@0: return; michael@0: _evbuffer_incref_and_lock(buffer); michael@0: if (buffer->parent) michael@0: bufferevent_incref(buffer->parent); michael@0: EVBUFFER_UNLOCK(buffer); michael@0: event_deferred_cb_schedule(buffer->cb_queue, &buffer->deferred); michael@0: } michael@0: michael@0: evbuffer_run_callbacks(buffer, 0); michael@0: } michael@0: michael@0: static void michael@0: evbuffer_deferred_callback(struct deferred_cb *cb, void *arg) michael@0: { michael@0: struct bufferevent *parent = NULL; michael@0: struct evbuffer *buffer = arg; michael@0: michael@0: /* XXXX It would be better to run these callbacks without holding the michael@0: * lock */ michael@0: EVBUFFER_LOCK(buffer); michael@0: parent = buffer->parent; michael@0: evbuffer_run_callbacks(buffer, 1); michael@0: _evbuffer_decref_and_unlock(buffer); michael@0: if (parent) michael@0: bufferevent_decref(parent); michael@0: } michael@0: michael@0: static void michael@0: evbuffer_remove_all_callbacks(struct evbuffer *buffer) michael@0: { michael@0: struct evbuffer_cb_entry *cbent; michael@0: michael@0: while ((cbent = TAILQ_FIRST(&buffer->callbacks))) { michael@0: TAILQ_REMOVE(&buffer->callbacks, cbent, next); michael@0: mm_free(cbent); michael@0: } michael@0: } michael@0: michael@0: void 
michael@0: _evbuffer_decref_and_unlock(struct evbuffer *buffer) michael@0: { michael@0: struct evbuffer_chain *chain, *next; michael@0: ASSERT_EVBUFFER_LOCKED(buffer); michael@0: michael@0: EVUTIL_ASSERT(buffer->refcnt > 0); michael@0: michael@0: if (--buffer->refcnt > 0) { michael@0: EVBUFFER_UNLOCK(buffer); michael@0: return; michael@0: } michael@0: michael@0: for (chain = buffer->first; chain != NULL; chain = next) { michael@0: next = chain->next; michael@0: evbuffer_chain_free(chain); michael@0: } michael@0: evbuffer_remove_all_callbacks(buffer); michael@0: if (buffer->deferred_cbs) michael@0: event_deferred_cb_cancel(buffer->cb_queue, &buffer->deferred); michael@0: michael@0: EVBUFFER_UNLOCK(buffer); michael@0: if (buffer->own_lock) michael@0: EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE); michael@0: mm_free(buffer); michael@0: } michael@0: michael@0: void michael@0: evbuffer_free(struct evbuffer *buffer) michael@0: { michael@0: EVBUFFER_LOCK(buffer); michael@0: _evbuffer_decref_and_unlock(buffer); michael@0: } michael@0: michael@0: void michael@0: evbuffer_lock(struct evbuffer *buf) michael@0: { michael@0: EVBUFFER_LOCK(buf); michael@0: } michael@0: michael@0: void michael@0: evbuffer_unlock(struct evbuffer *buf) michael@0: { michael@0: EVBUFFER_UNLOCK(buf); michael@0: } michael@0: michael@0: size_t michael@0: evbuffer_get_length(const struct evbuffer *buffer) michael@0: { michael@0: size_t result; michael@0: michael@0: EVBUFFER_LOCK(buffer); michael@0: michael@0: result = (buffer->total_len); michael@0: michael@0: EVBUFFER_UNLOCK(buffer); michael@0: michael@0: return result; michael@0: } michael@0: michael@0: size_t michael@0: evbuffer_get_contiguous_space(const struct evbuffer *buf) michael@0: { michael@0: struct evbuffer_chain *chain; michael@0: size_t result; michael@0: michael@0: EVBUFFER_LOCK(buf); michael@0: chain = buf->first; michael@0: result = (chain != NULL ? 
chain->off : 0); michael@0: EVBUFFER_UNLOCK(buf); michael@0: michael@0: return result; michael@0: } michael@0: michael@0: int michael@0: evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size, michael@0: struct evbuffer_iovec *vec, int n_vecs) michael@0: { michael@0: struct evbuffer_chain *chain, **chainp; michael@0: int n = -1; michael@0: michael@0: EVBUFFER_LOCK(buf); michael@0: if (buf->freeze_end) michael@0: goto done; michael@0: if (n_vecs < 1) michael@0: goto done; michael@0: if (n_vecs == 1) { michael@0: if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL) michael@0: goto done; michael@0: michael@0: vec[0].iov_base = CHAIN_SPACE_PTR(chain); michael@0: vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain); michael@0: EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size); michael@0: n = 1; michael@0: } else { michael@0: if (_evbuffer_expand_fast(buf, size, n_vecs)<0) michael@0: goto done; michael@0: n = _evbuffer_read_setup_vecs(buf, size, vec, n_vecs, michael@0: &chainp, 0); michael@0: } michael@0: michael@0: done: michael@0: EVBUFFER_UNLOCK(buf); michael@0: return n; michael@0: michael@0: } michael@0: michael@0: static int michael@0: advance_last_with_data(struct evbuffer *buf) michael@0: { michael@0: int n = 0; michael@0: ASSERT_EVBUFFER_LOCKED(buf); michael@0: michael@0: if (!*buf->last_with_datap) michael@0: return 0; michael@0: michael@0: while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) { michael@0: buf->last_with_datap = &(*buf->last_with_datap)->next; michael@0: ++n; michael@0: } michael@0: return n; michael@0: } michael@0: michael@0: int michael@0: evbuffer_commit_space(struct evbuffer *buf, michael@0: struct evbuffer_iovec *vec, int n_vecs) michael@0: { michael@0: struct evbuffer_chain *chain, **firstchainp, **chainp; michael@0: int result = -1; michael@0: size_t added = 0; michael@0: int i; michael@0: michael@0: EVBUFFER_LOCK(buf); michael@0: michael@0: if (buf->freeze_end) michael@0: goto done; 
michael@0: if (n_vecs == 0) { michael@0: result = 0; michael@0: goto done; michael@0: } else if (n_vecs == 1 && michael@0: (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) { michael@0: /* The user only got or used one chain; it might not michael@0: * be the first one with space in it. */ michael@0: if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last)) michael@0: goto done; michael@0: buf->last->off += vec[0].iov_len; michael@0: added = vec[0].iov_len; michael@0: if (added) michael@0: advance_last_with_data(buf); michael@0: goto okay; michael@0: } michael@0: michael@0: /* Advance 'firstchain' to the first chain with space in it. */ michael@0: firstchainp = buf->last_with_datap; michael@0: if (!*firstchainp) michael@0: goto done; michael@0: if (CHAIN_SPACE_LEN(*firstchainp) == 0) { michael@0: firstchainp = &(*firstchainp)->next; michael@0: } michael@0: michael@0: chain = *firstchainp; michael@0: /* pass 1: make sure that the pointers and lengths of vecs[] are in michael@0: * bounds before we try to commit anything. */ michael@0: for (i=0; i CHAIN_SPACE_LEN(chain)) michael@0: goto done; michael@0: chain = chain->next; michael@0: } michael@0: /* pass 2: actually adjust all the chains. 
*/ michael@0: chainp = firstchainp; michael@0: for (i=0; ioff += vec[i].iov_len; michael@0: added += vec[i].iov_len; michael@0: if (vec[i].iov_len) { michael@0: buf->last_with_datap = chainp; michael@0: } michael@0: chainp = &(*chainp)->next; michael@0: } michael@0: michael@0: okay: michael@0: buf->total_len += added; michael@0: buf->n_add_for_cb += added; michael@0: result = 0; michael@0: evbuffer_invoke_callbacks(buf); michael@0: michael@0: done: michael@0: EVBUFFER_UNLOCK(buf); michael@0: return result; michael@0: } michael@0: michael@0: static inline int michael@0: HAS_PINNED_R(struct evbuffer *buf) michael@0: { michael@0: return (buf->last && CHAIN_PINNED_R(buf->last)); michael@0: } michael@0: michael@0: static inline void michael@0: ZERO_CHAIN(struct evbuffer *dst) michael@0: { michael@0: ASSERT_EVBUFFER_LOCKED(dst); michael@0: dst->first = NULL; michael@0: dst->last = NULL; michael@0: dst->last_with_datap = &(dst)->first; michael@0: dst->total_len = 0; michael@0: } michael@0: michael@0: /* Prepares the contents of src to be moved to another buffer by removing michael@0: * read-pinned chains. The first pinned chain is saved in first, and the michael@0: * last in last. If src has no read-pinned chains, first and last are set michael@0: * to NULL. 
*/ michael@0: static int michael@0: PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first, michael@0: struct evbuffer_chain **last) michael@0: { michael@0: struct evbuffer_chain *chain, **pinned; michael@0: michael@0: ASSERT_EVBUFFER_LOCKED(src); michael@0: michael@0: if (!HAS_PINNED_R(src)) { michael@0: *first = *last = NULL; michael@0: return 0; michael@0: } michael@0: michael@0: pinned = src->last_with_datap; michael@0: if (!CHAIN_PINNED_R(*pinned)) michael@0: pinned = &(*pinned)->next; michael@0: EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned)); michael@0: chain = *first = *pinned; michael@0: *last = src->last; michael@0: michael@0: /* If there's data in the first pinned chain, we need to allocate michael@0: * a new chain and copy the data over. */ michael@0: if (chain->off) { michael@0: struct evbuffer_chain *tmp; michael@0: michael@0: EVUTIL_ASSERT(pinned == src->last_with_datap); michael@0: tmp = evbuffer_chain_new(chain->off); michael@0: if (!tmp) michael@0: return -1; michael@0: memcpy(tmp->buffer, chain->buffer + chain->misalign, michael@0: chain->off); michael@0: tmp->off = chain->off; michael@0: *src->last_with_datap = tmp; michael@0: src->last = tmp; michael@0: chain->misalign += chain->off; michael@0: chain->off = 0; michael@0: } else { michael@0: src->last = *src->last_with_datap; michael@0: *pinned = NULL; michael@0: } michael@0: michael@0: return 0; michael@0: } michael@0: michael@0: static inline void michael@0: RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned, michael@0: struct evbuffer_chain *last) michael@0: { michael@0: ASSERT_EVBUFFER_LOCKED(src); michael@0: michael@0: if (!pinned) { michael@0: ZERO_CHAIN(src); michael@0: return; michael@0: } michael@0: michael@0: src->first = pinned; michael@0: src->last = last; michael@0: src->last_with_datap = &src->first; michael@0: src->total_len = 0; michael@0: } michael@0: michael@0: static inline void michael@0: COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src) michael@0: { 
michael@0: ASSERT_EVBUFFER_LOCKED(dst); michael@0: ASSERT_EVBUFFER_LOCKED(src); michael@0: dst->first = src->first; michael@0: if (src->last_with_datap == &src->first) michael@0: dst->last_with_datap = &dst->first; michael@0: else michael@0: dst->last_with_datap = src->last_with_datap; michael@0: dst->last = src->last; michael@0: dst->total_len = src->total_len; michael@0: } michael@0: michael@0: static void michael@0: APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src) michael@0: { michael@0: ASSERT_EVBUFFER_LOCKED(dst); michael@0: ASSERT_EVBUFFER_LOCKED(src); michael@0: dst->last->next = src->first; michael@0: if (src->last_with_datap == &src->first) michael@0: dst->last_with_datap = &dst->last->next; michael@0: else michael@0: dst->last_with_datap = src->last_with_datap; michael@0: dst->last = src->last; michael@0: dst->total_len += src->total_len; michael@0: } michael@0: michael@0: static void michael@0: PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src) michael@0: { michael@0: ASSERT_EVBUFFER_LOCKED(dst); michael@0: ASSERT_EVBUFFER_LOCKED(src); michael@0: src->last->next = dst->first; michael@0: dst->first = src->first; michael@0: dst->total_len += src->total_len; michael@0: if (*dst->last_with_datap == NULL) { michael@0: if (src->last_with_datap == &(src)->first) michael@0: dst->last_with_datap = &dst->first; michael@0: else michael@0: dst->last_with_datap = src->last_with_datap; michael@0: } else if (dst->last_with_datap == &dst->first) { michael@0: dst->last_with_datap = &src->last->next; michael@0: } michael@0: } michael@0: michael@0: int michael@0: evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf) michael@0: { michael@0: struct evbuffer_chain *pinned, *last; michael@0: size_t in_total_len, out_total_len; michael@0: int result = 0; michael@0: michael@0: EVBUFFER_LOCK2(inbuf, outbuf); michael@0: in_total_len = inbuf->total_len; michael@0: out_total_len = outbuf->total_len; michael@0: michael@0: if (in_total_len == 0 || outbuf 
== inbuf)
		goto done;

	/* Cannot append to a frozen tail, or remove from a frozen head. */
	if (outbuf->freeze_end || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	/* Detach pinned chains from inbuf so they are not handed over. */
	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		APPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

/* Moves all data from inbuf to the *front* of outbuf.  Returns 0 on
 * success, -1 if either buffer's relevant end is frozen or pinned-chain
 * bookkeeping fails.  inbuf is left empty (except for pinned chains). */
int
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);

	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	/* Nothing to move, or self-prepend: trivially done. */
	if (!in_total_len || inbuf == outbuf)
		goto done;

	/* Both operations touch a buffer head: removing from inbuf's head
	 * and inserting at outbuf's head. */
	if (outbuf->freeze_start || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	/* Detach pinned chains from inbuf so they are not handed over. */
	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		PREPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);
done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

/* Discards up to len bytes from the front of buf.  Returns 0 on success,
 * -1 if the buffer head is frozen. */
int
evbuffer_drain(struct evbuffer *buf, size_t len)
{
	struct evbuffer_chain *chain, *next;
	size_t remaining, old_len;
	int result = 0;

	EVBUFFER_LOCK(buf);
	old_len = buf->total_len;

	if (old_len == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	if (len >= old_len && !HAS_PINNED_R(buf)) {
		/* Fast path: no pinned chains, and everything is being
		 * drained — free every chain outright. */
		len = old_len;
		for (chain = buf->first; chain != NULL; chain = next) {
			next = chain->next;
			evbuffer_chain_free(chain);
		}

		ZERO_CHAIN(buf);
	} else {
		if (len >= old_len)
			len = old_len;

		buf->total_len -= len;
		remaining = len;
		for (chain = buf->first;
		     remaining >= chain->off;
		     chain = next) {
			next = chain->next;
			remaining -= chain->off;

			/* If we are about to remove the chain that
			 * last_with_datap points at (or through), repoint
			 * it back at the head so it never dangles. */
			if (chain == *buf->last_with_datap) {
				buf->last_with_datap = &buf->first;
			}
			if (&chain->next == buf->last_with_datap)
				buf->last_with_datap = &buf->first;

			/* A read-pinned chain cannot be freed; empty it in
			 * place and stop (it must be the last one drained,
			 * hence the assert). */
			if (CHAIN_PINNED_R(chain)) {
				EVUTIL_ASSERT(remaining == 0);
				chain->misalign += chain->off;
				chain->off = 0;
				break;
			} else
				evbuffer_chain_free(chain);
		}

		buf->first = chain;
		if (chain) {
			/* Partially consume the first surviving chain. */
			chain->misalign += remaining;
			chain->off -= remaining;
		}
	}

	buf->n_del_for_cb += len;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* Reads data from an event buffer and drains the bytes read.  Returns the
 * number of bytes removed, or -1 if the drain step fails. */
int
evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
{
	ev_ssize_t n;
	EVBUFFER_LOCK(buf);
	n = evbuffer_copyout(buf, data_out, datlen);
	if (n > 0) {
		if (evbuffer_drain(buf, n)<0)
			n = -1;
	}
	EVBUFFER_UNLOCK(buf);
	return (int)n;
}

/* Copies up to datlen bytes from the front of buf into data_out without
 * draining them.  Returns the number of bytes copied, or -1 if the buffer
 * head is frozen. */
ev_ssize_t
evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
{
	/*XXX fails badly on sendfile case. */
	struct evbuffer_chain *chain;
	char *data = data_out;
	size_t nread;
	ev_ssize_t result = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	/* Clamp the request to what is actually buffered. */
	if (datlen >= buf->total_len)
		datlen = buf->total_len;

	if (datlen == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	nread = datlen;

	/* Copy whole chains while the request covers them... */
	while (datlen && datlen >= chain->off) {
		memcpy(data, chain->buffer + chain->misalign, chain->off);
		data += chain->off;
		datlen -= chain->off;

		chain = chain->next;
		EVUTIL_ASSERT(chain || datlen==0);
	}

	/* ...then the partial remainder from the next chain. */
	if (datlen) {
		EVUTIL_ASSERT(chain);
		memcpy(data, chain->buffer + chain->misalign, datlen);
	}

	result = nread;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* reads data from the src buffer to the dst buffer, avoids memcpy as
 * possible. */
/* XXXX should return ev_ssize_t */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
{
	/*XXX We should have an option to force this to be zero-copy.*/

	/*XXX can fail badly on sendfile case.
	 */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;

	EVBUFFER_LOCK2(src, dst);

	chain = previous = src->first;

	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	}

	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	}

	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = (int)datlen; /*XXXX should return ev_ssize_t*/
		goto done;
	}

	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last with data from src unless we
		 * remove all chains, in which case we would have done the if
		 * block above */
		EVUTIL_ASSERT(chain != *src->last_with_datap);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		/* Keep last_with_datap valid as its target is detached. */
		if (src->last_with_datap == &chain->next)
			src->last_with_datap = &src->first;
		chain = chain->next;
	}

	if (nread) {
		/* Splice the fully-consumed chains [src->first, previous]
		 * onto the tail of dst without copying their payload. */
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(dst);

		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			*chp = src->first;
		}
		dst->last = previous;
		previous->next = NULL;
		src->first = chain;
		advance_last_with_data(dst);

		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	}

	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;

	/* You might think we would want to increment dst->n_add_for_cb
	 * here too.  But evbuffer_add above already took care of that.
	 */
	src->total_len -= nread;
	src->n_del_for_cb += nread;

	if (nread) {
		evbuffer_invoke_callbacks(dst);
		evbuffer_invoke_callbacks(src);
	}
	result = (int)nread;/*XXXX should change return type */

done:
	EVBUFFER_UNLOCK2(src, dst);
	return result;
}

/* Linearizes the first 'size' bytes of buf into one contiguous region and
 * returns a pointer to it, or NULL if size exceeds the buffered data or a
 * needed chain is pinned.  size < 0 means "the whole buffer". */
unsigned char *
evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
{
	struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
	unsigned char *buffer, *result = NULL;
	ev_ssize_t remaining;
	int removed_last_with_data = 0;
	int removed_last_with_datap = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (size < 0)
		size = buf->total_len;
	/* if size > buf->total_len, we cannot guarantee to the user that she
	 * is going to have a long enough buffer afterwards; so we return
	 * NULL */
	if (size == 0 || (size_t)size > buf->total_len)
		goto done;

	/* No need to pull up anything; the first size bytes are
	 * already here. */
	if (chain->off >= (size_t)size) {
		result = chain->buffer + chain->misalign;
		goto done;
	}

	/* Make sure that none of the chains we need to copy from is pinned. */
	remaining = size - chain->off;
	EVUTIL_ASSERT(remaining >= 0);
	for (tmp=chain->next; tmp; tmp=tmp->next) {
		if (CHAIN_PINNED(tmp))
			goto done;
		if (tmp->off >= (size_t)remaining)
			break;
		remaining -= tmp->off;
	}

	/* Decide where the linearized bytes will live: append in place to a
	 * pinned first chain, reuse the first chain's own allocation, or
	 * allocate a fresh chain that replaces it. */
	if (CHAIN_PINNED(chain)) {
		size_t old_off = chain->off;
		if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
			/* not enough room at end of chunk. */
			goto done;
		}
		buffer = CHAIN_SPACE_PTR(chain);
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
		/* already have enough space in the first chain */
		size_t old_off = chain->off;
		buffer = chain->buffer + chain->misalign + chain->off;
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else {
		if ((tmp = evbuffer_chain_new(size)) == NULL) {
			event_warn("%s: out of memory", __func__);
			goto done;
		}
		buffer = tmp->buffer;
		tmp->off = size;
		buf->first = tmp;
	}

	/* TODO(niels): deal with buffers that point to NULL like sendfile */

	/* Copy and free every chunk that will be entirely pulled into tmp */
	last_with_data = *buf->last_with_datap;
	for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
		next = chain->next;

		memcpy(buffer, chain->buffer + chain->misalign, chain->off);
		size -= chain->off;
		buffer += chain->off;
		/* Remember whether last_with_datap pointed at (or into) a
		 * chain we are freeing, so we can repair it below. */
		if (chain == last_with_data)
			removed_last_with_data = 1;
		if (&chain->next == buf->last_with_datap)
			removed_last_with_datap = 1;

		evbuffer_chain_free(chain);
	}

	if (chain != NULL) {
		/* Copy the partial remainder out of the surviving chain. */
		memcpy(buffer, chain->buffer + chain->misalign, size);
		chain->misalign += size;
		chain->off -= size;
	} else {
		buf->last = tmp;
	}

	tmp->next = chain;

	if (removed_last_with_data) {
		buf->last_with_datap = &buf->first;
	} else if (removed_last_with_datap) {
		if (buf->first->next && buf->first->next->off)
			buf->last_with_datap = &buf->first->next;
		else
			buf->last_with_datap = &buf->first;
	}

	result = (tmp->buffer + tmp->misalign);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/*
 * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
 * The returned buffer needs to be freed by the caller.
michael@0: */ michael@0: char * michael@0: evbuffer_readline(struct evbuffer *buffer) michael@0: { michael@0: return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY); michael@0: } michael@0: michael@0: static inline ev_ssize_t michael@0: evbuffer_strchr(struct evbuffer_ptr *it, const char chr) michael@0: { michael@0: struct evbuffer_chain *chain = it->_internal.chain; michael@0: size_t i = it->_internal.pos_in_chain; michael@0: while (chain != NULL) { michael@0: char *buffer = (char *)chain->buffer + chain->misalign; michael@0: char *cp = memchr(buffer+i, chr, chain->off-i); michael@0: if (cp) { michael@0: it->_internal.chain = chain; michael@0: it->_internal.pos_in_chain = cp - buffer; michael@0: it->pos += (cp - buffer - i); michael@0: return it->pos; michael@0: } michael@0: it->pos += chain->off - i; michael@0: i = 0; michael@0: chain = chain->next; michael@0: } michael@0: michael@0: return (-1); michael@0: } michael@0: michael@0: static inline char * michael@0: find_eol_char(char *s, size_t len) michael@0: { michael@0: #define CHUNK_SZ 128 michael@0: /* Lots of benchmarking found this approach to be faster in practice michael@0: * than doing two memchrs over the whole buffer, doin a memchr on each michael@0: * char of the buffer, or trying to emulate memchr by hand. */ michael@0: char *s_end, *cr, *lf; michael@0: s_end = s+len; michael@0: while (s < s_end) { michael@0: size_t chunk = (s + CHUNK_SZ < s_end) ? 
CHUNK_SZ : (s_end - s); michael@0: cr = memchr(s, '\r', chunk); michael@0: lf = memchr(s, '\n', chunk); michael@0: if (cr) { michael@0: if (lf && lf < cr) michael@0: return lf; michael@0: return cr; michael@0: } else if (lf) { michael@0: return lf; michael@0: } michael@0: s += CHUNK_SZ; michael@0: } michael@0: michael@0: return NULL; michael@0: #undef CHUNK_SZ michael@0: } michael@0: michael@0: static ev_ssize_t michael@0: evbuffer_find_eol_char(struct evbuffer_ptr *it) michael@0: { michael@0: struct evbuffer_chain *chain = it->_internal.chain; michael@0: size_t i = it->_internal.pos_in_chain; michael@0: while (chain != NULL) { michael@0: char *buffer = (char *)chain->buffer + chain->misalign; michael@0: char *cp = find_eol_char(buffer+i, chain->off-i); michael@0: if (cp) { michael@0: it->_internal.chain = chain; michael@0: it->_internal.pos_in_chain = cp - buffer; michael@0: it->pos += (cp - buffer) - i; michael@0: return it->pos; michael@0: } michael@0: it->pos += chain->off - i; michael@0: i = 0; michael@0: chain = chain->next; michael@0: } michael@0: michael@0: return (-1); michael@0: } michael@0: michael@0: static inline int michael@0: evbuffer_strspn( michael@0: struct evbuffer_ptr *ptr, const char *chrset) michael@0: { michael@0: int count = 0; michael@0: struct evbuffer_chain *chain = ptr->_internal.chain; michael@0: size_t i = ptr->_internal.pos_in_chain; michael@0: michael@0: if (!chain) michael@0: return -1; michael@0: michael@0: while (1) { michael@0: char *buffer = (char *)chain->buffer + chain->misalign; michael@0: for (; i < chain->off; ++i) { michael@0: const char *p = chrset; michael@0: while (*p) { michael@0: if (buffer[i] == *p++) michael@0: goto next; michael@0: } michael@0: ptr->_internal.chain = chain; michael@0: ptr->_internal.pos_in_chain = i; michael@0: ptr->pos += count; michael@0: return count; michael@0: next: michael@0: ++count; michael@0: } michael@0: i = 0; michael@0: michael@0: if (! 
chain->next) {
			ptr->_internal.chain = chain;
			ptr->_internal.pos_in_chain = i;
			ptr->pos += count;
			return count;
		}

		chain = chain->next;
	}
}


/* Returns the byte at *it.  No bounds check is performed here; the caller
 * must guarantee that *it refers to a valid position. */
static inline char
evbuffer_getchr(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->_internal.chain;
	size_t off = it->_internal.pos_in_chain;

	return chain->buffer[chain->misalign + off];
}

/* Finds the next end-of-line marker at or after *start (or the buffer head
 * if start is NULL) according to eol_style.  Returns a pointer to the EOL
 * (pos == -1 if none found) and stores the marker's length in *eol_len_out
 * so the caller knows how much to drain. */
struct evbuffer_ptr
evbuffer_search_eol(struct evbuffer *buffer,
    struct evbuffer_ptr *start, size_t *eol_len_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it, it2;
	size_t extra_drain = 0;
	int ok = 0;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&it, start, sizeof(it));
	} else {
		it.pos = 0;
		it._internal.chain = buffer->first;
		it._internal.pos_in_chain = 0;
	}

	/* the eol_style determines our first stop character and how many
	 * characters we are going to drain afterwards. */
	switch (eol_style) {
	case EVBUFFER_EOL_ANY:
		/* Any CR/LF ends the line; the whole following run of
		 * CR/LF characters is the terminator. */
		if (evbuffer_find_eol_char(&it) < 0)
			goto done;
		memcpy(&it2, &it, sizeof(it));
		extra_drain = evbuffer_strspn(&it2, "\r\n");
		break;
	case EVBUFFER_EOL_CRLF_STRICT: {
		it = evbuffer_search(buffer, "\r\n", 2, &it);
		if (it.pos < 0)
			goto done;
		extra_drain = 2;
		break;
	}
	case EVBUFFER_EOL_CRLF:
		/* Accept either a bare LF or a CRLF pair; a lone CR that is
		 * not followed by LF is ordinary line content, so keep
		 * scanning past it. */
		while (1) {
			if (evbuffer_find_eol_char(&it) < 0)
				goto done;
			if (evbuffer_getchr(&it) == '\n') {
				extra_drain = 1;
				break;
			} else if (!evbuffer_ptr_memcmp(
				    buffer, &it, "\r\n", 2)) {
				extra_drain = 2;
				break;
			} else {
				if (evbuffer_ptr_set(buffer, &it, 1,
				    EVBUFFER_PTR_ADD)<0)
					goto done;
			}
		}
		break;
	case EVBUFFER_EOL_LF:
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		break;
	default:
		goto done;
	}

	ok = 1;
done:
	EVBUFFER_UNLOCK(buffer);

	if (!ok) {
		it.pos = -1;
	}
	if (eol_len_out)
		*eol_len_out = extra_drain;

	return it;
}

/* Extracts one line from the buffer (terminator removed), allocated with
 * mm_malloc and NUL-terminated; the caller must free it.  Returns NULL if
 * no complete line is available or the buffer head is frozen. */
char *
evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it;
	char *line;
	size_t n_to_copy=0, extra_drain=0;
	char *result = NULL;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
	if (it.pos < 0)
		goto done;
	n_to_copy = it.pos;

	if ((line = mm_malloc(n_to_copy+1)) == NULL) {
		event_warn("%s: out of memory", __func__);
		goto done;
	}

	/* Copy the line body out, then drain the EOL marker separately. */
	evbuffer_remove(buffer, line, n_to_copy);
	line[n_to_copy] = '\0';

	evbuffer_drain(buffer, extra_drain);
	result = line;
done:
	EVBUFFER_UNLOCK(buffer);

	if (n_read_out)
		*n_read_out = result ? n_to_copy : 0;

	return result;
}

/* Chains grown automatically by evbuffer_add double in size only up to
 * this many bytes. */
#define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096

/* Adds data to an event buffer */

/* Appends datlen bytes from data_in to the end of buf.  Returns 0 on
 * success, -1 if the buffer tail is frozen or allocation fails. */
int
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	const unsigned char *data = data_in;
	size_t remain, to_alloc;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}

	chain = buf->last;

	/* If there are no chains allocated for this buffer, allocate one
	 * big enough to hold all the data. */
	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* Free space remaining after the payload in the last chain. */
		remain = (size_t)(chain->buffer_len - chain->misalign - chain->off);
		if (remain >= datlen) {
			/* there's enough space to hold all the data in the
			 * current last chain */
			memcpy(chain->buffer + chain->misalign + chain->off,
			    data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (!CHAIN_PINNED(chain) &&
		    evbuffer_chain_should_realign(chain, datlen)) {
			/* we can fit the data into the misalignment */
			evbuffer_chain_align(chain);

			memcpy(chain->buffer + chain->off, data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		}
	} else {
		/* we cannot write any data to the last chain */
		remain = 0;
	}

	/* we need to add another chain: grow geometrically up to the
	 * auto-size cap, but never smaller than the data itself. */
	to_alloc = chain->buffer_len;
	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
		to_alloc <<= 1;
	if (datlen > to_alloc)
		to_alloc = datlen;
	tmp = evbuffer_chain_new(to_alloc);
	if (tmp == NULL)
		goto done;

	/* Top off the old last chain with as much as fits... */
	if (remain) {
		memcpy(chain->buffer + chain->misalign + chain->off,
		    data, remain);
		chain->off += remain;
		buf->total_len += remain;
		buf->n_add_for_cb += remain;
	}

	data += remain;
	datlen -= remain;

	/* ...and put the rest in the new chain. */
	memcpy(tmp->buffer, data, datlen);
	tmp->off = datlen;
evbuffer_chain_insert(buf, tmp); michael@0: buf->n_add_for_cb += datlen; michael@0: michael@0: out: michael@0: evbuffer_invoke_callbacks(buf); michael@0: result = 0; michael@0: done: michael@0: EVBUFFER_UNLOCK(buf); michael@0: return result; michael@0: } michael@0: michael@0: int michael@0: evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen) michael@0: { michael@0: struct evbuffer_chain *chain, *tmp; michael@0: int result = -1; michael@0: michael@0: EVBUFFER_LOCK(buf); michael@0: michael@0: if (buf->freeze_start) { michael@0: goto done; michael@0: } michael@0: michael@0: chain = buf->first; michael@0: michael@0: if (chain == NULL) { michael@0: chain = evbuffer_chain_new(datlen); michael@0: if (!chain) michael@0: goto done; michael@0: evbuffer_chain_insert(buf, chain); michael@0: } michael@0: michael@0: /* we cannot touch immutable buffers */ michael@0: if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) { michael@0: /* If this chain is empty, we can treat it as michael@0: * 'empty at the beginning' rather than 'empty at the end' */ michael@0: if (chain->off == 0) michael@0: chain->misalign = chain->buffer_len; michael@0: michael@0: if ((size_t)chain->misalign >= datlen) { michael@0: /* we have enough space to fit everything */ michael@0: memcpy(chain->buffer + chain->misalign - datlen, michael@0: data, datlen); michael@0: chain->off += datlen; michael@0: chain->misalign -= datlen; michael@0: buf->total_len += datlen; michael@0: buf->n_add_for_cb += datlen; michael@0: goto out; michael@0: } else if (chain->misalign) { michael@0: /* we can only fit some of the data. 
*/ michael@0: memcpy(chain->buffer, michael@0: (char*)data + datlen - chain->misalign, michael@0: (size_t)chain->misalign); michael@0: chain->off += (size_t)chain->misalign; michael@0: buf->total_len += (size_t)chain->misalign; michael@0: buf->n_add_for_cb += (size_t)chain->misalign; michael@0: datlen -= (size_t)chain->misalign; michael@0: chain->misalign = 0; michael@0: } michael@0: } michael@0: michael@0: /* we need to add another chain */ michael@0: if ((tmp = evbuffer_chain_new(datlen)) == NULL) michael@0: goto done; michael@0: buf->first = tmp; michael@0: if (buf->last_with_datap == &buf->first) michael@0: buf->last_with_datap = &tmp->next; michael@0: michael@0: tmp->next = chain; michael@0: michael@0: tmp->off = datlen; michael@0: tmp->misalign = tmp->buffer_len - datlen; michael@0: michael@0: memcpy(tmp->buffer + tmp->misalign, data, datlen); michael@0: buf->total_len += datlen; michael@0: buf->n_add_for_cb += (size_t)chain->misalign; michael@0: michael@0: out: michael@0: evbuffer_invoke_callbacks(buf); michael@0: result = 0; michael@0: done: michael@0: EVBUFFER_UNLOCK(buf); michael@0: return result; michael@0: } michael@0: michael@0: /** Helper: realigns the memory in chain->buffer so that misalign is 0. */ michael@0: static void michael@0: evbuffer_chain_align(struct evbuffer_chain *chain) michael@0: { michael@0: EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE)); michael@0: EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY)); michael@0: memmove(chain->buffer, chain->buffer + chain->misalign, chain->off); michael@0: chain->misalign = 0; michael@0: } michael@0: michael@0: #define MAX_TO_COPY_IN_EXPAND 4096 michael@0: #define MAX_TO_REALIGN_IN_EXPAND 2048 michael@0: michael@0: /** Helper: return true iff we should realign chain to fit datalen bytes of michael@0: data in it. 
 */
static int
evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    size_t datlen)
{
	/* Realign only when it buys room for datlen, less than half the
	 * chain is occupied, and the copy stays under the realign cap. */
	return chain->buffer_len - chain->off >= datlen &&
	    (chain->off < chain->buffer_len / 2) &&
	    (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
}

/* Expands the available space in the event buffer to at least datlen, all in
 * a single chunk. Return that chunk. */
static struct evbuffer_chain *
evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain, **chainp;
	struct evbuffer_chain *result = NULL;
	ASSERT_EVBUFFER_LOCKED(buf);

	chainp = buf->last_with_datap;

	/* XXX If *chainp is no longer writeable, but has enough space in its
	 * misalign, this might be a bad idea: we could still use *chainp, not
	 * (*chainp)->next. */
	if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
		chainp = &(*chainp)->next;

	/* 'chain' now points to the first chain with writable space (if any)
	 * We will either use it, realign it, replace it, or resize it. */
	chain = *chainp;

	if (chain == NULL ||
	    (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
		/* We can't use the last_with_data chain at all.  Just add a
		 * new one that's big enough. */
		goto insert_new;
	}

	/* If we can fit all the data, then we don't have to do anything */
	if (CHAIN_SPACE_LEN(chain) >= datlen) {
		result = chain;
		goto ok;
	}

	/* If the chain is completely empty, just replace it by adding a new
	 * empty chain. */
	if (chain->off == 0) {
		goto insert_new;
	}

	/* If the misalignment plus the remaining space fulfills our data
	 * needs, we could just force an alignment to happen.  Afterwards, we
	 * have enough space.  But only do this if we're saving a lot of space
	 * and not moving too much data.  Otherwise the space savings are
	 * probably offset by the time lost in copying.
	 */
	if (evbuffer_chain_should_realign(chain, datlen)) {
		evbuffer_chain_align(chain);
		result = chain;
		goto ok;
	}

	/* At this point, we can either resize the last chunk with space in
	 * it, use the next chunk after it, or add a new chunk.  If we add a
	 * new chunk, we waste CHAIN_SPACE_LEN(chain) bytes in the former
	 * last chunk.  If we resize, we have to copy chain->off bytes.
	 */

	/* Would expanding this chunk be affordable and worthwhile? */
	if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
	    chain->off > MAX_TO_COPY_IN_EXPAND) {
		/* It's not worth resizing this chain. Can the next one be
		 * used? */
		if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
			/* Yes, we can just use the next chain (which should
			 * be empty. */
			result = chain->next;
			goto ok;
		} else {
			/* No; append a new chain (which will free all
			 * terminal empty chains.) */
			goto insert_new;
		}
	} else {
		/* Okay, we're going to try to resize this chain: Not doing so
		 * would waste at least 1/8 of its current allocation, and we
		 * can do so without having to copy more than
		 * MAX_TO_COPY_IN_EXPAND bytes. */
		/* figure out how much space we need */
		size_t length = chain->off + datlen;
		struct evbuffer_chain *tmp = evbuffer_chain_new(length);
		if (tmp == NULL)
			goto err;

		/* copy the data over that we had so far */
		tmp->off = chain->off;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
		    chain->off);
		/* fix up the list */
		EVUTIL_ASSERT(*chainp == chain);
		result = *chainp = tmp;

		if (buf->last == chain)
			buf->last = tmp;

		tmp->next = chain->next;
		evbuffer_chain_free(chain);
		goto ok;
	}

insert_new:
	result = evbuffer_chain_insert_new(buf, datlen);
	if (!result)
		goto err;
ok:
	EVUTIL_ASSERT(result);
	EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
err:
	return result;
}

/* Make sure that datlen bytes are available for writing in the last n
 * chains.  Never copies or moves data. */
int
_evbuffer_expand_fast(struct evbuffer *buf, size_t datlen, int n)
{
	struct evbuffer_chain *chain = buf->last, *tmp, *next;
	size_t avail;
	int used;

	ASSERT_EVBUFFER_LOCKED(buf);
	EVUTIL_ASSERT(n >= 2);

	if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
		/* There is no last chunk, or we can't touch the last chunk.
		 * Just add a new chunk. */
		chain = evbuffer_chain_new(datlen);
		if (chain == NULL)
			return (-1);

		evbuffer_chain_insert(buf, chain);
		return (0);
	}

	used = 0; /* number of chains we're using space in. */
	avail = 0; /* how much space they have. */
	/* How many bytes can we stick at the end of buffer as it is?  Iterate
	 * over the chains at the end of the buffer, trying to see how much
	 * space we have in the first n. */
	for (chain = *buf->last_with_datap; chain; chain = chain->next) {
		if (chain->off) {
			size_t space = (size_t) CHAIN_SPACE_LEN(chain);
			EVUTIL_ASSERT(chain == *buf->last_with_datap);
			if (space) {
				avail += space;
				++used;
			}
		} else {
			/* No data in chain; realign it. */
			chain->misalign = 0;
			avail += chain->buffer_len;
			++used;
		}
		if (avail >= datlen) {
			/* There is already enough space.  Just return */
			return (0);
		}
		if (used == n)
			break;
	}

	/* There wasn't enough space in the first n chains with space in
	 * them. Either add a new chain with enough space, or replace all
	 * empty chains with one that has enough space, depending on n. */
	if (used < n) {
		/* The loop ran off the end of the chains before it hit n
		 * chains; we can add another. */
		EVUTIL_ASSERT(chain == NULL);

		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL)
			return (-1);

		buf->last->next = tmp;
		buf->last = tmp;
		/* (we would only set last_with_data if we added the first
		 * chain. But if the buffer had no chains, we would have
		 * just allocated a new chain earlier) */
		return (0);
	} else {
		/* Nuke _all_ the empty chains. */
		int rmv_all = 0; /* True iff we removed last_with_data. */
		chain = *buf->last_with_datap;
		if (!chain->off) {
			EVUTIL_ASSERT(chain == buf->first);
			rmv_all = 1;
			avail = 0;
		} else {
			avail = (size_t) CHAIN_SPACE_LEN(chain);
			chain = chain->next;
		}


		for (; chain; chain = next) {
			next = chain->next;
			EVUTIL_ASSERT(chain->off == 0);
			evbuffer_chain_free(chain);
		}
		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL) {
			/* Allocation failed: restore a consistent list
			 * before reporting the error. */
			if (rmv_all) {
				ZERO_CHAIN(buf);
			} else {
				buf->last = *buf->last_with_datap;
				(*buf->last_with_datap)->next = NULL;
			}
			return (-1);
		}

		if (rmv_all) {
			buf->first = buf->last = tmp;
			buf->last_with_datap = &buf->first;
		} else {
			(*buf->last_with_datap)->next = tmp;
			buf->last = tmp;
		}
		return (0);
	}
}

/* Public wrapper: guarantees datlen writable bytes in one contiguous
 * chain.  Returns 0 on success, -1 on allocation failure. */
int
evbuffer_expand(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain;

	EVBUFFER_LOCK(buf);
	chain = evbuffer_expand_singlechain(buf, datlen);
	EVBUFFER_UNLOCK(buf);
	return chain ? 0 : -1;
}

/*
 * Reads data from a file descriptor into a buffer.
 */

#if defined(_EVENT_HAVE_SYS_UIO_H) || defined(WIN32)
#define USE_IOVEC_IMPL
#endif

#ifdef USE_IOVEC_IMPL

#ifdef _EVENT_HAVE_SYS_UIO_H
/* number of iovec we use for writev, fragmentation is going to determine
 * how much we end up writing */

#define DEFAULT_WRITE_IOVEC 128

/* Never exceed the platform's limit on iovecs per writev() call. */
#if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC UIO_MAXIOV
#elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC IOV_MAX
#else
#define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
#endif

/* Scatter/gather element: struct iovec on POSIX, WSABUF on win32. */
#define IOV_TYPE struct iovec
#define IOV_PTR_FIELD iov_base
#define IOV_LEN_FIELD iov_len
#define IOV_LEN_TYPE size_t
#else
#define NUM_WRITE_IOVEC 16
#define IOV_TYPE WSABUF
#define IOV_PTR_FIELD buf
#define IOV_LEN_FIELD len
#define IOV_LEN_TYPE unsigned long
#endif
#endif
/* Maximum number of chains a single socket read is scattered across. */
#define NUM_READ_IOVEC 4

/* Cap on how much one evbuffer_read() call will pull from a socket. */
#define EVBUFFER_MAX_READ 4096

/** Helper function to figure out which space to use for reading data into
    an evbuffer.  Internal use only.

    @param buf The buffer to read into
    @param howmuch How much we want to read.
    @param vecs An array of two or more iovecs or WSABUFs.
    @param n_vecs_avail The length of vecs
    @param chainp A pointer to a variable to hold the first chain we're
      reading into.
    @param exact Boolean: if true, we do not provide more than 'howmuch'
      space in the vectors, even if more space is available.
    @return The number of buffers we're using.
 */
int
_evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
    struct evbuffer_iovec *vecs, int n_vecs_avail,
    struct evbuffer_chain ***chainp, int exact)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain **firstchainp;
	size_t so_far;
	int i;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (howmuch < 0)
		return -1;

	so_far = 0;
	/* Let firstchain be the first chain with any space on it */
	firstchainp = buf->last_with_datap;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* Hand out one vector per chain until we run out of vectors or
	 * have offered 'howmuch' bytes of space. */
	for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
		size_t avail = (size_t) CHAIN_SPACE_LEN(chain);
		if (avail > (howmuch - so_far) && exact)
			avail = howmuch - so_far;
		vecs[i].iov_base = CHAIN_SPACE_PTR(chain);
		vecs[i].iov_len = avail;
		so_far += avail;
		chain = chain->next;
	}

	*chainp = firstchainp;
	return i;
}

/* Return how many bytes can be read from 'fd' right now: the FIONREAD
 * count where that ioctl exists (-1 if the ioctl fails), or
 * EVBUFFER_MAX_READ when FIONREAD is unavailable. */
static int
get_n_bytes_readable_on_socket(evutil_socket_t fd)
{
#if defined(FIONREAD) && defined(WIN32)
	unsigned long lng = EVBUFFER_MAX_READ;
	if (ioctlsocket(fd, FIONREAD, &lng) < 0)
		return -1;
	return (int)lng;
#elif defined(FIONREAD)
	int n = EVBUFFER_MAX_READ;
	if (ioctl(fd, FIONREAD, &n) < 0)
		return -1;
	return n;
#else
	return EVBUFFER_MAX_READ;
#endif
}

/* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
 * as howmuch?
*/ michael@0: int michael@0: evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch) michael@0: { michael@0: struct evbuffer_chain **chainp; michael@0: int n; michael@0: int result; michael@0: michael@0: #ifdef USE_IOVEC_IMPL michael@0: int nvecs, i, remaining; michael@0: #else michael@0: struct evbuffer_chain *chain; michael@0: unsigned char *p; michael@0: #endif michael@0: michael@0: EVBUFFER_LOCK(buf); michael@0: michael@0: if (buf->freeze_end) { michael@0: result = -1; michael@0: goto done; michael@0: } michael@0: michael@0: n = get_n_bytes_readable_on_socket(fd); michael@0: if (n <= 0 || n > EVBUFFER_MAX_READ) michael@0: n = EVBUFFER_MAX_READ; michael@0: if (howmuch < 0 || howmuch > n) michael@0: howmuch = n; michael@0: michael@0: #ifdef USE_IOVEC_IMPL michael@0: /* Since we can use iovecs, we're willing to use the last michael@0: * NUM_READ_IOVEC chains. */ michael@0: if (_evbuffer_expand_fast(buf, howmuch, NUM_READ_IOVEC) == -1) { michael@0: result = -1; michael@0: goto done; michael@0: } else { michael@0: IOV_TYPE vecs[NUM_READ_IOVEC]; michael@0: #ifdef _EVBUFFER_IOVEC_IS_NATIVE michael@0: nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs, michael@0: NUM_READ_IOVEC, &chainp, 1); michael@0: #else michael@0: /* We aren't using the native struct iovec. Therefore, michael@0: we are on win32. */ michael@0: struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC]; michael@0: nvecs = _evbuffer_read_setup_vecs(buf, howmuch, ev_vecs, 2, michael@0: &chainp, 1); michael@0: michael@0: for (i=0; i < nvecs; ++i) michael@0: WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]); michael@0: #endif michael@0: michael@0: #ifdef WIN32 michael@0: { michael@0: DWORD bytesRead; michael@0: DWORD flags=0; michael@0: if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) { michael@0: /* The read failed. It might be a close, michael@0: * or it might be an error. 
*/ michael@0: if (WSAGetLastError() == WSAECONNABORTED) michael@0: n = 0; michael@0: else michael@0: n = -1; michael@0: } else michael@0: n = bytesRead; michael@0: } michael@0: #else michael@0: n = readv(fd, vecs, nvecs); michael@0: #endif michael@0: } michael@0: michael@0: #else /*!USE_IOVEC_IMPL*/ michael@0: /* If we don't have FIONREAD, we might waste some space here */ michael@0: /* XXX we _will_ waste some space here if there is any space left michael@0: * over on buf->last. */ michael@0: if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) { michael@0: result = -1; michael@0: goto done; michael@0: } michael@0: michael@0: /* We can append new data at this point */ michael@0: p = chain->buffer + chain->misalign + chain->off; michael@0: michael@0: #ifndef WIN32 michael@0: n = read(fd, p, howmuch); michael@0: #else michael@0: n = recv(fd, p, howmuch, 0); michael@0: #endif michael@0: #endif /* USE_IOVEC_IMPL */ michael@0: michael@0: if (n == -1) { michael@0: result = -1; michael@0: goto done; michael@0: } michael@0: if (n == 0) { michael@0: result = 0; michael@0: goto done; michael@0: } michael@0: michael@0: #ifdef USE_IOVEC_IMPL michael@0: remaining = n; michael@0: for (i=0; i < nvecs; ++i) { michael@0: ev_ssize_t space = (ev_ssize_t) CHAIN_SPACE_LEN(*chainp); michael@0: if (space < remaining) { michael@0: (*chainp)->off += space; michael@0: remaining -= (int)space; michael@0: } else { michael@0: (*chainp)->off += remaining; michael@0: buf->last_with_datap = chainp; michael@0: break; michael@0: } michael@0: chainp = &(*chainp)->next; michael@0: } michael@0: #else michael@0: chain->off += n; michael@0: advance_last_with_data(buf); michael@0: #endif michael@0: buf->total_len += n; michael@0: buf->n_add_for_cb += n; michael@0: michael@0: /* Tell someone about changes in this buffer */ michael@0: evbuffer_invoke_callbacks(buf); michael@0: result = n; michael@0: done: michael@0: EVBUFFER_UNLOCK(buf); michael@0: return result; michael@0: } michael@0: 
#ifdef WIN32
/* Append up to 'howmuch' bytes read from 'fd' to 'buf', using at most two
 * reserved extents.  Returns the byte count of the final successful read,
 * 0 on immediate EOF, or a negative value on error.  win32-only helper
 * for the linear fallback of evbuffer_add_file(). */
static int
evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd, ev_ssize_t howmuch)
{
	int result;
	int nchains, n;
	struct evbuffer_iovec v[2];

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		result = -1;
		goto done;
	}

	if (howmuch < 0)
		howmuch = 16384;


	/* XXX we _will_ waste some space here if there is any space left
	 * over on buf->last. */
	nchains = evbuffer_reserve_space(buf, howmuch, v, 2);
	if (nchains < 1 || nchains > 2) {
		result = -1;
		goto done;
	}
	n = read((int)fd, v[0].iov_base, (unsigned int)v[0].iov_len);
	if (n <= 0) {
		result = n;
		goto done;
	}
	v[0].iov_len = (IOV_LEN_TYPE) n; /* XXXX another problem with big n.*/
	if (nchains > 1) {
		n = read((int)fd, v[1].iov_base, (unsigned int)v[1].iov_len);
		if (n <= 0) {
			/* Second read failed: commit only the first extent
			 * and report how much of it was filled. */
			result = (unsigned long) v[0].iov_len;
			evbuffer_commit_space(buf, v, 1);
			goto done;
		}
		v[1].iov_len = n;
	}
	evbuffer_commit_space(buf, v, nchains);

	result = n;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
#endif

#ifdef USE_IOVEC_IMPL
/* Write up to 'howmuch' bytes from the front of 'buffer' to 'fd' with a
 * single writev()/WSASend() call, stopping early at any sendfile-backed
 * chain.  Returns the byte count written, or -1 on error.  Caller must
 * hold the buffer lock. */
static inline int
evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	IOV_TYPE iov[NUM_WRITE_IOVEC];
	struct evbuffer_chain *chain = buffer->first;
	int n, i = 0;

	if (howmuch < 0)
		return -1;

	ASSERT_EVBUFFER_LOCKED(buffer);
	/* XXX make this top out at some maximal data length?
	   if the
	 * buffer has (say) 1MB in it, split over 128 chains, there's
	 * no way it all gets written in one go. */
	while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
#ifdef USE_SENDFILE
		/* we cannot write the file info via writev */
		if (chain->flags & EVBUFFER_SENDFILE)
			break;
#endif
		iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
		if ((size_t)howmuch >= chain->off) {
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
			howmuch -= chain->off;
		} else {
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
			break;
		}
		chain = chain->next;
	}
	if (! i)
		return 0;
#ifdef WIN32
	{
		DWORD bytesSent;
		if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
			n = -1;
		else
			n = bytesSent;
	}
#else
	n = writev(fd, iov, i);
#endif
	return (n);
}
#endif

#ifdef USE_SENDFILE
/* Write the first (sendfile-backed) chain of 'buffer' to 'fd' using the
 * platform's sendfile()-style call.  Returns the number of bytes written,
 * or -1 on a non-retriable error; on EAGAIN/EINTR the Linux/Solaris paths
 * return the partial count (possibly 0).  Caller must hold the lock. */
static inline int
evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	struct evbuffer_chain *chain = buffer->first;
	struct evbuffer_chain_fd *info =
	    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
	int res;
	off_t len = chain->off;
#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
	ev_ssize_t res;
	off_t offset = chain->misalign;
#endif

	ASSERT_EVBUFFER_LOCKED(buffer);

#if defined(SENDFILE_IS_MACOSX)
	res = sendfile(info->fd, fd, chain->misalign, &len, NULL, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_FREEBSD)
	res = sendfile(info->fd, fd, chain->misalign, chain->off, NULL, &len, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_LINUX)
	/* TODO(niels): implement splice */
	res = sendfile(fd, info->fd, &offset, chain->off);
	if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
		/* if this is EAGAIN or EINTR return 0; otherwise, -1 */
		return (0);
	}
	return (res);
#elif defined(SENDFILE_IS_SOLARIS)
	{
		const off_t offset_orig = offset;
		res = sendfile(fd, info->fd, &offset, chain->off);
		if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
			if (offset - offset_orig)
				return offset - offset_orig;
			/* if this is EAGAIN or EINTR and no bytes were
			 * written, return 0 */
			return (0);
		}
		return (res);
	}
#endif
}
#endif

/* Write at most 'howmuch' bytes (all data if negative) from the front of
 * 'buffer' to 'fd', draining what was written.  Returns the number of
 * bytes written, or -1 on error or a frozen buffer front. */
int
evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	int n = -1;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
		howmuch = buffer->total_len;

	if (howmuch > 0) {
#ifdef USE_SENDFILE
		struct evbuffer_chain *chain = buffer->first;
		if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
			n =
 evbuffer_write_sendfile(buffer, fd, howmuch);
		else {
#endif
#ifdef USE_IOVEC_IMPL
		n = evbuffer_write_iovec(buffer, fd, howmuch);
#elif defined(WIN32)
		/* XXX(nickm) Don't disable this code until we know if
		 * the WSARecv code above works. */
		void *p = evbuffer_pullup(buffer, howmuch);
		n = send(fd, p, howmuch, 0);
#else
		void *p = evbuffer_pullup(buffer, howmuch);
		n = write(fd, p, howmuch);
#endif
#ifdef USE_SENDFILE
		}
#endif
	}

	/* Whatever actually went out is removed from the buffer. */
	if (n > 0)
		evbuffer_drain(buffer, n);

done:
	EVBUFFER_UNLOCK(buffer);
	return (n);
}

/* Write the whole buffer to 'fd'; equivalent to evbuffer_write_atmost()
 * with no length limit. */
int
evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
{
	return evbuffer_write_atmost(buffer, fd, -1);
}

/* Locate the first occurrence of the 'len'-byte string 'what' in
 * 'buffer', linearizing the buffer through the end of the match as a
 * side effect.  Returns a pointer into the buffer, or NULL if absent. */
unsigned char *
evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
{
	unsigned char *search;
	struct evbuffer_ptr ptr;

	EVBUFFER_LOCK(buffer);

	ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
	if (ptr.pos < 0) {
		search = NULL;
	} else {
		search = evbuffer_pullup(buffer, ptr.pos + len);
		if (search)
			search += ptr.pos;
	}
	EVBUFFER_UNLOCK(buffer);
	return search;
}

/* Move 'pos' within 'buf': to absolute offset 'position' for
 * EVBUFFER_PTR_SET, or forward by 'position' bytes for EVBUFFER_PTR_ADD.
 * Returns 0 on success, -1 when the target lies past the end of data
 * (pos is then invalidated with pos->pos == -1). */
int
evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t position, enum evbuffer_ptr_how how)
{
	size_t left = position;
	struct evbuffer_chain *chain = NULL;

	EVBUFFER_LOCK(buf);

	switch (how) {
	case EVBUFFER_PTR_SET:
		chain = buf->first;
		pos->pos = position;
		position = 0;
		break;
	case EVBUFFER_PTR_ADD:
		/* this avoids iterating over all previous chains if
		   we just want to advance the position */
		chain = pos->_internal.chain;
		pos->pos += position;
		position = pos->_internal.pos_in_chain;
		break;
	}

	/* Walk forward until the remaining offset fits inside 'chain'. */
	while (chain && position + left >= chain->off) {
		left -= chain->off - position;
		chain = chain->next;
		position = 0;
	}
	if (chain) {
		pos->_internal.chain = chain;
		pos->_internal.pos_in_chain = position + left;
	} else {
		pos->_internal.chain = NULL;
		pos->pos = -1;
	}

	EVBUFFER_UNLOCK(buf);

	return chain != NULL ? 0 : -1;
}

/**
   Compare the bytes in buf at position pos to the len bytes in mem.  Return
   less than 0, 0, or greater than 0 as memcmp.
 */
static int
evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
    const char *mem, size_t len)
{
	struct evbuffer_chain *chain;
	size_t position;
	int r;

	ASSERT_EVBUFFER_LOCKED(buf);

	/* Not enough data after 'pos' to compare 'len' bytes. */
	if (pos->pos + len > buf->total_len)
		return -1;

	chain = pos->_internal.chain;
	position = pos->_internal.pos_in_chain;
	while (len && chain) {
		size_t n_comparable;
		/* Compare at most what remains in the current chain. */
		if (len + position > chain->off)
			n_comparable = chain->off - position;
		else
			n_comparable = len;
		r = memcmp(chain->buffer + chain->misalign + position, mem,
		    n_comparable);
		if (r)
			return r;
		mem += n_comparable;
		len -= n_comparable;
		position = 0;
		chain = chain->next;
	}

	return 0;
}

/* Find the first occurrence of 'what' at or after 'start'; equivalent to
 * evbuffer_search_range() with no end bound. */
struct evbuffer_ptr
evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
{
	return evbuffer_search_range(buffer, what, len, start, NULL);
}

/* Search 'buffer' for the 'len'-byte string 'what' between 'start' and
 * 'end' (either may be NULL).  Returns a position whose pos field is -1
 * when the string is not found. */
struct evbuffer_ptr
evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
{
	struct evbuffer_ptr pos;
	struct evbuffer_chain *chain, *last_chain = NULL;
	const unsigned char *p;
	char first;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&pos, start, sizeof(pos));
		chain = pos._internal.chain;
	} else {
		pos.pos = 0;
		chain = pos._internal.chain = buffer->first;
		pos._internal.pos_in_chain = 0;
	}

	if (end)
michael@0: last_chain = end->_internal.chain; michael@0: michael@0: if (!len || len > EV_SSIZE_MAX) michael@0: goto done; michael@0: michael@0: first = what[0]; michael@0: michael@0: while (chain) { michael@0: const unsigned char *start_at = michael@0: chain->buffer + chain->misalign + michael@0: pos._internal.pos_in_chain; michael@0: p = memchr(start_at, first, michael@0: chain->off - pos._internal.pos_in_chain); michael@0: if (p) { michael@0: pos.pos += p - start_at; michael@0: pos._internal.pos_in_chain += p - start_at; michael@0: if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) { michael@0: if (end && pos.pos + (ev_ssize_t)len > end->pos) michael@0: goto not_found; michael@0: else michael@0: goto done; michael@0: } michael@0: ++pos.pos; michael@0: ++pos._internal.pos_in_chain; michael@0: if (pos._internal.pos_in_chain == chain->off) { michael@0: chain = pos._internal.chain = chain->next; michael@0: pos._internal.pos_in_chain = 0; michael@0: } michael@0: } else { michael@0: if (chain == last_chain) michael@0: goto not_found; michael@0: pos.pos += chain->off - pos._internal.pos_in_chain; michael@0: chain = pos._internal.chain = chain->next; michael@0: pos._internal.pos_in_chain = 0; michael@0: } michael@0: } michael@0: michael@0: not_found: michael@0: pos.pos = -1; michael@0: pos._internal.chain = NULL; michael@0: done: michael@0: EVBUFFER_UNLOCK(buffer); michael@0: return pos; michael@0: } michael@0: michael@0: int michael@0: evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len, michael@0: struct evbuffer_ptr *start_at, michael@0: struct evbuffer_iovec *vec, int n_vec) michael@0: { michael@0: struct evbuffer_chain *chain; michael@0: int idx = 0; michael@0: ev_ssize_t len_so_far = 0; michael@0: michael@0: EVBUFFER_LOCK(buffer); michael@0: michael@0: if (start_at) { michael@0: chain = start_at->_internal.chain; michael@0: len_so_far = chain->off michael@0: - start_at->_internal.pos_in_chain; michael@0: idx = 1; michael@0: if (n_vec > 0) { michael@0: 
vec[0].iov_base = chain->buffer + chain->misalign michael@0: + start_at->_internal.pos_in_chain; michael@0: vec[0].iov_len = len_so_far; michael@0: } michael@0: chain = chain->next; michael@0: } else { michael@0: chain = buffer->first; michael@0: } michael@0: michael@0: if (n_vec == 0 && len < 0) { michael@0: /* If no vectors are provided and they asked for "everything", michael@0: * pretend they asked for the actual available amount. */ michael@0: len = buffer->total_len - len_so_far; michael@0: } michael@0: michael@0: while (chain) { michael@0: if (len >= 0 && len_so_far >= len) michael@0: break; michael@0: if (idxbuffer + chain->misalign; michael@0: vec[idx].iov_len = chain->off; michael@0: } else if (len<0) { michael@0: break; michael@0: } michael@0: ++idx; michael@0: len_so_far += chain->off; michael@0: chain = chain->next; michael@0: } michael@0: michael@0: EVBUFFER_UNLOCK(buffer); michael@0: michael@0: return idx; michael@0: } michael@0: michael@0: michael@0: int michael@0: evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap) michael@0: { michael@0: char *buffer; michael@0: size_t space; michael@0: int sz, result = -1; michael@0: va_list aq; michael@0: struct evbuffer_chain *chain; michael@0: michael@0: michael@0: EVBUFFER_LOCK(buf); michael@0: michael@0: if (buf->freeze_end) { michael@0: goto done; michael@0: } michael@0: michael@0: /* make sure that at least some space is available */ michael@0: if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL) michael@0: goto done; michael@0: michael@0: for (;;) { michael@0: #if 0 michael@0: size_t used = chain->misalign + chain->off; michael@0: buffer = (char *)chain->buffer + chain->misalign + chain->off; michael@0: EVUTIL_ASSERT(chain->buffer_len >= used); michael@0: space = chain->buffer_len - used; michael@0: #endif michael@0: buffer = (char*) CHAIN_SPACE_PTR(chain); michael@0: space = (size_t) CHAIN_SPACE_LEN(chain); michael@0: michael@0: #ifndef va_copy michael@0: #define va_copy(dst, 
 src) memcpy(&(dst), &(src), sizeof(va_list))
#endif
		va_copy(aq, ap);

		sz = evutil_vsnprintf(buffer, space, fmt, aq);

		va_end(aq);

		if (sz < 0)
			goto done;
		if ((size_t)sz < space) {
			/* The formatted text fit: account for it and stop. */
			chain->off += sz;
			buf->total_len += sz;
			buf->n_add_for_cb += sz;

			advance_last_with_data(buf);
			evbuffer_invoke_callbacks(buf);
			result = sz;
			goto done;
		}
		/* Output was truncated: grow to the exact size and retry. */
		if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
			goto done;
	}
	/* NOTREACHED */

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* printf()-style append to 'buf'.  Returns the number of bytes added,
 * or -1 on error. */
int
evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
{
	int res = -1;
	va_list ap;

	va_start(ap, fmt);
	res = evbuffer_add_vprintf(buf, fmt, ap);
	va_end(ap);

	return (res);
}

/* Append a zero-copy reference to 'datlen' bytes at 'data'; 'cleanupfn'
 * (if non-NULL) runs with 'extra' when the chain is released. */
int
evbuffer_add_reference(struct evbuffer *outbuf,
    const void *data, size_t datlen,
    evbuffer_ref_cleanup_cb cleanupfn, void *extra)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain_reference *info;
	int result = -1;

	chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
	if (!chain)
		return (-1);
	chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
	chain->buffer = (u_char *)data;
	chain->buffer_len = datlen;
	chain->off = datlen;

	info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
	info->cleanupfn = cleanupfn;
	info->extra = extra;

	EVBUFFER_LOCK(outbuf);
	if (outbuf->freeze_end) {
		/* don't call chain_free; we do not want to actually invoke
		 * the cleanup function */
		mm_free(chain);
		goto done;
	}
	evbuffer_chain_insert(outbuf, chain);
	outbuf->n_add_for_cb += datlen;

	evbuffer_invoke_callbacks(outbuf);

	result = 0;
done:
	EVBUFFER_UNLOCK(outbuf);

	return result;
}

/* TODO(niels): maybe we don't want to own the fd, however, in that
 * case, we should dup it - dup is cheap.  Perhaps, we should use a
 * callback instead?
 */
/* TODO(niels): we may want to add to automagically convert to mmap, in
 * case evbuffer_remove() or evbuffer_pullup() are being used.
 */
/* Append 'length' bytes starting at 'offset' of the open file 'fd' to
 * 'outbuf', preferring sendfile, then mmap, then a plain read loop.
 * Returns 0 on success, -1 on failure. */
int
evbuffer_add_file(struct evbuffer *outbuf, int fd,
    ev_off_t offset, ev_off_t length)
{
#if defined(USE_SENDFILE) || defined(_EVENT_HAVE_MMAP)
	struct evbuffer_chain *chain;
	struct evbuffer_chain_fd *info;
#endif
#if defined(USE_SENDFILE)
	int sendfile_okay = 1;
#endif
	int ok = 1;

#if defined(USE_SENDFILE)
	/* sendfile only helps when the buffer drains straight to an fd. */
	if (use_sendfile) {
		EVBUFFER_LOCK(outbuf);
		sendfile_okay = outbuf->flags & EVBUFFER_FLAG_DRAINS_TO_FD;
		EVBUFFER_UNLOCK(outbuf);
	}

	if (use_sendfile && sendfile_okay) {
		chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
		if (chain == NULL) {
			event_warn("%s: out of memory", __func__);
			return (-1);
		}

		chain->flags |= EVBUFFER_SENDFILE | EVBUFFER_IMMUTABLE;
		chain->buffer = NULL;	/* no reading possible */
		chain->buffer_len = length + offset;
		chain->off = length;
		chain->misalign = offset;

		info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
		info->fd = fd;

		EVBUFFER_LOCK(outbuf);
		if (outbuf->freeze_end) {
			mm_free(chain);
			ok = 0;
		} else {
			outbuf->n_add_for_cb += length;
			evbuffer_chain_insert(outbuf, chain);
		}
	} else
#endif
#if defined(_EVENT_HAVE_MMAP)
	if (use_mmap) {
		/* Map from file offset 0; the unwanted leading 'offset'
		 * bytes are drained once the chain is inserted (below). */
		void *mapped = mmap(NULL, length + offset, PROT_READ,
#ifdef MAP_NOCACHE
		    MAP_NOCACHE |
#endif
#ifdef MAP_FILE
		    MAP_FILE |
#endif
		    MAP_PRIVATE,
		    fd, 0);
		/* some mmap implementations require offset to be a multiple of
		 * the page size.  most users of this api, are likely to use 0
		 * so mapping everything is not likely to be a problem.
		 * TODO(niels): determine page size and round offset to that
		 * page size to avoid mapping too much memory.
michael@0: */ michael@0: if (mapped == MAP_FAILED) { michael@0: event_warn("%s: mmap(%d, %d, %zu) failed", michael@0: __func__, fd, 0, (size_t)(offset + length)); michael@0: return (-1); michael@0: } michael@0: chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd)); michael@0: if (chain == NULL) { michael@0: event_warn("%s: out of memory", __func__); michael@0: munmap(mapped, length); michael@0: return (-1); michael@0: } michael@0: michael@0: chain->flags |= EVBUFFER_MMAP | EVBUFFER_IMMUTABLE; michael@0: chain->buffer = mapped; michael@0: chain->buffer_len = length + offset; michael@0: chain->off = length + offset; michael@0: michael@0: info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain); michael@0: info->fd = fd; michael@0: michael@0: EVBUFFER_LOCK(outbuf); michael@0: if (outbuf->freeze_end) { michael@0: info->fd = -1; michael@0: evbuffer_chain_free(chain); michael@0: ok = 0; michael@0: } else { michael@0: outbuf->n_add_for_cb += length; michael@0: michael@0: evbuffer_chain_insert(outbuf, chain); michael@0: michael@0: /* we need to subtract whatever we don't need */ michael@0: evbuffer_drain(outbuf, offset); michael@0: } michael@0: } else michael@0: #endif michael@0: { michael@0: /* the default implementation */ michael@0: struct evbuffer *tmp = evbuffer_new(); michael@0: ev_ssize_t read; michael@0: michael@0: if (tmp == NULL) michael@0: return (-1); michael@0: michael@0: #ifdef WIN32 michael@0: #define lseek _lseeki64 michael@0: #endif michael@0: if (lseek(fd, offset, SEEK_SET) == -1) { michael@0: evbuffer_free(tmp); michael@0: return (-1); michael@0: } michael@0: michael@0: /* we add everything to a temporary buffer, so that we michael@0: * can abort without side effects if the read fails. 
		 */
		while (length) {
			/* NOTE(review): evbuffer_readfile() is declared above
			 * only under #ifdef WIN32; confirm this fallback path
			 * is compiled only where that helper exists. */
			read = evbuffer_readfile(tmp, fd, (ev_ssize_t)length);
			if (read == -1) {
				evbuffer_free(tmp);
				return (-1);
			}

			length -= read;
		}

		EVBUFFER_LOCK(outbuf);
		if (outbuf->freeze_end) {
			evbuffer_free(tmp);
			ok = 0;
		} else {
			evbuffer_add_buffer(outbuf, tmp);
			evbuffer_free(tmp);

#ifdef WIN32
#define close _close
#endif
			/* On this path the data is copied, so the fd is no
			 * longer needed and is closed here. */
			close(fd);
		}
	}

	if (ok)
		evbuffer_invoke_callbacks(outbuf);
	EVBUFFER_UNLOCK(outbuf);

	return ok ? 0 : -1;
}


/* Replace all callbacks on 'buffer' with the single deprecated-style
 * callback 'cb' (or just remove them all when cb is NULL). */
void
evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
{
	EVBUFFER_LOCK(buffer);

	if (!TAILQ_EMPTY(&buffer->callbacks))
		evbuffer_remove_all_callbacks(buffer);

	if (cb) {
		struct evbuffer_cb_entry *ent =
		    evbuffer_add_cb(buffer, NULL, cbarg);
		ent->cb.cb_obsolete = cb;
		ent->flags |= EVBUFFER_CB_OBSOLETE;
	}
	EVBUFFER_UNLOCK(buffer);
}

/* Register 'cb' to run when 'buffer' changes size.  Returns the new
 * entry, or NULL on allocation failure. */
struct evbuffer_cb_entry *
evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
{
	struct evbuffer_cb_entry *e;
	if (!
	    (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
		return NULL;
	EVBUFFER_LOCK(buffer);
	e->cb.cb_func = cb;
	e->cbarg = cbarg;
	e->flags = EVBUFFER_CB_ENABLED;
	TAILQ_INSERT_HEAD(&buffer->callbacks, e, next);
	EVBUFFER_UNLOCK(buffer);
	return e;
}

/* Unregister and free a callback entry previously returned by
 * evbuffer_add_cb(). */
int
evbuffer_remove_cb_entry(struct evbuffer *buffer,
    struct evbuffer_cb_entry *ent)
{
	EVBUFFER_LOCK(buffer);
	TAILQ_REMOVE(&buffer->callbacks, ent, next);
	EVBUFFER_UNLOCK(buffer);
	mm_free(ent);
	return 0;
}

/* Remove the first callback entry matching 'cb' and 'cbarg'.
 * Returns 0 on success, -1 if no match was found. */
int
evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
{
	struct evbuffer_cb_entry *cbent;
	int result = -1;
	EVBUFFER_LOCK(buffer);
	TAILQ_FOREACH(cbent, &buffer->callbacks, next) {
		if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
			result = evbuffer_remove_cb_entry(buffer, cbent);
			goto done;
		}
	}
done:
	EVBUFFER_UNLOCK(buffer);
	return result;
}

/* Set user-settable flag bits on a callback entry. */
int
evbuffer_cb_set_flags(struct evbuffer *buffer,
    struct evbuffer_cb_entry *cb, ev_uint32_t flags)
{
	/* the user isn't allowed to mess with these. */
	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
	EVBUFFER_LOCK(buffer);
	cb->flags |= flags;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

/* Clear user-settable flag bits on a callback entry. */
int
evbuffer_cb_clear_flags(struct evbuffer *buffer,
    struct evbuffer_cb_entry *cb, ev_uint32_t flags)
{
	/* the user isn't allowed to mess with these. */
	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
	EVBUFFER_LOCK(buffer);
	cb->flags &= ~flags;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

/* Forbid removing data from the front of 'buffer' (start != 0) or
 * adding data at its end (start == 0). */
int
evbuffer_freeze(struct evbuffer *buffer, int start)
{
	EVBUFFER_LOCK(buffer);
	if (start)
		buffer->freeze_start = 1;
	else
		buffer->freeze_end = 1;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

/* Undo evbuffer_freeze() for the chosen end of the buffer. */
int
evbuffer_unfreeze(struct evbuffer *buffer, int start)
{
	EVBUFFER_LOCK(buffer);
	if (start)
		buffer->freeze_start = 0;
	else
		buffer->freeze_end = 0;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

#if 0
void
evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) {
		cb->size_before_suspend = evbuffer_get_length(buffer);
		cb->flags |= EVBUFFER_CB_SUSPENDED;
	}
}

void
evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if ((cb->flags & EVBUFFER_CB_SUSPENDED)) {
		unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);
		size_t sz = cb->size_before_suspend;
		cb->flags &= ~(EVBUFFER_CB_SUSPENDED|
		    EVBUFFER_CB_CALL_ON_UNSUSPEND);
		cb->size_before_suspend = 0;
		if (call && (cb->flags & EVBUFFER_CB_ENABLED)) {
			cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg);
		}
	}
}
#endif

/* These hooks are exposed so that the unit tests can temporarily disable
 * sendfile support in order to test mmap, or both to test linear
 * access.  Don't use it; if we need to add a way to disable sendfile support
 * in the future, it will probably be via an alternate version of
 * evbuffer_add_file() with a 'flags' argument.
 */
int _evbuffer_testing_use_sendfile(void);
int _evbuffer_testing_use_mmap(void);
int _evbuffer_testing_use_linear_file_access(void);

/* Force the sendfile path for evbuffer_add_file(); returns 1 when
 * sendfile is compiled in, else 0. */
int
_evbuffer_testing_use_sendfile(void)
{
	int ok = 0;
#ifdef USE_SENDFILE
	use_sendfile = 1;
	ok = 1;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 0;
#endif
	return ok;
}
/* Force the mmap path for evbuffer_add_file(); returns 1 when mmap is
 * compiled in, else 0. */
int
_evbuffer_testing_use_mmap(void)
{
	int ok = 0;
#ifdef USE_SENDFILE
	use_sendfile = 0;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 1;
	ok = 1;
#endif
	return ok;
}
/* Force the plain read() fallback for evbuffer_add_file(). */
int
_evbuffer_testing_use_linear_file_access(void)
{
#ifdef USE_SENDFILE
	use_sendfile = 0;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 0;
#endif
	return 1;
}