/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Copyright (C) 2010 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
michael@0: * michael@0: * ***** END LICENSE BLOCK ***** */ michael@0: michael@0: #ifndef yarr_BumpPointerAllocator_h michael@0: #define yarr_BumpPointerAllocator_h michael@0: michael@0: #include "yarr/PageAllocation.h" michael@0: michael@0: namespace WTF { michael@0: michael@0: #if WTF_CPU_SPARC michael@0: #define MINIMUM_BUMP_POOL_SIZE 0x2000 michael@0: #elif WTF_CPU_IA64 michael@0: #define MINIMUM_BUMP_POOL_SIZE 0x4000 michael@0: #else michael@0: #define MINIMUM_BUMP_POOL_SIZE 0x1000 michael@0: #endif michael@0: michael@0: class BumpPointerPool { michael@0: public: michael@0: // ensureCapacity will check whether the current pool has capacity to michael@0: // allocate 'size' bytes of memory If it does not, it will attempt to michael@0: // allocate a new pool (which will be added to this one in a chain). michael@0: // michael@0: // If allocation fails (out of memory) this method will return null. michael@0: // If the return value is non-null, then callers should update any michael@0: // references they have to this current (possibly full) BumpPointerPool michael@0: // to instead point to the newly returned BumpPointerPool. michael@0: BumpPointerPool* ensureCapacity(size_t size) michael@0: { michael@0: void* allocationEnd = static_cast(m_current) + size; michael@0: ASSERT(allocationEnd > m_current); // check for overflow michael@0: if (allocationEnd <= static_cast(this)) michael@0: return this; michael@0: return ensureCapacityCrossPool(this, size); michael@0: } michael@0: michael@0: // alloc should only be called after calling ensureCapacity; as such michael@0: // alloc will never fail. 
michael@0: void* alloc(size_t size) michael@0: { michael@0: void* current = m_current; michael@0: void* allocationEnd = static_cast(current) + size; michael@0: ASSERT(allocationEnd > current); // check for overflow michael@0: ASSERT(allocationEnd <= static_cast(this)); michael@0: m_current = allocationEnd; michael@0: return current; michael@0: } michael@0: michael@0: // The dealloc method releases memory allocated using alloc. Memory michael@0: // must be released in a LIFO fashion, e.g. if the client calls alloc michael@0: // four times, returning pointer A, B, C, D, then the only valid order michael@0: // in which these may be deallocaed is D, C, B, A. michael@0: // michael@0: // The client may optionally skip some deallocations. In the example michael@0: // above, it would be valid to only explicitly dealloc C, A (D being michael@0: // dealloced along with C, B along with A). michael@0: // michael@0: // If pointer was not allocated from this pool (or pools) then dealloc michael@0: // will CRASH(). Callers should update any references they have to michael@0: // this current BumpPointerPool to instead point to the returned michael@0: // BumpPointerPool. michael@0: BumpPointerPool* dealloc(void* position) michael@0: { michael@0: if ((position >= m_start) && (position <= static_cast(this))) { michael@0: ASSERT(position <= m_current); michael@0: m_current = position; michael@0: return this; michael@0: } michael@0: return deallocCrossPool(this, position); michael@0: } michael@0: michael@0: size_t sizeOfNonHeapData() const michael@0: { michael@0: ASSERT(!m_previous); michael@0: size_t n = 0; michael@0: const BumpPointerPool *curr = this; michael@0: while (curr) { michael@0: n += m_allocation.size(); michael@0: curr = curr->m_next; michael@0: } michael@0: return n; michael@0: } michael@0: michael@0: private: michael@0: // Placement operator new, returns the last 'size' bytes of allocation for use as this. 
michael@0: void* operator new(size_t size, const PageAllocation& allocation) michael@0: { michael@0: ASSERT(size < allocation.size()); michael@0: return reinterpret_cast(reinterpret_cast(allocation.base()) + allocation.size()) - size; michael@0: } michael@0: michael@0: BumpPointerPool(const PageAllocation& allocation) michael@0: : m_current(allocation.base()) michael@0: , m_start(allocation.base()) michael@0: , m_next(0) michael@0: , m_previous(0) michael@0: , m_allocation(allocation) michael@0: { michael@0: } michael@0: michael@0: static BumpPointerPool* create(size_t minimumCapacity = 0) michael@0: { michael@0: // Add size of BumpPointerPool object, check for overflow. michael@0: minimumCapacity += sizeof(BumpPointerPool); michael@0: if (minimumCapacity < sizeof(BumpPointerPool)) michael@0: return 0; michael@0: michael@0: size_t poolSize = MINIMUM_BUMP_POOL_SIZE; michael@0: while (poolSize < minimumCapacity) { michael@0: poolSize <<= 1; michael@0: // The following if check relies on MINIMUM_BUMP_POOL_SIZE being a power of 2! michael@0: ASSERT(!(MINIMUM_BUMP_POOL_SIZE & (MINIMUM_BUMP_POOL_SIZE - 1))); michael@0: if (!poolSize) michael@0: return 0; michael@0: } michael@0: michael@0: PageAllocation allocation = PageAllocation::allocate(poolSize); michael@0: if (!!allocation) michael@0: return new(allocation) BumpPointerPool(allocation); michael@0: return 0; michael@0: } michael@0: michael@0: void shrink() michael@0: { michael@0: ASSERT(!m_previous); michael@0: m_current = m_start; michael@0: while (m_next) { michael@0: BumpPointerPool* nextNext = m_next->m_next; michael@0: m_next->destroy(); michael@0: m_next = nextNext; michael@0: } michael@0: } michael@0: michael@0: void destroy() michael@0: { michael@0: m_allocation.deallocate(); michael@0: } michael@0: michael@0: static BumpPointerPool* ensureCapacityCrossPool(BumpPointerPool* previousPool, size_t size) michael@0: { michael@0: // The pool passed should not have capacity, so we'll start with the next one. 
michael@0: ASSERT(previousPool); michael@0: ASSERT((static_cast(previousPool->m_current) + size) > previousPool->m_current); // check for overflow michael@0: ASSERT((static_cast(previousPool->m_current) + size) > static_cast(previousPool)); michael@0: BumpPointerPool* pool = previousPool->m_next; michael@0: michael@0: while (true) { michael@0: if (!pool) { michael@0: // We've run to the end; allocate a new pool. michael@0: pool = BumpPointerPool::create(size); michael@0: previousPool->m_next = pool; michael@0: pool->m_previous = previousPool; michael@0: return pool; michael@0: } michael@0: michael@0: void* current = pool->m_current; michael@0: void* allocationEnd = static_cast(current) + size; michael@0: ASSERT(allocationEnd > current); // check for overflow michael@0: if (allocationEnd <= static_cast(pool)) michael@0: return pool; michael@0: } michael@0: } michael@0: michael@0: static BumpPointerPool* deallocCrossPool(BumpPointerPool* pool, void* position) michael@0: { michael@0: // Should only be called if position is not in the current pool. michael@0: ASSERT((position < pool->m_start) || (position > static_cast(pool))); michael@0: michael@0: while (true) { michael@0: // Unwind the current pool to the start, move back in the chain to the previous pool. michael@0: pool->m_current = pool->m_start; michael@0: pool = pool->m_previous; michael@0: michael@0: // position was nowhere in the chain! 
michael@0: if (!pool) michael@0: CRASH(); michael@0: michael@0: if ((position >= pool->m_start) && (position <= static_cast(pool))) { michael@0: ASSERT(position <= pool->m_current); michael@0: pool->m_current = position; michael@0: return pool; michael@0: } michael@0: } michael@0: } michael@0: michael@0: void* m_current; michael@0: void* m_start; michael@0: BumpPointerPool* m_next; michael@0: BumpPointerPool* m_previous; michael@0: PageAllocation m_allocation; michael@0: michael@0: friend class BumpPointerAllocator; michael@0: }; michael@0: michael@0: // A BumpPointerAllocator manages a set of BumpPointerPool objects, which michael@0: // can be used for LIFO (stack like) allocation. michael@0: // michael@0: // To begin allocating using this class call startAllocator(). The result michael@0: // of this method will be null if the initial pool allocation fails, or a michael@0: // pointer to a BumpPointerPool object that can be used to perform michael@0: // allocations. Whilst running no memory will be released until michael@0: // stopAllocator() is called. At this point all allocations made through michael@0: // this allocator will be reaped, and underlying memory may be freed. michael@0: // michael@0: // (In practice we will still hold on to the initial pool to allow allocation michael@0: // to be quickly restared, but aditional pools will be freed). michael@0: // michael@0: // This allocator is non-renetrant, it is encumbant on the clients to ensure michael@0: // startAllocator() is not called again until stopAllocator() has been called. 
michael@0: class BumpPointerAllocator { michael@0: public: michael@0: BumpPointerAllocator() michael@0: : m_head(0) michael@0: { michael@0: } michael@0: michael@0: ~BumpPointerAllocator() michael@0: { michael@0: if (m_head) michael@0: m_head->destroy(); michael@0: } michael@0: michael@0: BumpPointerPool* startAllocator() michael@0: { michael@0: if (!m_head) michael@0: m_head = BumpPointerPool::create(); michael@0: return m_head; michael@0: } michael@0: michael@0: void stopAllocator() michael@0: { michael@0: if (m_head) michael@0: m_head->shrink(); michael@0: } michael@0: michael@0: size_t sizeOfNonHeapData() const michael@0: { michael@0: return m_head ? m_head->sizeOfNonHeapData() : 0; michael@0: } michael@0: michael@0: private: michael@0: BumpPointerPool* m_head; michael@0: }; michael@0: michael@0: } michael@0: michael@0: using WTF::BumpPointerAllocator; michael@0: michael@0: #endif /* yarr_BumpPointerAllocator_h */