--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/js/src/yarr/BumpPointerAllocator.h    Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,276 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef yarr_BumpPointerAllocator_h
+#define yarr_BumpPointerAllocator_h
+
+#include "yarr/PageAllocation.h"
+
+namespace WTF {
+
+#if WTF_CPU_SPARC
+#define MINIMUM_BUMP_POOL_SIZE 0x2000
+#elif WTF_CPU_IA64
+#define MINIMUM_BUMP_POOL_SIZE 0x4000
+#else
+#define MINIMUM_BUMP_POOL_SIZE 0x1000
+#endif
+
+class BumpPointerPool {
+public:
+    // ensureCapacity will check whether the current pool has capacity to
+    // allocate 'size' bytes of memory. If it does not, it will attempt to
+    // allocate a new pool (which will be added to this one in a chain).
+    //
+    // If allocation fails (out of memory) this method will return null.
+    // If the return value is non-null, then callers should update any
+    // references they have to this current (possibly full) BumpPointerPool
+    // to instead point to the newly returned BumpPointerPool.
+    BumpPointerPool* ensureCapacity(size_t size)
+    {
+        void* allocationEnd = static_cast<char*>(m_current) + size;
+        ASSERT(allocationEnd > m_current); // check for overflow
+        if (allocationEnd <= static_cast<void*>(this))
+            return this;
+        return ensureCapacityCrossPool(this, size);
+    }
+
+    // alloc should only be called after calling ensureCapacity; as such
+    // alloc will never fail.
+    void* alloc(size_t size)
+    {
+        void* current = m_current;
+        void* allocationEnd = static_cast<char*>(current) + size;
+        ASSERT(allocationEnd > current); // check for overflow
+        ASSERT(allocationEnd <= static_cast<void*>(this));
+        m_current = allocationEnd;
+        return current;
+    }
+
+    // The dealloc method releases memory allocated using alloc. Memory
+    // must be released in a LIFO fashion, e.g. if the client calls alloc
+    // four times, returning pointers A, B, C, D, then the only valid order
+    // in which these may be deallocated is D, C, B, A.
+    //
+    // The client may optionally skip some deallocations. In the example
+    // above, it would be valid to only explicitly dealloc C, A (D being
+    // dealloced along with C, B along with A).
+    //
+    // If the pointer was not allocated from this pool (or its chain of pools)
+    // then dealloc will CRASH(). Callers should update any references they
+    // have to this current BumpPointerPool to instead point to the returned
+    // BumpPointerPool.
+    BumpPointerPool* dealloc(void* position)
+    {
+        if ((position >= m_start) && (position <= static_cast<void*>(this))) {
+            ASSERT(position <= m_current);
+            m_current = position;
+            return this;
+        }
+        return deallocCrossPool(this, position);
+    }
+
+    size_t sizeOfNonHeapData() const
+    {
+        ASSERT(!m_previous);
+        size_t n = 0;
+        const BumpPointerPool* curr = this;
+        while (curr) {
+            // Sum the page allocation of each pool in the chain, not just this one.
+            n += curr->m_allocation.size();
+            curr = curr->m_next;
+        }
+        return n;
+    }
+
+private:
+    // Placement operator new, returns the last 'size' bytes of allocation for use as this.
+    void* operator new(size_t size, const PageAllocation& allocation)
+    {
+        ASSERT(size < allocation.size());
+        return reinterpret_cast<char*>(reinterpret_cast<intptr_t>(allocation.base()) + allocation.size()) - size;
+    }
+
+    BumpPointerPool(const PageAllocation& allocation)
+        : m_current(allocation.base())
+        , m_start(allocation.base())
+        , m_next(0)
+        , m_previous(0)
+        , m_allocation(allocation)
+    {
+    }
+
+    static BumpPointerPool* create(size_t minimumCapacity = 0)
+    {
+        // Add size of BumpPointerPool object, check for overflow.
+        minimumCapacity += sizeof(BumpPointerPool);
+        if (minimumCapacity < sizeof(BumpPointerPool))
+            return 0;
+
+        size_t poolSize = MINIMUM_BUMP_POOL_SIZE;
+        while (poolSize < minimumCapacity) {
+            poolSize <<= 1;
+            // The following overflow check relies on MINIMUM_BUMP_POOL_SIZE being a power of 2!
+            ASSERT(!(MINIMUM_BUMP_POOL_SIZE & (MINIMUM_BUMP_POOL_SIZE - 1)));
+            if (!poolSize)
+                return 0;
+        }
+
+        PageAllocation allocation = PageAllocation::allocate(poolSize);
+        if (!!allocation)
+            return new(allocation) BumpPointerPool(allocation);
+        return 0;
+    }
+
+    void shrink()
+    {
+        ASSERT(!m_previous);
+        m_current = m_start;
+        while (m_next) {
+            BumpPointerPool* nextNext = m_next->m_next;
+            m_next->destroy();
+            m_next = nextNext;
+        }
+    }
+
+    void destroy()
+    {
+        m_allocation.deallocate();
+    }
+
+    static BumpPointerPool* ensureCapacityCrossPool(BumpPointerPool* previousPool, size_t size)
+    {
+        // The pool passed should not have capacity, so we'll start with the next one.
+        ASSERT(previousPool);
+        ASSERT((static_cast<char*>(previousPool->m_current) + size) > previousPool->m_current); // check for overflow
+        ASSERT((static_cast<char*>(previousPool->m_current) + size) > static_cast<void*>(previousPool));
+        BumpPointerPool* pool = previousPool->m_next;
+
+        while (true) {
+            if (!pool) {
+                // We've run to the end of the chain; allocate a new pool.
+                pool = BumpPointerPool::create(size);
+                previousPool->m_next = pool;
+                if (pool) // create returns null on OOM; propagate that to the caller
+                    pool->m_previous = previousPool;
+                return pool;
+            }
+
+            void* current = pool->m_current;
+            void* allocationEnd = static_cast<char*>(current) + size;
+            ASSERT(allocationEnd > current); // check for overflow
+            if (allocationEnd <= static_cast<void*>(pool))
+                return pool;
+
+            // This pool is also full; advance to the next pool in the chain.
+            previousPool = pool;
+            pool = pool->m_next;
+        }
+    }
+
+    static BumpPointerPool* deallocCrossPool(BumpPointerPool* pool, void* position)
+    {
+        // Should only be called if position is not in the current pool.
+        ASSERT((position < pool->m_start) || (position > static_cast<void*>(pool)));
+
+        while (true) {
+            // Unwind the current pool to the start, move back in the chain to the previous pool.
+            pool->m_current = pool->m_start;
+            pool = pool->m_previous;
+
+            // position was nowhere in the chain!
+            if (!pool)
+                CRASH();
+
+            if ((position >= pool->m_start) && (position <= static_cast<void*>(pool))) {
+                ASSERT(position <= pool->m_current);
+                pool->m_current = position;
+                return pool;
+            }
+        }
+    }
+
+    void* m_current;
+    void* m_start;
+    BumpPointerPool* m_next;
+    BumpPointerPool* m_previous;
+    PageAllocation m_allocation;
+
+    friend class BumpPointerAllocator;
+};
+
+// A BumpPointerAllocator manages a set of BumpPointerPool objects, which
+// can be used for LIFO (stack like) allocation.
+//
+// To begin allocating using this class, call startAllocator(). The result
+// of this method will be null if the initial pool allocation fails, or a
+// pointer to a BumpPointerPool object that can be used to perform
+// allocations. Whilst running, no memory will be released until
+// stopAllocator() is called. At this point all allocations made through
+// this allocator will be reaped, and underlying memory may be freed.
+//
+// (In practice we will still hold on to the initial pool to allow allocation
+// to be quickly restarted, but additional pools will be freed.)
+//
+// This allocator is non-reentrant; it is incumbent on the clients to ensure
+// startAllocator() is not called again until stopAllocator() has been called.
+class BumpPointerAllocator {
+public:
+    BumpPointerAllocator()
+        : m_head(0)
+    {
+    }
+
+    ~BumpPointerAllocator()
+    {
+        if (m_head)
+            m_head->destroy();
+    }
+
+    BumpPointerPool* startAllocator()
+    {
+        if (!m_head)
+            m_head = BumpPointerPool::create();
+        return m_head;
+    }
+
+    void stopAllocator()
+    {
+        if (m_head)
+            m_head->shrink();
+    }
+
+    size_t sizeOfNonHeapData() const
+    {
+        return m_head ? m_head->sizeOfNonHeapData() : 0;
+    }
+
+private:
+    BumpPointerPool* m_head;
+};
+
+}
+
+using WTF::BumpPointerAllocator;
+
+#endif /* yarr_BumpPointerAllocator_h */
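
For context, a minimal usage sketch of the API this patch adds, assuming only what the header comments above document; the Frame struct and example() function are hypothetical names, and real callers in Yarr would allocate their own backtracking data instead. The pattern is: bracket an allocation phase with startAllocator()/stopAllocator(), call ensureCapacity() before every alloc(), always adopt the pool pointer returned by ensureCapacity() and dealloc(), and release memory in LIFO order.

    // Illustrative sketch only, not part of the patch.
    #include "yarr/BumpPointerAllocator.h"

    struct Frame { int data[16]; };             // hypothetical client data

    bool example()
    {
        BumpPointerAllocator allocator;

        // Null here means the initial pool (one page) could not be allocated.
        WTF::BumpPointerPool* pool = allocator.startAllocator();
        if (!pool)
            return false;

        // ensureCapacity may chain in a new pool; adopt whatever it returns.
        pool = pool->ensureCapacity(sizeof(Frame));
        if (!pool)
            return false;                       // out of memory
        void* a = pool->alloc(sizeof(Frame));   // cannot fail after ensureCapacity

        pool = pool->ensureCapacity(sizeof(Frame));
        if (!pool)
            return false;
        void* b = pool->alloc(sizeof(Frame));

        // LIFO release: b was allocated after a, so it is released first.
        pool = pool->dealloc(b);
        pool = pool->dealloc(a);

        // Reap all allocations; only the initial pool is kept for a quick restart.
        allocator.stopAllocator();
        return true;
    }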