Wed, 31 Dec 2014 07:53:36 +0100
Correct a small whitespace inconsistency introduced while renaming variables.
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Copyright (C) 2010 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ***** END LICENSE BLOCK ***** */

#ifndef yarr_BumpPointerAllocator_h
#define yarr_BumpPointerAllocator_h

#include "yarr/PageAllocation.h"

namespace WTF {

#if WTF_CPU_SPARC
#define MINIMUM_BUMP_POOL_SIZE 0x2000
#elif WTF_CPU_IA64
#define MINIMUM_BUMP_POOL_SIZE 0x4000
#else
#define MINIMUM_BUMP_POOL_SIZE 0x1000
#endif

class BumpPointerPool {
public:
    // ensureCapacity will check whether the current pool has capacity to
    // allocate 'size' bytes of memory. If it does not, it will attempt to
    // allocate a new pool (which will be added to this one in a chain).
    //
    // If allocation fails (out of memory) this method will return null.
    // If the return value is non-null, then callers should update any
    // references they have to this current (possibly full) BumpPointerPool
    // to instead point to the newly returned BumpPointerPool.
    BumpPointerPool* ensureCapacity(size_t size)
    {
        void* allocationEnd = static_cast<char*>(m_current) + size;
        ASSERT(allocationEnd > m_current); // check for overflow
        if (allocationEnd <= static_cast<void*>(this))
            return this;
        return ensureCapacityCrossPool(this, size);
    }
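
    // Illustrative caller pattern (a sketch, not part of the original file;
    // 'pool' and 'n' are hypothetical names). ensureCapacity may hand back a
    // different pool in the chain, so the pool pointer must be re-assigned:
    //
    //     pool = pool->ensureCapacity(n);
    //     if (!pool)
    //         return 0;              // out of memory
    //     void* p = pool->alloc(n);  // cannot fail after ensureCapacity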

    // alloc should only be called after calling ensureCapacity; as such,
    // alloc will never fail.
    void* alloc(size_t size)
    {
        void* current = m_current;
        void* allocationEnd = static_cast<char*>(current) + size;
        ASSERT(allocationEnd > current); // check for overflow
        ASSERT(allocationEnd <= static_cast<void*>(this));
        m_current = allocationEnd;
        return current;
    }

    // The dealloc method releases memory allocated using alloc. Memory
    // must be released in a LIFO fashion, e.g. if the client calls alloc
    // four times, returning pointers A, B, C, D, then the only valid order
    // in which these may be deallocated is D, C, B, A.
    //
    // The client may optionally skip some deallocations. In the example
    // above, it would be valid to only explicitly dealloc C, A (D being
    // dealloced along with C, B along with A).
    //
    // If the pointer was not allocated from this pool (or its chain of
    // pools) then dealloc will CRASH(). Callers should update any
    // references they have to this current BumpPointerPool to instead
    // point to the returned BumpPointerPool.
    BumpPointerPool* dealloc(void* position)
    {
        if ((position >= m_start) && (position <= static_cast<void*>(this))) {
            ASSERT(position <= m_current);
            m_current = position;
            return this;
        }
        return deallocCrossPool(this, position);
    }
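
    // A sketch of the LIFO contract above (hypothetical names, not from the
    // original file). Note that dealloc, like ensureCapacity, may return an
    // earlier pool in the chain, so the pool pointer is re-assigned:
    //
    //     void* a = pool->alloc(8);
    //     void* b = pool->alloc(8);
    //     void* c = pool->alloc(8);
    //     pool = pool->dealloc(c);  // releases C
    //     pool = pool->dealloc(a);  // releases A, and B along with it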

    size_t sizeOfNonHeapData() const
    {
        ASSERT(!m_previous);
        size_t n = 0;
        const BumpPointerPool* curr = this;
        while (curr) {
            // Sum each pool's allocation in turn, not just the head's.
            n += curr->m_allocation.size();
            curr = curr->m_next;
        }
        return n;
    }

private:
    // Placement operator new; returns the last 'size' bytes of the
    // allocation for use as this.
    void* operator new(size_t size, const PageAllocation& allocation)
    {
        ASSERT(size < allocation.size());
        return reinterpret_cast<char*>(reinterpret_cast<intptr_t>(allocation.base()) + allocation.size()) - size;
    }
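
    // Resulting layout of a pool's single page allocation (a descriptive
    // sketch inferred from the code above): the pool object sits at the very
    // end, so the bump pointer grows up towards 'this'. This is why
    // ensureCapacity and alloc compare allocationEnd against 'this':
    //
    //     allocation.base()                              base() + size()
    //     | m_start ... allocated data ... free space | BumpPointerPool |
    //                  ^-- m_current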

    BumpPointerPool(const PageAllocation& allocation)
        : m_current(allocation.base())
        , m_start(allocation.base())
        , m_next(0)
        , m_previous(0)
        , m_allocation(allocation)
    {
    }

    static BumpPointerPool* create(size_t minimumCapacity = 0)
    {
        // Add size of BumpPointerPool object, check for overflow.
        minimumCapacity += sizeof(BumpPointerPool);
        if (minimumCapacity < sizeof(BumpPointerPool))
            return 0;

        size_t poolSize = MINIMUM_BUMP_POOL_SIZE;
        while (poolSize < minimumCapacity) {
            poolSize <<= 1;
            // The following if check relies on MINIMUM_BUMP_POOL_SIZE being a power of 2!
            ASSERT(!(MINIMUM_BUMP_POOL_SIZE & (MINIMUM_BUMP_POOL_SIZE - 1)));
            if (!poolSize)
                return 0;
        }

        PageAllocation allocation = PageAllocation::allocate(poolSize);
        if (!!allocation)
            return new(allocation) BumpPointerPool(allocation);
        return 0;
    }
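
    // Worked sizing example for create() (assuming the default
    // MINIMUM_BUMP_POOL_SIZE of 0x1000): a request for 0x2500 bytes of
    // capacity first grows to 0x2500 + sizeof(BumpPointerPool), then the
    // pool size doubles 0x1000 -> 0x2000 -> 0x4000, so a 16KB page
    // allocation is made. If the doublings wrap poolSize around to zero,
    // create() returns null rather than allocating.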

    void shrink()
    {
        ASSERT(!m_previous);
        m_current = m_start;
        while (m_next) {
            BumpPointerPool* nextNext = m_next->m_next;
            m_next->destroy();
            m_next = nextNext;
        }
    }

    void destroy()
    {
        m_allocation.deallocate();
    }

    static BumpPointerPool* ensureCapacityCrossPool(BumpPointerPool* previousPool, size_t size)
    {
        // The pool passed should not have capacity, so we'll start with the next one.
        ASSERT(previousPool);
        ASSERT((static_cast<char*>(previousPool->m_current) + size) > previousPool->m_current); // check for overflow
        ASSERT((static_cast<char*>(previousPool->m_current) + size) > static_cast<void*>(previousPool));
        BumpPointerPool* pool = previousPool->m_next;

        while (true) {
            if (!pool) {
                // We've run to the end; allocate a new pool.
                pool = BumpPointerPool::create(size);
                // create() returns null on OOM; propagate that rather than
                // dereferencing it, matching ensureCapacity's contract.
                if (!pool)
                    return 0;
                previousPool->m_next = pool;
                pool->m_previous = previousPool;
                return pool;
            }

            void* current = pool->m_current;
            void* allocationEnd = static_cast<char*>(current) + size;
            ASSERT(allocationEnd > current); // check for overflow
            if (allocationEnd <= static_cast<void*>(pool))
                return pool;

            // This pool doesn't fit; step to the next pool in the chain.
            previousPool = pool;
            pool = pool->m_next;
        }
    }

    static BumpPointerPool* deallocCrossPool(BumpPointerPool* pool, void* position)
    {
        // Should only be called if position is not in the current pool.
        ASSERT((position < pool->m_start) || (position > static_cast<void*>(pool)));

        while (true) {
            // Unwind the current pool to the start, move back in the chain to the previous pool.
            pool->m_current = pool->m_start;
            pool = pool->m_previous;

            // position was nowhere in the chain!
            if (!pool)
                CRASH();

            if ((position >= pool->m_start) && (position <= static_cast<void*>(pool))) {
                ASSERT(position <= pool->m_current);
                pool->m_current = position;
                return pool;
            }
        }
    }

    void* m_current;
    void* m_start;
    BumpPointerPool* m_next;
    BumpPointerPool* m_previous;
    PageAllocation m_allocation;

    friend class BumpPointerAllocator;
};

// A BumpPointerAllocator manages a set of BumpPointerPool objects, which
// can be used for LIFO (stack-like) allocation.
//
// To begin allocating using this class call startAllocator(). The result
// of this method will be null if the initial pool allocation fails, or a
// pointer to a BumpPointerPool object that can be used to perform
// allocations. Whilst running, no memory will be released until
// stopAllocator() is called. At this point all allocations made through
// this allocator will be reaped, and underlying memory may be freed.
//
// (In practice we will still hold on to the initial pool to allow allocation
// to be quickly restarted, but additional pools will be freed.)
//
// This allocator is non-reentrant; it is incumbent on the clients to ensure
// startAllocator() is not called again until stopAllocator() has been called.
// A usage sketch follows the class definition.
class BumpPointerAllocator {
public:
    BumpPointerAllocator()
        : m_head(0)
    {
    }

    ~BumpPointerAllocator()
    {
        if (m_head)
            m_head->destroy();
    }

    BumpPointerPool* startAllocator()
    {
        if (!m_head)
            m_head = BumpPointerPool::create();
        return m_head;
    }

    void stopAllocator()
    {
        if (m_head)
            m_head->shrink();
    }

    size_t sizeOfNonHeapData() const
    {
        return m_head ? m_head->sizeOfNonHeapData() : 0;
    }

private:
    BumpPointerPool* m_head;
};
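
// Typical end-to-end usage (an illustrative sketch, not taken from the
// original file; sizes and variable names are hypothetical):
//
//     BumpPointerAllocator allocator;
//     BumpPointerPool* pool = allocator.startAllocator();
//     if (!pool)
//         return;                     // initial pool allocation failed
//     pool = pool->ensureCapacity(64);
//     if (!pool)
//         return;                     // out of memory growing the chain
//     void* frame = pool->alloc(64);  // guaranteed to succeed now
//     /* ... use frame ... */
//     pool = pool->dealloc(frame);    // LIFO release
//     allocator.stopAllocator();      // reap; the first pool is retained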

}

using WTF::BumpPointerAllocator;

#endif /* yarr_BumpPointerAllocator_h */