Wed, 31 Dec 2014 07:16:47 +0100
Revert simplistic fix pending revisit of Mozilla integration attempt.
//
// Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//

#include "compiler/PoolAlloc.h"

#ifndef _MSC_VER
#include <stdint.h>
#endif
#include <stdio.h>

#include "common/angleutils.h"
#include "compiler/InitializeGlobals.h"
#include "compiler/osinclude.h"
OS_TLSIndex PoolIndex = OS_INVALID_TLS_INDEX;

bool InitializePoolIndex()
{
    assert(PoolIndex == OS_INVALID_TLS_INDEX);

    PoolIndex = OS_AllocTLSIndex();
    return PoolIndex != OS_INVALID_TLS_INDEX;
}

void FreePoolIndex()
{
    assert(PoolIndex != OS_INVALID_TLS_INDEX);

    OS_FreeTLSIndex(PoolIndex);
    PoolIndex = OS_INVALID_TLS_INDEX;
}

TPoolAllocator* GetGlobalPoolAllocator()
{
    assert(PoolIndex != OS_INVALID_TLS_INDEX);
    return static_cast<TPoolAllocator*>(OS_GetTLSValue(PoolIndex));
}

void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator)
{
    assert(PoolIndex != OS_INVALID_TLS_INDEX);
    OS_SetTLSValue(PoolIndex, poolAllocator);
}
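
//
// Usage sketch (hypothetical caller, not taken from this file): the TLS
// index is created once per process, then each compiling thread installs
// its own allocator before use.
//
//     if (!InitializePoolIndex())
//         return false;                      // no TLS slot available
//     TPoolAllocator* pool = new TPoolAllocator;
//     SetGlobalPoolAllocator(pool);          // current for this thread only
//     ...
//     SetGlobalPoolAllocator(NULL);
//     delete pool;
//     FreePoolIndex();                       // once, at process shutdown
//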

//
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//
TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
    pageSize(growthIncrement),
    alignment(allocationAlignment),
    freeList(0),
    inUseList(0),
    numCalls(0),
    totalBytes(0)
{
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (pageSize < 4*1024)
        pageSize = 4*1024;

    //
    // A large currentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    currentPageOffset = pageSize;

    //
    // Adjust alignment to be at least pointer aligned and
    // power of 2.
    //
    size_t minAlign = sizeof(void*);
    alignment &= ~(minAlign - 1);
    if (alignment < minAlign)
        alignment = minAlign;
    size_t a = 1;
    while (a < alignment)
        a <<= 1;
    alignment = a;
    alignmentMask = a - 1;

    //
    // Align header skip
    //
    headerSkip = minAlign;
    if (headerSkip < sizeof(tHeader)) {
        headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    }
}
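
//
// Worked example (illustrative): on a 64-bit build, minAlign is 8. Passing
// allocationAlignment = 12 gives 12 & ~7 = 8, already a power of 2, so
// alignment = 8 and alignmentMask = 7. Passing allocationAlignment = 20
// gives 20 & ~7 = 16, which rounds to the power of 2 value 16, so
// alignment = 16 and alignmentMask = 15.
//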

TPoolAllocator::~TPoolAllocator()
{
    while (inUseList) {
        tHeader* next = inUseList->nextPage;
        inUseList->~tHeader();
        delete [] reinterpret_cast<char*>(inUseList);
        inUseList = next;
    }

    //
    // We should not check the guard blocks
    // here, because we did it already when the block was
    // placed into the free list.
    //
    while (freeList) {
        tHeader* next = freeList->nextPage;
        delete [] reinterpret_cast<char*>(freeList);
        freeList = next;
    }
}

// Support MSVC++ 6.0
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal   = 0xfe;
const unsigned char TAllocation::userDataFill       = 0xcd;

#ifdef GUARD_BLOCKS
const size_t TAllocation::guardBlockSize = 16;
#else
const size_t TAllocation::guardBlockSize = 0;
#endif

//
// Check a single guard block for damage
//
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
{
#ifdef GUARD_BLOCKS
    for (size_t x = 0; x < guardBlockSize; x++) {
        if (blockMem[x] != val) {
            char assertMsg[80];

            // We don't print the assert message. It's here just to be helpful.
#if defined(_MSC_VER)
            snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n",
                     locText, size, data());
#else
            snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
                     locText, size, data());
#endif
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#endif
}
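
//
// Memory picture implied by the bracketing comment in allocate() below
// (sketch; debug build with GUARD_BLOCKS defined, and presuming userDataFill
// is used to pre-fill fresh user data):
//
//     [guard: guardBlockSize x 0xfb][user data: numBytes, 0xcd fill][guard: guardBlockSize x 0xfe]
//
// checkGuardBlock() scans one of the two bracketing regions for its fill
// value; any mismatched byte indicates an overrun or underrun of the
// user-data region.
//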


void TPoolAllocator::push()
{
    tAllocState state = { currentPageOffset, inUseList };

    stack.push_back(state);

    //
    // Indicate there is no current page to allocate from.
    //
    currentPageOffset = pageSize;
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void TPoolAllocator::pop()
{
    if (stack.size() < 1)
        return;

    tHeader* page = stack.back().page;
    currentPageOffset = stack.back().offset;

    while (inUseList != page) {
        // invoke destructor to free allocation list
        inUseList->~tHeader();

        tHeader* nextInUse = inUseList->nextPage;
        if (inUseList->pageCount > 1)
            delete [] reinterpret_cast<char*>(inUseList);
        else {
            inUseList->nextPage = freeList;
            freeList = inUseList;
        }
        inUseList = nextInUse;
    }

    stack.pop_back();
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void TPoolAllocator::popAll()
{
    while (stack.size() > 0)
        pop();
}
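
//
// Usage sketch (hypothetical caller): everything allocated between push()
// and the matching pop() is reclaimed in one step; single-page blocks are
// recycled onto the free list while multi-page blocks go back to the OS.
//
//     TPoolAllocator pool;
//     pool.push();
//     void* a = pool.allocate(128);    // valid until the matching pop()
//     void* b = pool.allocate(8000);   // multi-page; deleted outright on pop()
//     pool.pop();                      // both a and b are reclaimed here
//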

void* TPoolAllocator::allocate(size_t numBytes)
{
    //
    // Just keep some interesting statistics.
    //
    ++numCalls;
    totalBytes += numBytes;

    // If we are using guard blocks, all allocations are bracketed by
    // them: [guardblock][allocation][guardblock]. numBytes is how
    // much memory the caller asked for. allocationSize is the total
    // size including guard blocks. In release build,
    // guardBlockSize=0 and this all gets optimized away.
    size_t allocationSize = TAllocation::allocationSize(numBytes);
    // Detect integer overflow.
    if (allocationSize < numBytes)
        return 0;

    //
    // Do the allocation, most likely case first, for efficiency.
    // This step could be moved to be inline sometime.
    //
    if (allocationSize <= pageSize - currentPageOffset) {
        //
        // Safe to allocate from currentPageOffset.
        //
        unsigned char* memory = reinterpret_cast<unsigned char*>(inUseList) + currentPageOffset;
        currentPageOffset += allocationSize;
        currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;

        return initializeAllocation(inUseList, memory, numBytes);
    }

    if (allocationSize > pageSize - headerSkip) {
        //
        // Do a multi-page allocation. Don't mix these with the others.
        // The OS is efficient at allocating and freeing multiple pages.
        //
        size_t numBytesToAlloc = allocationSize + headerSkip;
        // Detect integer overflow.
        if (numBytesToAlloc < allocationSize)
            return 0;

        tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
        if (memory == 0)
            return 0;

        // Use placement-new to initialize header
        new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
        inUseList = memory;

        currentPageOffset = pageSize;  // make next allocation come from a new page

        // No guard blocks for multi-page allocations (yet)
        return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
    }

    //
    // Need a simple page to allocate from.
    //
    tHeader* memory;
    if (freeList) {
        memory = freeList;
        freeList = freeList->nextPage;
    } else {
        memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
        if (memory == 0)
            return 0;
    }

    // Use placement-new to initialize header
    new(memory) tHeader(inUseList, 1);
    inUseList = memory;

    unsigned char* ret = reinterpret_cast<unsigned char*>(inUseList) + headerSkip;
    currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;

    return initializeAllocation(inUseList, ret, numBytes);
}
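
//
// Worked example (illustrative; assumes for round numbers that
// allocationSize(n) == n, i.e. a release build with no guard blocks or
// tracking overhead, and pageSize = 4096, headerSkip = 16, alignmentMask = 7,
// starting from an empty pool):
//
//     allocate(100)  -> no current page, so a simple 4096-byte page is made;
//                       returns page + 16; currentPageOffset = (16 + 100 + 7) & ~7 = 120
//     allocate(50)   -> fits in-page; returns page + 120;
//                       currentPageOffset = (120 + 50 + 7) & ~7 = 176
//     allocate(8000) -> exceeds pageSize - headerSkip; multi-page path does
//                       ::new char[8016] with pageCount = (8016 + 4095) / 4096 = 2
//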


//
// Check all allocations in a list for damage by calling check on each.
//
void TAllocation::checkAllocList() const
{
    for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
        alloc->check();
}