//
// Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//

#include "compiler/PoolAlloc.h"

#ifndef _MSC_VER
#include <stdint.h>
#endif
#include <stdio.h>

#include "common/angleutils.h"
#include "compiler/InitializeGlobals.h"
#include "compiler/osinclude.h"

OS_TLSIndex PoolIndex = OS_INVALID_TLS_INDEX;

bool InitializePoolIndex()
{
    assert(PoolIndex == OS_INVALID_TLS_INDEX);

    PoolIndex = OS_AllocTLSIndex();
    return PoolIndex != OS_INVALID_TLS_INDEX;
}

void FreePoolIndex()
{
    assert(PoolIndex != OS_INVALID_TLS_INDEX);

    OS_FreeTLSIndex(PoolIndex);
    PoolIndex = OS_INVALID_TLS_INDEX;
}

TPoolAllocator* GetGlobalPoolAllocator()
{
    assert(PoolIndex != OS_INVALID_TLS_INDEX);
    return static_cast<TPoolAllocator*>(OS_GetTLSValue(PoolIndex));
}

void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator)
{
    assert(PoolIndex != OS_INVALID_TLS_INDEX);
    OS_SetTLSValue(PoolIndex, poolAllocator);
}

//
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//
TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
    pageSize(growthIncrement),
    alignment(allocationAlignment),
    freeList(0),
    inUseList(0),
    numCalls(0),
    totalBytes(0)
{
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (pageSize < 4*1024)
        pageSize = 4*1024;

    //
    // A large currentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    currentPageOffset = pageSize;

    //
    // Adjust alignment to be at least pointer aligned and
    // power of 2.
    //
    size_t minAlign = sizeof(void*);
    alignment &= ~(minAlign - 1);
    if (alignment < minAlign)
        alignment = minAlign;
    size_t a = 1;
    while (a < alignment)
        a <<= 1;
    alignment = a;
    alignmentMask = a - 1;

    //
    // Align header skip
    //
    headerSkip = minAlign;
    if (headerSkip < sizeof(tHeader)) {
        headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    }
}

TPoolAllocator::~TPoolAllocator()
{
    while (inUseList) {
        tHeader* next = inUseList->nextPage;
        inUseList->~tHeader();
        delete [] reinterpret_cast<char*>(inUseList);
        inUseList = next;
    }

    //
    // We should not check the guard blocks here, because we did it
    // already when the block was placed into the free list.
    //
    while (freeList) {
        tHeader* next = freeList->nextPage;
        delete [] reinterpret_cast<char*>(freeList);
        freeList = next;
    }
}
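//
// Usage sketch (illustrative only, not part of the build): a compile
// thread typically installs its own pool through the TLS index declared
// above and then routes allocations through it. The actual call sites
// live elsewhere in the compiler; this just shows the intended pattern.
//
//     InitializePoolIndex();                 // once per process
//
//     TPoolAllocator pool;                   // one pool per compile/thread
//     SetGlobalPoolAllocator(&pool);
//     void* mem = GetGlobalPoolAllocator()->allocate(128);
//     pool.popAll();                         // mass-free everything at once
//
//     FreePoolIndex();                       // process teardown
//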
// Support MSVC++ 6.0
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal   = 0xfe;
const unsigned char TAllocation::userDataFill       = 0xcd;

#ifdef GUARD_BLOCKS
const size_t TAllocation::guardBlockSize = 16;
#else
const size_t TAllocation::guardBlockSize = 0;
#endif

//
// Check a single guard block for damage
//
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
{
#ifdef GUARD_BLOCKS
    for (size_t x = 0; x < guardBlockSize; x++) {
        if (blockMem[x] != val) {
            char assertMsg[80];

            // We don't print the assert message.  It's here just to be helpful.
#if defined(_MSC_VER)
            snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n",
                     locText, size, data());
#else
            snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
                     locText, size, data());
#endif
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#endif
}


void TPoolAllocator::push()
{
    tAllocState state = { currentPageOffset, inUseList };

    stack.push_back(state);

    //
    // Indicate there is no current page to allocate from.
    //
    currentPageOffset = pageSize;
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void TPoolAllocator::pop()
{
    if (stack.size() < 1)
        return;

    tHeader* page = stack.back().page;
    currentPageOffset = stack.back().offset;

    while (inUseList != page) {
        // invoke destructor to free allocation list
        inUseList->~tHeader();

        tHeader* nextInUse = inUseList->nextPage;
        if (inUseList->pageCount > 1)
            delete [] reinterpret_cast<char*>(inUseList);
        else {
            inUseList->nextPage = freeList;
            freeList = inUseList;
        }
        inUseList = nextInUse;
    }

    stack.pop_back();
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void TPoolAllocator::popAll()
{
    while (stack.size() > 0)
        pop();
}
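//
// Example of the push()/pop() stack discipline (illustrative only):
// every allocation made after a push() is reclaimed in one shot by the
// matching pop(), and single-page blocks are recycled onto the free
// list rather than returned to the OS.
//
//     TPoolAllocator pool;
//     pool.push();
//     void* a = pool.allocate(64);
//     void* b = pool.allocate(256);
//     pool.pop();        // a and b are both invalid from here on
//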
void* TPoolAllocator::allocate(size_t numBytes)
{
    //
    // Just keep some interesting statistics.
    //
    ++numCalls;
    totalBytes += numBytes;

    // If we are using guard blocks, all allocations are bracketed by
    // them: [guardblock][allocation][guardblock].  numBytes is how
    // much memory the caller asked for.  allocationSize is the total
    // size including guard blocks.  In release build,
    // guardBlockSize=0 and this all gets optimized away.
    size_t allocationSize = TAllocation::allocationSize(numBytes);
    // Detect integer overflow.
    if (allocationSize < numBytes)
        return 0;

    //
    // Do the allocation, most likely case first, for efficiency.
    // This step could be moved to be inline sometime.
    //
    if (allocationSize <= pageSize - currentPageOffset) {
        //
        // Safe to allocate from currentPageOffset.
        //
        unsigned char* memory = reinterpret_cast<unsigned char*>(inUseList) + currentPageOffset;
        currentPageOffset += allocationSize;
        currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;

        return initializeAllocation(inUseList, memory, numBytes);
    }

    if (allocationSize > pageSize - headerSkip) {
        //
        // Do a multi-page allocation.  Don't mix these with the others.
        // The OS is efficient at allocating and freeing multiple pages.
        //
        size_t numBytesToAlloc = allocationSize + headerSkip;
        // Detect integer overflow.
        if (numBytesToAlloc < allocationSize)
            return 0;

        tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
        if (memory == 0)
            return 0;

        // Use placement-new to initialize header
        new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
        inUseList = memory;

        currentPageOffset = pageSize;  // make next allocation come from a new page

        // No guard blocks for multi-page allocations (yet)
        return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
    }

    //
    // Need a simple page to allocate from.
    //
    tHeader* memory;
    if (freeList) {
        memory = freeList;
        freeList = freeList->nextPage;
    } else {
        memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
        if (memory == 0)
            return 0;
    }

    // Use placement-new to initialize header
    new(memory) tHeader(inUseList, 1);
    inUseList = memory;

    unsigned char* ret = reinterpret_cast<unsigned char*>(inUseList) + headerSkip;
    currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;

    return initializeAllocation(inUseList, ret, numBytes);
}


//
// Check all allocations in a list for damage by calling check on each.
//
void TAllocation::checkAllocList() const
{
    for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
        alloc->check();
}
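//
// Guard-block layout recap (illustrative, inferred from the constants
// defined above; the bracketing itself is done by initializeAllocation(),
// declared in PoolAlloc.h).  With GUARD_BLOCKS defined, each region
// handed back to the caller looks like
//
//     [guardBlockSize bytes of 0xfb][user data, pre-filled with 0xcd][guardBlockSize bytes of 0xfe]
//
// and checkAllocList() walks a page's allocation chain calling check(),
// which verifies both brackets through checkGuardBlock() above.
//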