gfx/angle/src/compiler/PoolAlloc.cpp

changeset 0:6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/gfx/angle/src/compiler/PoolAlloc.cpp	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,294 @@
     1.4 +//
     1.5 +// Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
     1.6 +// Use of this source code is governed by a BSD-style license that can be
     1.7 +// found in the LICENSE file.
     1.8 +//
     1.9 +
    1.10 +#include "compiler/PoolAlloc.h"
    1.11 +
    1.12 +#ifndef _MSC_VER
    1.13 +#include <stdint.h>
    1.14 +#endif
    1.15 +#include <stdio.h>
    1.16 +
    1.17 +#include "common/angleutils.h"
    1.18 +#include "compiler/InitializeGlobals.h"
    1.19 +#include "compiler/osinclude.h"
    1.20 +
    1.21 +OS_TLSIndex PoolIndex = OS_INVALID_TLS_INDEX;
    1.22 +
    1.23 +bool InitializePoolIndex()
    1.24 +{
    1.25 +    assert(PoolIndex == OS_INVALID_TLS_INDEX);
    1.26 +
    1.27 +    PoolIndex = OS_AllocTLSIndex();
    1.28 +    return PoolIndex != OS_INVALID_TLS_INDEX;
    1.29 +}
    1.30 +
    1.31 +void FreePoolIndex()
    1.32 +{
    1.33 +    assert(PoolIndex != OS_INVALID_TLS_INDEX);
    1.34 +
    1.35 +    OS_FreeTLSIndex(PoolIndex);
    1.36 +    PoolIndex = OS_INVALID_TLS_INDEX;
    1.37 +}
    1.38 +
    1.39 +TPoolAllocator* GetGlobalPoolAllocator()
    1.40 +{
    1.41 +    assert(PoolIndex != OS_INVALID_TLS_INDEX);
    1.42 +    return static_cast<TPoolAllocator*>(OS_GetTLSValue(PoolIndex));
    1.43 +}
    1.44 +
    1.45 +void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator)
    1.46 +{
    1.47 +    assert(PoolIndex != OS_INVALID_TLS_INDEX);
    1.48 +    OS_SetTLSValue(PoolIndex, poolAllocator);
    1.49 +}
    1.50 +
    1.51 +//
    1.52 +// Implement the functionality of the TPoolAllocator class, which
    1.53 +// is documented in PoolAlloc.h.
    1.54 +//
    1.55 +TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) : 
    1.56 +    pageSize(growthIncrement),
    1.57 +    alignment(allocationAlignment),
    1.58 +    freeList(0),
    1.59 +    inUseList(0),
    1.60 +    numCalls(0),
    1.61 +    totalBytes(0)
    1.62 +{
    1.63 +    //
    1.64 +    // Don't allow page sizes we know are smaller than all common
    1.65 +    // OS page sizes.
    1.66 +    //
    1.67 +    if (pageSize < 4*1024)
    1.68 +        pageSize = 4*1024;
    1.69 +
    1.70 +    //
    1.71 +    // A large currentPageOffset indicates a new page needs to
    1.72 +    // be obtained to allocate memory.
    1.73 +    //
    1.74 +    currentPageOffset = pageSize;
    1.75 +
    1.76 +    //
    1.77 +    // Adjust alignment to be at least pointer aligned and
    1.78 +    // power of 2.
    1.79 +    //
    1.80 +    size_t minAlign = sizeof(void*);
    1.81 +    alignment &= ~(minAlign - 1);
    1.82 +    if (alignment < minAlign)
    1.83 +        alignment = minAlign;
    1.84 +    size_t a = 1;
    1.85 +    while (a < alignment)
    1.86 +        a <<= 1;
    1.87 +    alignment = a;
    1.88 +    alignmentMask = a - 1;
    1.89 +
    1.90 +    //
    1.91 +    // Align header skip
    1.92 +    //
    1.93 +    headerSkip = minAlign;
    1.94 +    if (headerSkip < sizeof(tHeader)) {
    1.95 +        headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    1.96 +    }
    1.97 +}
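As a worked example of the rounding above, with 8-byte pointers: a requested allocationAlignment of 20 is masked down to 16 and the power-of-two loop keeps it at 16, so alignmentMask becomes 15; a request of 4 falls below the pointer minimum and is bumped up to 8 (mask 7). The (x + alignmentMask) & ~alignmentMask idiom used for headerSkip here, and for currentPageOffset in allocate(), can be checked in isolation; roundUp is an illustrative helper, not part of the file:

    #include <cassert>
    #include <cstddef>

    // Round x up to the next multiple of (mask + 1); mask + 1 must be a power of two.
    static std::size_t roundUp(std::size_t x, std::size_t mask)
    {
        return (x + mask) & ~mask;
    }

    int main()
    {
        assert(roundUp(13, 15) == 16);   // 13 -> next multiple of 16
        assert(roundUp(32, 15) == 32);   // already-aligned values are unchanged
        assert(roundUp(17, 7)  == 24);   // 8-byte alignment
        return 0;
    }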
    1.98 +
    1.99 +TPoolAllocator::~TPoolAllocator()
   1.100 +{
   1.101 +    while (inUseList) {
   1.102 +        tHeader* next = inUseList->nextPage;
   1.103 +        inUseList->~tHeader();
   1.104 +        delete [] reinterpret_cast<char*>(inUseList);
   1.105 +        inUseList = next;
   1.106 +    }
   1.107 +
   1.108 +    // We should not check the guard blocks
   1.109 +    // here, because we did it already when the block was
   1.110 +    // placed into the free list.
   1.111 +    //
   1.112 +    while (freeList) {
   1.113 +        tHeader* next = freeList->nextPage;
   1.114 +        delete [] reinterpret_cast<char*>(freeList);
   1.115 +        freeList = next;
   1.116 +    }
   1.117 +}
   1.118 +
   1.119 +// Support MSVC++ 6.0
   1.120 +const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
   1.121 +const unsigned char TAllocation::guardBlockEndVal   = 0xfe;
   1.122 +const unsigned char TAllocation::userDataFill       = 0xcd;
   1.123 +
   1.124 +#ifdef GUARD_BLOCKS
   1.125 +    const size_t TAllocation::guardBlockSize = 16;
   1.126 +#else
   1.127 +    const size_t TAllocation::guardBlockSize = 0;
   1.128 +#endif
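For reference, these constants describe the debug layout of a pooled allocation: with GUARD_BLOCKS defined, 16 bytes of 0xfb (guardBlockBeginVal) sit immediately before the user data and 16 bytes of 0xfe (guardBlockEndVal) immediately after it, and userDataFill (0xcd) is presumably what TAllocation in PoolAlloc.h pre-fills the user bytes with; without GUARD_BLOCKS, guardBlockSize is 0 and all of this vanishes. A sketch of the layout implied by the comment in allocate() below (the TAllocation bookkeeping itself, counted by allocationSize() in PoolAlloc.h, is not shown):

    // [ 0xfb x guardBlockSize ][ numBytes of user data ][ 0xfe x guardBlockSize ]
    //   guardBlockBeginVal                                guardBlockEndVal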
   1.129 +
   1.130 +//
   1.131 +// Check a single guard block for damage
   1.132 +//
   1.133 +void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
   1.134 +{
   1.135 +#ifdef GUARD_BLOCKS
   1.136 +    for (size_t x = 0; x < guardBlockSize; x++) {
   1.137 +        if (blockMem[x] != val) {
   1.138 +            char assertMsg[80];
   1.139 +
   1.140 +            // We don't print the assert message.  It's here just to be helpful.
   1.141 +#if defined(_MSC_VER)
   1.142 +            snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n",
   1.143 +                    locText, size, data());
   1.144 +#else
   1.145 +            snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
   1.146 +                    locText, size, data());
   1.147 +#endif
   1.148 +            assert(0 && "PoolAlloc: Damage in guard block");
   1.149 +        }
   1.150 +    }
   1.151 +#endif
   1.152 +}
   1.153 +
   1.154 +
   1.155 +void TPoolAllocator::push()
   1.156 +{
   1.157 +    tAllocState state = { currentPageOffset, inUseList };
   1.158 +
   1.159 +    stack.push_back(state);
   1.160 +        
   1.161 +    //
   1.162 +    // Indicate there is no current page to allocate from.
   1.163 +    //
   1.164 +    currentPageOffset = pageSize;
   1.165 +}
   1.166 +
   1.167 +//
   1.168 +// Do a mass-deallocation of all the individual allocations
   1.169 +// that have occurred since the last push(), or since the
   1.170 +// last pop(), or since the object's creation.
   1.171 +//
   1.172 +// The deallocated pages are saved for future allocations.
   1.173 +//
   1.174 +void TPoolAllocator::pop()
   1.175 +{
   1.176 +    if (stack.size() < 1)
   1.177 +        return;
   1.178 +
   1.179 +    tHeader* page = stack.back().page;
   1.180 +    currentPageOffset = stack.back().offset;
   1.181 +
   1.182 +    while (inUseList != page) {
   1.183 +        // invoke destructor to free allocation list
   1.184 +        inUseList->~tHeader();
   1.185 +        
   1.186 +        tHeader* nextInUse = inUseList->nextPage;
   1.187 +        if (inUseList->pageCount > 1)
   1.188 +            delete [] reinterpret_cast<char*>(inUseList);
   1.189 +        else {
   1.190 +            inUseList->nextPage = freeList;
   1.191 +            freeList = inUseList;
   1.192 +        }
   1.193 +        inUseList = nextInUse;
   1.194 +    }
   1.195 +
   1.196 +    stack.pop_back();
   1.197 +}
   1.198 +
   1.199 +//
   1.200 +// Do a mass-deallocation of all the individual allocations
   1.201 +// that have occurred.
   1.202 +//
   1.203 +void TPoolAllocator::popAll()
   1.204 +{
   1.205 +    while (stack.size() > 0)
   1.206 +        pop();
   1.207 +}
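A sketch of how the scope stack behaves in practice; ExampleNestedScopes is a made-up function and the sizes are arbitrary:

    void ExampleNestedScopes(TPoolAllocator& pool)
    {
        pool.push();                   // scope A
        void* a = pool.allocate(64);

        pool.push();                   // scope B, nested inside A
        void* b = pool.allocate(128);
        pool.pop();                    // releases b and everything since the inner push()

        pool.pop();                    // releases a; emptied pages go onto the free list
        (void)a;                       // popAll() would have unwound both scopes at once
        (void)b;
    }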
   1.208 +
   1.209 +void* TPoolAllocator::allocate(size_t numBytes)
   1.210 +{
   1.211 +    //
   1.212 +    // Just keep some interesting statistics.
   1.213 +    //
   1.214 +    ++numCalls;
   1.215 +    totalBytes += numBytes;
   1.216 +
   1.217 +    // If we are using guard blocks, all allocations are bracketed by
   1.218 +    // them: [guardblock][allocation][guardblock].  numBytes is how
   1.219 +    // much memory the caller asked for.  allocationSize is the total
   1.220 +    // size including guard blocks.  In release build,
   1.221 +    // guardBlockSize=0 and this all gets optimized away.
   1.222 +    size_t allocationSize = TAllocation::allocationSize(numBytes);
   1.223 +    // Detect integer overflow.
   1.224 +    if (allocationSize < numBytes)
   1.225 +        return 0;
   1.226 +
   1.227 +    //
   1.228 +    // Do the allocation, most likely case first, for efficiency.
   1.229 +    // This step could be moved to be inline sometime.
   1.230 +    //
   1.231 +    if (allocationSize <= pageSize - currentPageOffset) {
   1.232 +        //
   1.233 +        // Safe to allocate from currentPageOffset.
   1.234 +        //
   1.235 +        unsigned char* memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
   1.236 +        currentPageOffset += allocationSize;
   1.237 +        currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;
   1.238 +
   1.239 +        return initializeAllocation(inUseList, memory, numBytes);
   1.240 +    }
   1.241 +
   1.242 +    if (allocationSize > pageSize - headerSkip) {
   1.243 +        //
   1.244 +        // Do a multi-page allocation.  Don't mix these with the others.
    1.245 +        // The OS is efficient at allocating and freeing multiple pages.
   1.246 +        //
   1.247 +        size_t numBytesToAlloc = allocationSize + headerSkip;
   1.248 +        // Detect integer overflow.
   1.249 +        if (numBytesToAlloc < allocationSize)
   1.250 +            return 0;
   1.251 +
   1.252 +        tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
   1.253 +        if (memory == 0)
   1.254 +            return 0;
   1.255 +
   1.256 +        // Use placement-new to initialize header
   1.257 +        new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
   1.258 +        inUseList = memory;
   1.259 +
   1.260 +        currentPageOffset = pageSize;  // make next allocation come from a new page
   1.261 +
   1.262 +        // No guard blocks for multi-page allocations (yet)
   1.263 +        return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
   1.264 +    }
   1.265 +
   1.266 +    //
   1.267 +    // Need a simple page to allocate from.
   1.268 +    //
   1.269 +    tHeader* memory;
   1.270 +    if (freeList) {
   1.271 +        memory = freeList;
   1.272 +        freeList = freeList->nextPage;
   1.273 +    } else {
   1.274 +        memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
   1.275 +        if (memory == 0)
   1.276 +            return 0;
   1.277 +    }
   1.278 +
   1.279 +    // Use placement-new to initialize header
   1.280 +    new(memory) tHeader(inUseList, 1);
   1.281 +    inUseList = memory;
   1.282 +    
   1.283 +    unsigned char* ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
   1.284 +    currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;
   1.285 +
   1.286 +    return initializeAllocation(inUseList, ret, numBytes);
   1.287 +}
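To make the three branches concrete, take the 4 KB minimum pageSize and a headerSkip of 16 (illustrative values, reckoned in terms of allocationSize as computed above):

    // pageSize = 4096, headerSkip = 16 (illustrative)
    //
    //   allocationSize = 100,  currentPageOffset = 200   -> 100 <= 4096 - 200: carved out of the
    //                                                       current page (first branch)
    //   allocationSize = 8000                            -> 8000 > 4096 - 16: multi-page branch,
    //                                                       numBytesToAlloc = 8016, pageCount = 2
    //   allocationSize = 100,  currentPageOffset = 4090  -> neither of the above: take a page from
    //                                                       freeList, or new a fresh one if empty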
   1.288 +
   1.289 +
   1.290 +//
   1.291 +// Check all allocations in a list for damage by calling check on each.
   1.292 +//
   1.293 +void TAllocation::checkAllocList() const
   1.294 +{
   1.295 +    for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
   1.296 +        alloc->check();
   1.297 +}
