gfx/angle/src/compiler/PoolAlloc.h

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/gfx/angle/src/compiler/PoolAlloc.h	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,300 @@
     1.4 +//
     1.5 +// Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
     1.6 +// Use of this source code is governed by a BSD-style license that can be
     1.7 +// found in the LICENSE file.
     1.8 +//
     1.9 +
    1.10 +#ifndef _POOLALLOC_INCLUDED_
    1.11 +#define _POOLALLOC_INCLUDED_
    1.12 +
    1.13 +#ifdef _DEBUG
    1.14 +#define GUARD_BLOCKS  // define to enable guard block sanity checking
    1.15 +#endif
    1.16 +
    1.17 +//
    1.18 +// This header defines an allocator that can be used to efficiently
    1.19 +// allocate a large number of small requests for heap memory, with the 
    1.20 +// intention that they are not individually deallocated, but rather 
    1.21 +// collectively deallocated at one time.
    1.22 +//
    1.23 +// This simultaneously
    1.24 +//
    1.25 +// * Makes each individual allocation much more efficient; the
    1.26 +//     typical allocation is trivial.
    1.27 +// * Completely avoids the cost of doing individual deallocation.
    1.28 +// * Saves the trouble of tracking down and plugging a large class of leaks.
    1.29 +//
    1.30 +// Individual classes can use this allocator by supplying their own
    1.31 +// new and delete methods.
    1.32 +//
    1.33 +// STL containers can use this allocator by using the pool_allocator
    1.34 +// class as the allocator (second) template argument.
    1.35 +//
    1.36 +
    1.37 +#include <stddef.h>
    1.38 +#include <string.h>
    1.39 +#include <vector>
    1.40 +
// If we are using guard blocks, we must track each individual
// allocation.  If we aren't using guard blocks, these
// never get instantiated, so won't have any impact.
//
// Tracks one allocation when guard-block checking is enabled.  Each user
// request is wrapped as:
//    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
// so overruns and underruns of the user data can be detected when the
// guard bytes are later verified by check()/checkAllocList().
class TAllocation {
public:
    // 'size' is the user data size; 'mem' points at the start of the whole
    // wrapped allocation (i.e. at this header); 'prev' links allocations
    // within a page into a singly linked chain.
    TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) :
        size(size), mem(mem), prevAlloc(prev) {
        // Allocations are bracketed:
        //    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
        // This would be cleaner with if (guardBlockSize)..., but that
        // makes the compiler print warnings about 0 length memsets,
        // even with the if() protecting them.
#ifdef GUARD_BLOCKS
        memset(preGuard(), guardBlockBeginVal, guardBlockSize);
        memset(data(),      userDataFill,       size);
        memset(postGuard(), guardBlockEndVal,   guardBlockSize);
#endif
    }

    // Verify that both guard blocks of this allocation are still intact.
    void check() const {
        checkGuardBlock(preGuard(),  guardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), guardBlockEndVal,   "after");
    }

    // Walk the prevAlloc chain, checking every allocation in it
    // (defined out-of-line in the implementation file).
    void checkAllocList() const;

    // Return total size needed to accommodate user buffer of 'size',
    // plus our tracking data.
    inline static size_t allocationSize(size_t size) {
        return size + 2 * guardBlockSize + headerSize();
    }

    // Offset from surrounding buffer to get to user data buffer.
    inline static unsigned char* offsetAllocation(unsigned char* m) {
        return m + guardBlockSize + headerSize();
    }

private:
    // Report/abort if 'blockMem' does not consist entirely of 'val' bytes
    // (defined out-of-line); 'locText' says which side ("before"/"after").
    void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;

    // Find offsets to pre and post guard blocks, and user data buffer
    unsigned char* preGuard()  const { return mem + headerSize(); }
    unsigned char* data()      const { return preGuard() + guardBlockSize; }
    unsigned char* postGuard() const { return data() + size; }

    size_t size;                  // size of the user data area
    unsigned char* mem;           // beginning of our allocation (pts to header)
    TAllocation* prevAlloc;       // prior allocation in the chain

    // Values live out-of-line (in the .cpp) to support MSVC++ 6.0.
    const static unsigned char guardBlockBeginVal;
    const static unsigned char guardBlockEndVal;
    const static unsigned char userDataFill;

    const static size_t guardBlockSize;
#ifdef GUARD_BLOCKS
    // With guard blocks on, every allocation carries this header.
    inline static size_t headerSize() { return sizeof(TAllocation); }
#else
    // Without guard blocks there is zero per-allocation overhead.
    inline static size_t headerSize() { return 0; }
#endif
};
   1.104 +
//
// There are several stacks.  One is to track the pushing and popping
// of the user, and not yet implemented.  The others are simply
// repositories of free pages or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS.  Multi-page allocations
// are returned to the OS.  Individual page allocations are kept for future
// re-use.
//
// The "page size" used is not, nor must it match, the underlying OS
// page size.  But, having it be about that size or equal to a set of
// pages is likely most optimal.
//
// Pool allocator: serves many small allocations cheaply from large pages,
// releasing everything at once via pop()/popAll() instead of per-object
// deallocation.  Member functions are defined in the implementation file.
class TPoolAllocator {
public:
    // growthIncrement: page size requested from the OS (bytes);
    // allocationAlignment: alignment of every returned pointer
    // (expected to be a power of 2 — see 'alignment' below).
    TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16);

    //
    // Don't call the destructor just to free up the memory, call pop()
    //
    ~TPoolAllocator();

    //
    // Call push() to establish a new place to pop memory to.  Does not
    // have to be called to get things started.
    //
    void push();

    //
    // Call pop() to free all memory allocated since the last call to push(),
    // or if no last call to push, frees all memory since first allocation.
    //
    void pop();

    //
    // Call popAll() to free all memory allocated.
    //
    void popAll();

    //
    // Call allocate() to actually acquire memory.  Returns 0 if no memory
    // available, otherwise a properly aligned pointer to 'numBytes' of memory.
    //
    void* allocate(size_t numBytes);

    //
    // There is no deallocate.  The point of this class is that
    // deallocation can be skipped by the user of it, as the model
    // of use is to simultaneously deallocate everything at once
    // by calling pop(), and to not have to solve memory leak problems.
    //

protected:
    friend struct tHeader;
    
    // Header placed at the front of each page obtained from the OS.
    // Links pages into freeList/inUseList and (with guard blocks)
    // anchors the chain of allocations made from that page.
    struct tHeader {
        tHeader(tHeader* nextPage, size_t pageCount) :
            nextPage(nextPage),
            pageCount(pageCount)
#ifdef GUARD_BLOCKS
          , lastAllocation(0)
#endif
            { }

        ~tHeader() {
#ifdef GUARD_BLOCKS
            // Verify every allocation's guard blocks before the page goes away.
            if (lastAllocation)
                lastAllocation->checkAllocList();
#endif
        }

        tHeader* nextPage;            // next page in this list
        size_t pageCount;             // how many pageSize units this block spans
#ifdef GUARD_BLOCKS
        TAllocation* lastAllocation;  // most recent allocation from this page
#endif
    };

    // Snapshot of allocator state captured by push() and restored by pop().
    struct tAllocState {
        size_t offset;   // currentPageOffset at push() time
        tHeader* page;   // top of inUseList at push() time
    };
    typedef std::vector<tAllocState> tAllocStack;

    // Track allocations if and only if we're using guard blocks
    void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
#ifdef GUARD_BLOCKS
        // Placement-construct the tracking header in front of the user data
        // and push it onto the page's allocation chain.
        new(memory) TAllocation(numBytes, memory, block->lastAllocation);
        block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
#endif
        // This is optimized entirely away if GUARD_BLOCKS is not defined.
        return TAllocation::offsetAllocation(memory);
    }

    size_t pageSize;        // granularity of allocation from the OS
    size_t alignment;       // all returned allocations will be aligned at 
                            // this granularity, which will be a power of 2
    size_t alignmentMask;   // companion mask for 'alignment' (maintained in the .cpp)
    size_t headerSkip;      // amount of memory to skip to make room for the
                            //      header (basically, size of header, rounded
                            //      up to make it aligned)
    size_t currentPageOffset;  // next offset in top of inUseList to allocate from
    tHeader* freeList;      // list of popped memory
    tHeader* inUseList;     // list of all memory currently being used
    tAllocStack stack;      // stack of where to allocate from, to partition pool

    int numCalls;           // just an interesting statistic
    size_t totalBytes;      // just an interesting statistic
private:
    TPoolAllocator& operator=(const TPoolAllocator&);  // don't allow assignment operator
    TPoolAllocator(const TPoolAllocator&);  // don't allow default copy constructor
};
   1.218 +
   1.219 +
//
// There could potentially be many pools with pops happening at
// different times.  But a simple use is to have a global pop
// with everyone using the same global allocator.
//
// Accessors for the shared global pool allocator; definitions live in the
// implementation file.  NOTE(review): threading model (global vs per-thread)
// is not visible from this header — confirm in the .cpp before relying on it.
extern TPoolAllocator* GetGlobalPoolAllocator();
extern void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator);
   1.227 +
   1.228 +//
   1.229 +// This STL compatible allocator is intended to be used as the allocator
   1.230 +// parameter to templatized STL containers, like vector and map.
   1.231 +//
   1.232 +// It will use the pools for allocation, and not
   1.233 +// do any deallocation, but will still do destruction.
   1.234 +//
   1.235 +template<class T>
   1.236 +class pool_allocator {
   1.237 +public:
   1.238 +    typedef size_t size_type;
   1.239 +    typedef ptrdiff_t difference_type;
   1.240 +    typedef T* pointer;
   1.241 +    typedef const T* const_pointer;
   1.242 +    typedef T& reference;
   1.243 +    typedef const T& const_reference;
   1.244 +    typedef T value_type;
   1.245 +
   1.246 +    template<class Other> 
   1.247 +    struct rebind {
   1.248 +        typedef pool_allocator<Other> other;
   1.249 +    };
   1.250 +    pointer address(reference x) const { return &x; }
   1.251 +    const_pointer address(const_reference x) const { return &x; }
   1.252 +
   1.253 +    pool_allocator() : allocator(GetGlobalPoolAllocator()) { }
   1.254 +    pool_allocator(TPoolAllocator& a) : allocator(&a) { }
   1.255 +    pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { }
   1.256 +
   1.257 +    template <class Other>
   1.258 +    pool_allocator<T>& operator=(const pool_allocator<Other>& p) {
   1.259 +      allocator = p.allocator;
   1.260 +      return *this;
   1.261 +    }
   1.262 +
   1.263 +    template<class Other>
   1.264 +    pool_allocator(const pool_allocator<Other>& p) : allocator(&p.getAllocator()) { }
   1.265 +
   1.266 +#if defined(__SUNPRO_CC) && !defined(_RWSTD_ALLOCATOR)
   1.267 +    // libCStd on some platforms have a different allocate/deallocate interface.
   1.268 +    // Caller pre-bakes sizeof(T) into 'n' which is the number of bytes to be
   1.269 +    // allocated, not the number of elements.
   1.270 +    void* allocate(size_type n) { 
   1.271 +        return getAllocator().allocate(n);
   1.272 +    }
   1.273 +    void* allocate(size_type n, const void*) {
   1.274 +        return getAllocator().allocate(n);
   1.275 +    }
   1.276 +    void deallocate(void*, size_type) {}
   1.277 +#else
   1.278 +    pointer allocate(size_type n) { 
   1.279 +        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T)));
   1.280 +    }
   1.281 +    pointer allocate(size_type n, const void*) { 
   1.282 +        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T)));
   1.283 +    }
   1.284 +    void deallocate(pointer, size_type) {}
   1.285 +#endif  // _RWSTD_ALLOCATOR
   1.286 +
   1.287 +    void construct(pointer p, const T& val) { new ((void *)p) T(val); }
   1.288 +    void destroy(pointer p) { p->T::~T(); }
   1.289 +
   1.290 +    bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); }
   1.291 +    bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); }
   1.292 +
   1.293 +    size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
   1.294 +    size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
   1.295 +
   1.296 +    void setAllocator(TPoolAllocator* a) { allocator = a; }
   1.297 +    TPoolAllocator& getAllocator() const { return *allocator; }
   1.298 +
   1.299 +protected:
   1.300 +    TPoolAllocator* allocator;
   1.301 +};
   1.302 +
   1.303 +#endif // _POOLALLOC_INCLUDED_

mercurial