gfx/angle/src/compiler/PoolAlloc.h

Wed, 31 Dec 2014 06:09:35 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Wed, 31 Dec 2014 06:09:35 +0100
changeset 0
6474c204b198
permissions
-rw-r--r--

Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

michael@0 1 //
michael@0 2 // Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
michael@0 3 // Use of this source code is governed by a BSD-style license that can be
michael@0 4 // found in the LICENSE file.
michael@0 5 //
michael@0 6
michael@0 7 #ifndef _POOLALLOC_INCLUDED_
michael@0 8 #define _POOLALLOC_INCLUDED_
michael@0 9
michael@0 10 #ifdef _DEBUG
michael@0 11 #define GUARD_BLOCKS // define to enable guard block sanity checking
michael@0 12 #endif
michael@0 13
michael@0 14 //
michael@0 15 // This header defines an allocator that can be used to efficiently
michael@0 16 // allocate a large number of small requests for heap memory, with the
michael@0 17 // intention that they are not individually deallocated, but rather
michael@0 18 // collectively deallocated at one time.
michael@0 19 //
michael@0 20 // This simultaneously
michael@0 21 //
michael@0 22 // * Makes each individual allocation much more efficient; the
michael@0 23 // typical allocation is trivial.
michael@0 24 // * Completely avoids the cost of doing individual deallocation.
michael@0 25 // * Saves the trouble of tracking down and plugging a large class of leaks.
michael@0 26 //
michael@0 27 // Individual classes can use this allocator by supplying their own
michael@0 28 // new and delete methods.
michael@0 29 //
michael@0 30 // STL containers can use this allocator by using the pool_allocator
michael@0 31 // class as the allocator (second) template argument.
michael@0 32 //
michael@0 33
#include <stddef.h>
#include <string.h>

#include <new>
#include <vector>
michael@0 37
// If we are using guard blocks, we must track each individual
// allocation.  If we aren't using guard blocks, these
// never get instantiated, so won't have any impact.
michael@0 41 //
michael@0 42
//
// Tracks one allocation when guard blocks are enabled. Each user request
// is bracketed in memory as:
//   [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
// so buffer overruns/underruns can be detected when the pool is checked
// or torn down. Without GUARD_BLOCKS, headerSize() is 0 and these objects
// are never instantiated, so there is no overhead.
//
class TAllocation {
public:
    // 'size' is the user-data byte count, 'mem' points at the start of
    // the whole bracketed allocation (i.e. at this header), and 'prev'
    // links allocations on the same page into a singly linked list that
    // checkAllocList() can walk.
    TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) :
        size(size), mem(mem), prevAlloc(prev) {
        // Allocations are bracketed:
        //    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
        // This would be cleaner with if (guardBlockSize)..., but that
        // makes the compiler print warnings about 0 length memsets,
        // even with the if() protecting them.
#ifdef GUARD_BLOCKS
        memset(preGuard(),  guardBlockBeginVal, guardBlockSize);
        memset(data(),      userDataFill,       size);
        memset(postGuard(), guardBlockEndVal,   guardBlockSize);
#endif
    }

    // Verify both guard blocks still hold their fill patterns;
    // checkGuardBlock() (defined out of line) reports any corruption.
    void check() const {
        checkGuardBlock(preGuard(),  guardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), guardBlockEndVal,   "after");
    }

    // Walk the prevAlloc chain, check()-ing every allocation on it.
    // (Defined out of line.)
    void checkAllocList() const;

    // Return total size needed to accommodate user buffer of 'size',
    // plus our tracking data.
    inline static size_t allocationSize(size_t size) {
        return size + 2 * guardBlockSize + headerSize();
    }

    // Offset from surrounding buffer to get to user data buffer.
    inline static unsigned char* offsetAllocation(unsigned char* m) {
        return m + guardBlockSize + headerSize();
    }

private:
    void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;

    // Find offsets to pre and post guard blocks, and user data buffer
    unsigned char* preGuard()  const { return mem + headerSize(); }
    unsigned char* data()      const { return preGuard() + guardBlockSize; }
    unsigned char* postGuard() const { return data() + size; }

    size_t size;            // size of the user data area
    unsigned char* mem;     // beginning of our allocation (pts to header)
    TAllocation* prevAlloc; // prior allocation in the chain

    // Fill patterns for the guard blocks and fresh user data. Kept as
    // out-of-line static consts to support MSVC++ 6.0; the values live
    // in the accompanying .cpp.
    const static unsigned char guardBlockBeginVal;
    const static unsigned char guardBlockEndVal;
    const static unsigned char userDataFill;

    const static size_t guardBlockSize;
#ifdef GUARD_BLOCKS
    // Tracking header occupies real space only when guard blocks are on.
    inline static size_t headerSize() { return sizeof(TAllocation); }
#else
    inline static size_t headerSize() { return 0; }
#endif
};
michael@0 101
michael@0 102 //
// There are several stacks.  One is to track the pushing and popping
// of the user, and not yet implemented.  The others are simply
// repositories of free pages or used pages.
michael@0 106 //
michael@0 107 // Page stacks are linked together with a simple header at the beginning
michael@0 108 // of each allocation obtained from the underlying OS. Multi-page allocations
michael@0 109 // are returned to the OS. Individual page allocations are kept for future
michael@0 110 // re-use.
michael@0 111 //
michael@0 112 // The "page size" used is not, nor must it match, the underlying OS
michael@0 113 // page size. But, having it be about that size or equal to a set of
michael@0 114 // pages is likely most optimal.
michael@0 115 //
//
// Pool allocator: hands out many small allocations carved from larger
// pages obtained from the OS. There is no per-allocation free; memory
// is reclaimed in bulk via pop()/popAll(). All non-inline members are
// defined in the accompanying .cpp.
//
class TPoolAllocator {
public:
    // growthIncrement: page size used when asking the OS for memory
    //                  (not required to match the OS page size).
    // allocationAlignment: alignment of every returned pointer; the
    //                  implementation expects a power of 2.
    TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16);

    //
    // Don't call the destructor just to free up the memory, call pop()
    //
    ~TPoolAllocator();

    //
    // Call push() to establish a new place to pop memory to.  Does not
    // have to be called to get things started.
    //
    void push();

    //
    // Call pop() to free all memory allocated since the last call to push(),
    // or if no last call to push, frees all memory since first allocation.
    //
    void pop();

    //
    // Call popAll() to free all memory allocated.
    //
    void popAll();

    //
    // Call allocate() to actually acquire memory.  Returns 0 if no memory
    // available, otherwise a properly aligned pointer to 'numBytes' of memory.
    //
    void* allocate(size_t numBytes);

    //
    // There is no deallocate.  The point of this class is that
    // deallocation can be skipped by the user of it, as the model
    // of use is to simultaneously deallocate everything at once
    // by calling pop(), and to not have to solve memory leak problems.
    //

protected:
    friend struct tHeader;

    // Per-page header linking pages into freeList/inUseList. When guard
    // blocks are on it also anchors the page's allocation chain, which
    // is validated when the page is destroyed.
    struct tHeader {
        tHeader(tHeader* nextPage, size_t pageCount) :
            nextPage(nextPage),
            pageCount(pageCount)
#ifdef GUARD_BLOCKS
          , lastAllocation(0)
#endif
            { }

        ~tHeader() {
#ifdef GUARD_BLOCKS
            // Check every allocation on this page for guard-block damage.
            if (lastAllocation)
                lastAllocation->checkAllocList();
#endif
        }

        tHeader* nextPage;  // next page in this list
        size_t pageCount;   // number of pageSize units in this OS allocation
#ifdef GUARD_BLOCKS
        TAllocation* lastAllocation;  // head of this page's allocation chain
#endif
    };

    // Snapshot of the allocation position, saved by push() and restored
    // by pop().
    struct tAllocState {
        size_t offset;
        tHeader* page;
    };
    typedef std::vector<tAllocState> tAllocStack;

    // Track allocations if and only if we're using guard blocks
    void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
#ifdef GUARD_BLOCKS
        // Placement-construct the tracking header in front of the user
        // data and link it into this page's allocation chain.
        // (Placement new requires <new>.)
        new(memory) TAllocation(numBytes, memory, block->lastAllocation);
        block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
#endif
        // This is optimized entirely away if GUARD_BLOCKS is not defined.
        return TAllocation::offsetAllocation(memory);
    }

    size_t pageSize;        // granularity of allocation from the OS
    size_t alignment;       // all returned allocations will be aligned at
                            // this granularity, which will be a power of 2
    size_t alignmentMask;
    size_t headerSkip;      // amount of memory to skip to make room for the
                            //      header (basically, size of header, rounded
                            //      up to make it aligned
    size_t currentPageOffset;  // next offset in top of inUseList to allocate from
    tHeader* freeList;      // list of popped memory
    tHeader* inUseList;     // list of all memory currently being used
    tAllocStack stack;      // stack of where to allocate from, to partition pool

    int numCalls;           // just an interesting statistic
    size_t totalBytes;      // just an interesting statistic
private:
    TPoolAllocator& operator=(const TPoolAllocator&);  // dont allow assignment operator
    TPoolAllocator(const TPoolAllocator&);  // dont allow default copy constructor
};
michael@0 215
michael@0 216
//
// There could potentially be many pools with pops happening at
// different times. But a simple use is to have a global pop
// with everyone using the same global allocator.
//
// Accessors for the global pool consulted by pool_allocator's default
// constructor; defined in the accompanying .cpp.
// NOTE(review): whether this global is per-thread or process-wide is
// not visible from this header — confirm in the implementation.
extern TPoolAllocator* GetGlobalPoolAllocator();
extern void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator);
michael@0 224
michael@0 225 //
michael@0 226 // This STL compatible allocator is intended to be used as the allocator
michael@0 227 // parameter to templatized STL containers, like vector and map.
michael@0 228 //
michael@0 229 // It will use the pools for allocation, and not
michael@0 230 // do any deallocation, but will still do destruction.
michael@0 231 //
michael@0 232 template<class T>
michael@0 233 class pool_allocator {
michael@0 234 public:
michael@0 235 typedef size_t size_type;
michael@0 236 typedef ptrdiff_t difference_type;
michael@0 237 typedef T* pointer;
michael@0 238 typedef const T* const_pointer;
michael@0 239 typedef T& reference;
michael@0 240 typedef const T& const_reference;
michael@0 241 typedef T value_type;
michael@0 242
michael@0 243 template<class Other>
michael@0 244 struct rebind {
michael@0 245 typedef pool_allocator<Other> other;
michael@0 246 };
michael@0 247 pointer address(reference x) const { return &x; }
michael@0 248 const_pointer address(const_reference x) const { return &x; }
michael@0 249
michael@0 250 pool_allocator() : allocator(GetGlobalPoolAllocator()) { }
michael@0 251 pool_allocator(TPoolAllocator& a) : allocator(&a) { }
michael@0 252 pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { }
michael@0 253
michael@0 254 template <class Other>
michael@0 255 pool_allocator<T>& operator=(const pool_allocator<Other>& p) {
michael@0 256 allocator = p.allocator;
michael@0 257 return *this;
michael@0 258 }
michael@0 259
michael@0 260 template<class Other>
michael@0 261 pool_allocator(const pool_allocator<Other>& p) : allocator(&p.getAllocator()) { }
michael@0 262
michael@0 263 #if defined(__SUNPRO_CC) && !defined(_RWSTD_ALLOCATOR)
michael@0 264 // libCStd on some platforms have a different allocate/deallocate interface.
michael@0 265 // Caller pre-bakes sizeof(T) into 'n' which is the number of bytes to be
michael@0 266 // allocated, not the number of elements.
michael@0 267 void* allocate(size_type n) {
michael@0 268 return getAllocator().allocate(n);
michael@0 269 }
michael@0 270 void* allocate(size_type n, const void*) {
michael@0 271 return getAllocator().allocate(n);
michael@0 272 }
michael@0 273 void deallocate(void*, size_type) {}
michael@0 274 #else
michael@0 275 pointer allocate(size_type n) {
michael@0 276 return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T)));
michael@0 277 }
michael@0 278 pointer allocate(size_type n, const void*) {
michael@0 279 return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T)));
michael@0 280 }
michael@0 281 void deallocate(pointer, size_type) {}
michael@0 282 #endif // _RWSTD_ALLOCATOR
michael@0 283
michael@0 284 void construct(pointer p, const T& val) { new ((void *)p) T(val); }
michael@0 285 void destroy(pointer p) { p->T::~T(); }
michael@0 286
michael@0 287 bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); }
michael@0 288 bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); }
michael@0 289
michael@0 290 size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
michael@0 291 size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
michael@0 292
michael@0 293 void setAllocator(TPoolAllocator* a) { allocator = a; }
michael@0 294 TPoolAllocator& getAllocator() const { return *allocator; }
michael@0 295
michael@0 296 protected:
michael@0 297 TPoolAllocator* allocator;
michael@0 298 };
michael@0 299
michael@0 300 #endif // _POOLALLOC_INCLUDED_

mercurial