Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable the double-key logic based on private browsing mode or
the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie, where it counts...
Some reservations remain about how to convince FindCookie callers to test the
condition and pass a nullptr when the double-key logic is disabled.
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef ds_LifoAlloc_h
#define ds_LifoAlloc_h

#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/MemoryChecking.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/PodOperations.h"
#include "mozilla/TemplateLib.h"
#include "mozilla/TypeTraits.h"

// This data structure supports stacky LIFO allocation (mark/release and
// LifoAllocScope). It does not maintain one contiguous segment; instead, it
// maintains a bunch of linked memory segments. In order to prevent malloc/free
// thrashing, unused segments are deallocated when garbage collection occurs.
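//
// A minimal usage sketch (illustrative only; |lifo| and the sizes are made up
// for this example):
//
//     LifoAlloc lifo(4096);               // 4 KiB default chunk size
//     LifoAlloc::Mark m = lifo.mark();    // remember the current position
//     void *p = lifo.alloc(128);          // bump-allocate 128 bytes
//     ...
//     lifo.release(m);                    // frees p (and everything after it)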

#include "jsutil.h"

namespace js {

namespace detail {

static const size_t LIFO_ALLOC_ALIGN = 8;

MOZ_ALWAYS_INLINE
char *
AlignPtr(void *orig)
{
    static_assert(mozilla::tl::FloorLog2<LIFO_ALLOC_ALIGN>::value ==
                  mozilla::tl::CeilingLog2<LIFO_ALLOC_ALIGN>::value,
                  "LIFO_ALLOC_ALIGN must be a power of two");

    char *result = (char *) ((uintptr_t(orig) + (LIFO_ALLOC_ALIGN - 1)) & (~LIFO_ALLOC_ALIGN + 1));
    JS_ASSERT(uintptr_t(result) % LIFO_ALLOC_ALIGN == 0);
    return result;
}
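
// Worked example of the rounding above (illustrative, with
// LIFO_ALLOC_ALIGN == 8): (~LIFO_ALLOC_ALIGN + 1) is the two's-complement
// negation of 8, i.e. a mask with the low three bits clear. For an input
// address 0x1005:
//
//     0x1005 + 7 = 0x100c       // add (align - 1)
//     0x100c & ~0x7 = 0x1008    // clear the low bits: rounded up to 8
//
// An already-aligned pointer is unchanged: 0x1008 + 7 = 0x100f, which masks
// back down to 0x1008.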

// Header for a chunk of memory wrangled by the LifoAlloc.
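//
// The chunk is one malloc'd region: the BumpChunk header sits at its start
// and the allocatable space follows it (layout sketch, not to scale):
//
//     | BumpChunk header | allocated ... ^bump ... free ... | ^limit
//
// Allocation moves |bump| to the right; release moves it back to the left.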
class BumpChunk
{
    char        *bump;          // start of the available data
    char        *limit;         // end of the data
    BumpChunk   *next_;         // the next BumpChunk
    size_t      bumpSpaceSize;  // size of the data area

    char *headerBase() { return reinterpret_cast<char *>(this); }
    char *bumpBase() const { return limit - bumpSpaceSize; }

    explicit BumpChunk(size_t bumpSpaceSize)
      : bump(reinterpret_cast<char *>(MOZ_THIS_IN_INITIALIZER_LIST()) + sizeof(BumpChunk)),
        limit(bump + bumpSpaceSize),
        next_(nullptr), bumpSpaceSize(bumpSpaceSize)
    {
        JS_ASSERT(bump == AlignPtr(bump));
    }

    void setBump(void *ptr) {
        JS_ASSERT(bumpBase() <= ptr);
        JS_ASSERT(ptr <= limit);
#if defined(DEBUG) || defined(MOZ_HAVE_MEM_CHECKS)
        char *prevBump = bump;
#endif
        bump = static_cast<char *>(ptr);
#ifdef DEBUG
        JS_ASSERT(contains(prevBump));

        // Clobber the now-free space.
        if (prevBump > bump)
            memset(bump, 0xcd, prevBump - bump);
#endif

        // Poison/Unpoison memory that we just free'd/allocated.
#if defined(MOZ_HAVE_MEM_CHECKS)
        if (prevBump > bump)
            MOZ_MAKE_MEM_NOACCESS(bump, prevBump - bump);
        else if (bump > prevBump)
            MOZ_MAKE_MEM_UNDEFINED(prevBump, bump - prevBump);
#endif
    }

  public:
    BumpChunk *next() const { return next_; }
    void setNext(BumpChunk *succ) { next_ = succ; }

    size_t used() const { return bump - bumpBase(); }

    void *start() const { return bumpBase(); }
    void *end() const { return limit; }

    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) {
        return mallocSizeOf(this);
    }

    size_t computedSizeOfIncludingThis() {
        return limit - headerBase();
    }

    void resetBump() {
        setBump(headerBase() + sizeof(BumpChunk));
    }

    void *mark() const { return bump; }

    void release(void *mark) {
        JS_ASSERT(contains(mark));
        JS_ASSERT(mark <= bump);
        setBump(mark);
    }

    bool contains(void *mark) const {
        return bumpBase() <= mark && mark <= limit;
    }

    bool canAlloc(size_t n);

    size_t unused() {
        return limit - AlignPtr(bump);
    }

    // Try to perform an allocation of size |n|, return null if not possible.
    MOZ_ALWAYS_INLINE
    void *tryAlloc(size_t n) {
        char *aligned = AlignPtr(bump);
        char *newBump = aligned + n;

        if (newBump > limit)
            return nullptr;

        // Check for overflow.
        if (MOZ_UNLIKELY(newBump < bump))
            return nullptr;

        JS_ASSERT(canAlloc(n)); // Ensure consistency between "can" and "try".
        setBump(newBump);
        return aligned;
    }
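
    // Note on the overflow check above (illustrative): if |aligned + n|
    // wraps around the top of the address space, |newBump| ends up
    // numerically below |bump|, so the |newBump > limit| test alone would
    // not reject it; the second test does.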

    static BumpChunk *new_(size_t chunkSize);
    static void delete_(BumpChunk *chunk);
};

} // namespace detail

// LIFO bump allocator: used for phase-oriented and fast LIFO allocations.
//
// Note: |latest| is not necessarily "last". We leave BumpChunks latent in the
// chain after they've been released to avoid thrashing before a GC.
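//
// For example (illustrative): after allocating into chunks C1 -> C2 -> C3 and
// then releasing back to a mark inside C1, |latest| points at C1 while |last|
// still points at C3; C2 and C3 stay linked so later allocations can reuse
// them instead of freeing and re-mallocing.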
class LifoAlloc
{
    typedef detail::BumpChunk BumpChunk;

    BumpChunk   *first;
    BumpChunk   *latest;
    BumpChunk   *last;
    size_t      markCount;
    size_t      defaultChunkSize_;
    size_t      curSize_;
    size_t      peakSize_;

    void operator=(const LifoAlloc &) MOZ_DELETE;
    LifoAlloc(const LifoAlloc &) MOZ_DELETE;

    // Return a BumpChunk that can perform an allocation of at least size |n|
    // and add it to the chain appropriately.
    //
    // Side effect: if retval is non-null, |first| and |latest| are initialized
    // appropriately.
    BumpChunk *getOrCreateChunk(size_t n);

    void reset(size_t defaultChunkSize) {
        JS_ASSERT(mozilla::RoundUpPow2(defaultChunkSize) == defaultChunkSize);
        first = latest = last = nullptr;
        defaultChunkSize_ = defaultChunkSize;
        markCount = 0;
        curSize_ = 0;
    }

    // Append unused chunks to the end of this LifoAlloc.
    void appendUnused(BumpChunk *start, BumpChunk *end) {
        JS_ASSERT(start && end);
        if (last)
            last->setNext(start);
        else
            first = latest = start;
        last = end;
    }

    // Append used chunks to the end of this LifoAlloc. We act as if all the
    // chunks in |this| are used, even if they're not, so memory may be wasted.
    void appendUsed(BumpChunk *start, BumpChunk *latest, BumpChunk *end) {
        JS_ASSERT(start && latest && end);
        if (last)
            last->setNext(start);
        else
            first = latest = start;
        last = end;
        this->latest = latest;
    }

    void incrementCurSize(size_t size) {
        curSize_ += size;
        if (curSize_ > peakSize_)
            peakSize_ = curSize_;
    }
    void decrementCurSize(size_t size) {
        JS_ASSERT(curSize_ >= size);
        curSize_ -= size;
    }

  public:
    explicit LifoAlloc(size_t defaultChunkSize)
      : peakSize_(0)
    {
        reset(defaultChunkSize);
    }

    // Steal allocated chunks from |other|.
    void steal(LifoAlloc *other) {
        JS_ASSERT(!other->markCount);

        // Copy everything from |other| to |this| except for |peakSize_|, which
        // requires some care.
        size_t oldPeakSize = peakSize_;
        mozilla::PodAssign(this, other);
        peakSize_ = Max(oldPeakSize, curSize_);

        other->reset(defaultChunkSize_);
    }

    // Append all chunks from |other|. They are removed from |other|.
    void transferFrom(LifoAlloc *other);

    // Append unused chunks from |other|. They are removed from |other|.
    void transferUnusedFrom(LifoAlloc *other);

    ~LifoAlloc() { freeAll(); }

    size_t defaultChunkSize() const { return defaultChunkSize_; }

    // Frees all held memory.
    void freeAll();

    static const unsigned HUGE_ALLOCATION = 50 * 1024 * 1024;
    void freeAllIfHugeAndUnused() {
        if (markCount == 0 && curSize_ > HUGE_ALLOCATION)
            freeAll();
    }

    MOZ_ALWAYS_INLINE
    void *alloc(size_t n) {
        JS_OOM_POSSIBLY_FAIL();

        void *result;
        if (latest && (result = latest->tryAlloc(n)))
            return result;

        if (!getOrCreateChunk(n))
            return nullptr;

        // Since we just created a large enough chunk, this can't fail.
        result = latest->tryAlloc(n);
        MOZ_ASSERT(result);
        return result;
    }

    // Ensures that enough space exists to satisfy N bytes worth of
    // allocation requests, not necessarily contiguous. Note that this does
    // not guarantee a successful single allocation of N bytes.
    MOZ_ALWAYS_INLINE
    bool ensureUnusedApproximate(size_t n) {
        size_t total = 0;
        for (BumpChunk *chunk = latest; chunk; chunk = chunk->next()) {
            total += chunk->unused();
            if (total >= n)
                return true;
        }
        BumpChunk *latestBefore = latest;
        if (!getOrCreateChunk(n))
            return false;
        if (latestBefore)
            latest = latestBefore;
        return true;
    }
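
    // For example (illustrative): ensureUnusedApproximate(100) is satisfied
    // by 60 unused bytes in the current chunk plus 40 in the next one, yet a
    // subsequent alloc(100) still needs 100 contiguous bytes and may have to
    // create a fresh chunk.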

    template <typename T>
    T *newArray(size_t count) {
        JS_STATIC_ASSERT(mozilla::IsPod<T>::value);
        return newArrayUninitialized<T>(count);
    }

    // Create an array with uninitialized elements of type |T|.
    // The caller is responsible for initialization.
    template <typename T>
    T *newArrayUninitialized(size_t count) {
        if (count & mozilla::tl::MulOverflowMask<sizeof(T)>::value)
            return nullptr;
        return static_cast<T *>(alloc(sizeof(T) * count));
    }
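
    // Note on the overflow guard above (illustrative, assuming a 64-bit
    // size_t and sizeof(T) == 8): MulOverflowMask<8> has the top three bits
    // set, so any |count| large enough that |count * 8| would wrap trips the
    // mask and we return nullptr before the multiplication happens.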

    class Mark {
        BumpChunk *chunk;
        void *markInChunk;
        friend class LifoAlloc;
        Mark(BumpChunk *chunk, void *markInChunk) : chunk(chunk), markInChunk(markInChunk) {}
      public:
        Mark() : chunk(nullptr), markInChunk(nullptr) {}
    };

    Mark mark() {
        markCount++;
        return latest ? Mark(latest, latest->mark()) : Mark();
    }

    void release(Mark mark) {
        markCount--;
        if (!mark.chunk) {
            latest = first;
            if (latest)
                latest->resetBump();
        } else {
            latest = mark.chunk;
            latest->release(mark.markInChunk);
        }
    }

    void releaseAll() {
        JS_ASSERT(!markCount);
        latest = first;
        if (latest)
            latest->resetBump();
    }

    // Get the total "used" (occupied bytes) count for the arena chunks.
    size_t used() const {
        size_t accum = 0;
        for (BumpChunk *chunk = first; chunk; chunk = chunk->next()) {
            accum += chunk->used();
            if (chunk == latest)
                break;
        }
        return accum;
    }

    // Return true if the LifoAlloc does not currently contain any allocations.
    bool isEmpty() const {
        return !latest || !latest->used();
    }

    // Return the number of bytes remaining to allocate in the current chunk,
    // i.e. how many bytes we can allocate before needing a new block.
    size_t availableInCurrentChunk() const {
        if (!latest)
            return 0;
        return latest->unused();
    }

    // Get the total size of the arena chunks (including unused space).
    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        size_t n = 0;
        for (BumpChunk *chunk = first; chunk; chunk = chunk->next())
            n += chunk->sizeOfIncludingThis(mallocSizeOf);
        return n;
    }

    // Like sizeOfExcludingThis(), but includes the size of the LifoAlloc itself.
    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
    }

    // Get the peak size of the arena chunks (including unused space and
    // bookkeeping space).
    size_t peakSizeOfExcludingThis() const { return peakSize_; }

    // Doesn't perform construction; useful for lazily-initialized POD types.
    template <typename T>
    MOZ_ALWAYS_INLINE
    T *newPod() {
        return static_cast<T *>(alloc(sizeof(T)));
    }

    JS_DECLARE_NEW_METHODS(new_, alloc, MOZ_ALWAYS_INLINE)

    // A mutable enumeration of the allocated data.
    class Enum
    {
        friend class LifoAlloc;
        friend class detail::BumpChunk;

        LifoAlloc *alloc_;  // The LifoAlloc being traversed.
        BumpChunk *chunk_;  // The current chunk.
        char *position_;    // The current position (must be within chunk_).

        // If there is not enough room in the remaining block for |size|,
        // advance to the next block and update the position.
        void ensureSpaceAndAlignment(size_t size) {
            JS_ASSERT(!empty());
            char *aligned = detail::AlignPtr(position_);
            if (aligned + size > chunk_->end()) {
                chunk_ = chunk_->next();
                position_ = static_cast<char *>(chunk_->start());
            } else {
                position_ = aligned;
            }
            JS_ASSERT(uintptr_t(position_) + size <= uintptr_t(chunk_->end()));
        }

      public:
        Enum(LifoAlloc &alloc)
          : alloc_(&alloc),
            chunk_(alloc.first),
            position_(static_cast<char *>(alloc.first ? alloc.first->start() : nullptr))
        {}

        // Return true if there are no more bytes to enumerate.
        bool empty() {
            return !chunk_ || (chunk_ == alloc_->latest && position_ >= chunk_->mark());
        }

        // Move the read position forward by the size of one T.
        template <typename T>
        void popFront() {
            popFront(sizeof(T));
        }

        // Move the read position forward by |size| bytes.
        void popFront(size_t size) {
            ensureSpaceAndAlignment(size);
            position_ = position_ + size;
        }

        // Update the bytes at the current position with a new value.
        template <typename T>
        void updateFront(const T &t) {
            ensureSpaceAndAlignment(sizeof(T));
            memmove(position_, &t, sizeof(T));
        }

        // Return a pointer to the item at the current position. This
        // returns a pointer to the inline storage, not a copy.
        template <typename T>
        T *get(size_t size = sizeof(T)) {
            ensureSpaceAndAlignment(size);
            return reinterpret_cast<T *>(position_);
        }

        // Return a Mark at the current position of the Enum.
        Mark mark() {
            alloc_->markCount++;
            return Mark(chunk_, position_);
        }
    };
};
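
// A minimal enumeration sketch (illustrative only; assumes the LifoAlloc
// holds nothing but separately allocated POD |int| values, which the caller
// must know out of band):
//
//     LifoAlloc lifo(4096);
//     for (int i = 0; i < 3; i++) {
//         if (int *n = lifo.newPod<int>())
//             *n = i;
//     }
//     for (LifoAlloc::Enum e(lifo); !e.empty(); e.popFront<int>())
//         *e.get<int>() += 1;    // mutate each int in place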

class LifoAllocScope
{
    LifoAlloc       *lifoAlloc;
    LifoAlloc::Mark mark;
    bool            shouldRelease;
    MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER

  public:
    explicit LifoAllocScope(LifoAlloc *lifoAlloc
                            MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
      : lifoAlloc(lifoAlloc),
        mark(lifoAlloc->mark()),
        shouldRelease(true)
    {
        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
    }

    ~LifoAllocScope() {
        if (shouldRelease)
            lifoAlloc->release(mark);
    }

    LifoAlloc &alloc() {
        return *lifoAlloc;
    }

    void releaseEarly() {
        JS_ASSERT(shouldRelease);
        lifoAlloc->release(mark);
        shouldRelease = false;
    }
};
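
// RAII usage sketch (illustrative only; |processPhase| is made up for this
// example): everything allocated inside the scope is released when the scope
// ends, with no individual frees.
//
//     void processPhase(LifoAlloc &lifo) {
//         LifoAllocScope scope(&lifo);
//         char *buf = lifo.newArray<char>(256);
//         ...                            // use buf within this phase
//     }                                  // destructor releases to the mark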

class LifoAllocPolicy
{
    LifoAlloc &alloc_;

  public:
    LifoAllocPolicy(LifoAlloc &alloc)
      : alloc_(alloc)
    {}
    void *malloc_(size_t bytes) {
        return alloc_.alloc(bytes);
    }
    void *calloc_(size_t bytes) {
        void *p = malloc_(bytes);
        if (p)
            memset(p, 0, bytes);
        return p;
    }
    void *realloc_(void *p, size_t oldBytes, size_t bytes) {
        // A LIFO allocator cannot grow in place; allocate fresh space and
        // copy. The old bytes stay allocated until a mark is released.
        void *n = malloc_(bytes);
        if (!n)
            return n;
        memcpy(n, p, Min(oldBytes, bytes));
        return n;
    }
    void free_(void *p) {
        // Individual allocations cannot be freed from a LIFO allocator;
        // memory is reclaimed in bulk via mark/release or freeAll().
    }
    void reportAllocOverflow() const {
    }
};
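
// Usage sketch (illustrative only; assumes js::Vector, which takes an
// allocation policy as its third template parameter and a policy instance in
// its constructor):
//
//     LifoAlloc lifo(4096);
//     Vector<int, 8, LifoAllocPolicy> vec((LifoAllocPolicy(lifo)));
//     if (!vec.append(42))
//         return false;    // storage came from |lifo|; nothing to free here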

} // namespace js

#endif /* ds_LifoAlloc_h */