js/src/ds/LifoAlloc.h

changeset 6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/js/src/ds/LifoAlloc.h	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,525 @@
     1.4 +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     1.5 + * vim: set ts=8 sts=4 et sw=4 tw=99:
     1.6 + * This Source Code Form is subject to the terms of the Mozilla Public
     1.7 + * License, v. 2.0. If a copy of the MPL was not distributed with this
     1.8 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.9 +
    1.10 +#ifndef ds_LifoAlloc_h
    1.11 +#define ds_LifoAlloc_h
    1.12 +
    1.13 +#include "mozilla/DebugOnly.h"
    1.14 +#include "mozilla/MathAlgorithms.h"
    1.15 +#include "mozilla/MemoryChecking.h"
    1.16 +#include "mozilla/MemoryReporting.h"
    1.17 +#include "mozilla/PodOperations.h"
    1.18 +#include "mozilla/TemplateLib.h"
    1.19 +#include "mozilla/TypeTraits.h"
    1.20 +
    1.21 +// This data structure supports stacky LIFO allocation (mark/release and
    1.22 +// LifoAllocScope). It does not maintain one contiguous segment; instead, it
    1.23 +// maintains a bunch of linked memory segments. In order to prevent malloc/free
    1.24 +// thrashing, unused segments are deallocated when garbage collection occurs.
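          +//
          +// A minimal usage sketch (the 4096-byte default chunk size is an arbitrary
          +// power-of-two example, and null checks on the returned pointers are
          +// omitted):
          +//
          +//     LifoAlloc lifo(4096);
          +//     LifoAlloc::Mark m = lifo.mark();
          +//     void *scratch = lifo.alloc(64);      // bump allocation, 8-byte aligned
          +//     int *ints = lifo.newArray<int>(16);  // uninitialized POD array
          +//     lifo.release(m);                     // releases everything since mark()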
    1.25 +
    1.26 +#include "jsutil.h"
    1.27 +
    1.28 +namespace js {
    1.29 +
    1.30 +namespace detail {
    1.31 +
    1.32 +static const size_t LIFO_ALLOC_ALIGN = 8;
    1.33 +
    1.34 +MOZ_ALWAYS_INLINE
    1.35 +char *
    1.36 +AlignPtr(void *orig)
    1.37 +{
    1.38 +    static_assert(mozilla::tl::FloorLog2<LIFO_ALLOC_ALIGN>::value ==
    1.39 +                  mozilla::tl::CeilingLog2<LIFO_ALLOC_ALIGN>::value,
    1.40 +                  "LIFO_ALLOC_ALIGN must be a power of two");
    1.41 +
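          +    // Round |orig| up to the next LIFO_ALLOC_ALIGN boundary. Because the
          +    // alignment is a power of two, ~LIFO_ALLOC_ALIGN + 1 is the unsigned
          +    // equivalent of -LIFO_ALLOC_ALIGN, i.e. ~(LIFO_ALLOC_ALIGN - 1), so the
          +    // mask below clears the low bits: e.g. 0x1001 -> 0x1008, 0x1008 -> 0x1008.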
    1.42 +    char *result = (char *) ((uintptr_t(orig) + (LIFO_ALLOC_ALIGN - 1)) & (~LIFO_ALLOC_ALIGN + 1));
    1.43 +    JS_ASSERT(uintptr_t(result) % LIFO_ALLOC_ALIGN == 0);
    1.44 +    return result;
    1.45 +}
    1.46 +
    1.47 +// Header for a chunk of memory wrangled by the LifoAlloc.
    1.48 +class BumpChunk
    1.49 +{
    1.50 +    char        *bump;          // start of the available data
    1.51 +    char        *limit;         // end of the data
    1.52 +    BumpChunk   *next_;         // the next BumpChunk
    1.53 +    size_t      bumpSpaceSize;  // size of the data area
    1.54 +
    1.55 +    char *headerBase() { return reinterpret_cast<char *>(this); }
    1.56 +    char *bumpBase() const { return limit - bumpSpaceSize; }
    1.57 +
    1.58 +    explicit BumpChunk(size_t bumpSpaceSize)
    1.59 +      : bump(reinterpret_cast<char *>(MOZ_THIS_IN_INITIALIZER_LIST()) + sizeof(BumpChunk)),
    1.60 +        limit(bump + bumpSpaceSize),
    1.61 +        next_(nullptr), bumpSpaceSize(bumpSpaceSize)
    1.62 +    {
    1.63 +        JS_ASSERT(bump == AlignPtr(bump));
    1.64 +    }
    1.65 +
    1.66 +    void setBump(void *ptr) {
    1.67 +        JS_ASSERT(bumpBase() <= ptr);
    1.68 +        JS_ASSERT(ptr <= limit);
    1.69 +#if defined(DEBUG) || defined(MOZ_HAVE_MEM_CHECKS)
    1.70 +        char* prevBump = bump;
    1.71 +#endif
    1.72 +        bump = static_cast<char *>(ptr);
    1.73 +#ifdef DEBUG
    1.74 +        JS_ASSERT(contains(prevBump));
    1.75 +
    1.76 +        // Clobber the now-free space.
    1.77 +        if (prevBump > bump)
    1.78 +            memset(bump, 0xcd, prevBump - bump);
    1.79 +#endif
    1.80 +
     1.81 +        // Poison or unpoison the memory we just freed or allocated.
    1.82 +#if defined(MOZ_HAVE_MEM_CHECKS)
    1.83 +        if (prevBump > bump)
    1.84 +            MOZ_MAKE_MEM_NOACCESS(bump, prevBump - bump);
    1.85 +        else if (bump > prevBump)
    1.86 +            MOZ_MAKE_MEM_UNDEFINED(prevBump, bump - prevBump);
    1.87 +#endif
    1.88 +    }
    1.89 +
    1.90 +  public:
    1.91 +    BumpChunk *next() const { return next_; }
    1.92 +    void setNext(BumpChunk *succ) { next_ = succ; }
    1.93 +
    1.94 +    size_t used() const { return bump - bumpBase(); }
    1.95 +
    1.96 +    void *start() const { return bumpBase(); }
    1.97 +    void *end() const { return limit; }
    1.98 +
    1.99 +    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) {
   1.100 +        return mallocSizeOf(this);
   1.101 +    }
   1.102 +
   1.103 +    size_t computedSizeOfIncludingThis() {
   1.104 +        return limit - headerBase();
   1.105 +    }
   1.106 +
   1.107 +    void resetBump() {
   1.108 +        setBump(headerBase() + sizeof(BumpChunk));
   1.109 +    }
   1.110 +
   1.111 +    void *mark() const { return bump; }
   1.112 +
   1.113 +    void release(void *mark) {
   1.114 +        JS_ASSERT(contains(mark));
   1.115 +        JS_ASSERT(mark <= bump);
   1.116 +        setBump(mark);
   1.117 +    }
   1.118 +
   1.119 +    bool contains(void *mark) const {
   1.120 +        return bumpBase() <= mark && mark <= limit;
   1.121 +    }
   1.122 +
   1.123 +    bool canAlloc(size_t n);
   1.124 +
   1.125 +    size_t unused() {
   1.126 +        return limit - AlignPtr(bump);
   1.127 +    }
   1.128 +
   1.129 +    // Try to perform an allocation of size |n|, return null if not possible.
   1.130 +    MOZ_ALWAYS_INLINE
   1.131 +    void *tryAlloc(size_t n) {
   1.132 +        char *aligned = AlignPtr(bump);
   1.133 +        char *newBump = aligned + n;
   1.134 +
   1.135 +        if (newBump > limit)
   1.136 +            return nullptr;
   1.137 +
    1.138 +        // Check for overflow: if |n| is large enough that |newBump| wraps
          +        // around, the sum ends up below |bump|.
   1.139 +        if (MOZ_UNLIKELY(newBump < bump))
   1.140 +            return nullptr;
   1.141 +
   1.142 +        JS_ASSERT(canAlloc(n)); // Ensure consistency between "can" and "try".
   1.143 +        setBump(newBump);
   1.144 +        return aligned;
   1.145 +    }
   1.146 +
   1.147 +    static BumpChunk *new_(size_t chunkSize);
   1.148 +    static void delete_(BumpChunk *chunk);
   1.149 +};
   1.150 +
   1.151 +} // namespace detail
   1.152 +
   1.153 +// LIFO bump allocator: used for phase-oriented and fast LIFO allocations.
   1.154 +//
    1.155 +// Note: |latest| is not necessarily the last chunk in the chain. We leave
    1.156 +// released BumpChunks latent in the chain to avoid thrashing before a GC.
   1.157 +class LifoAlloc
   1.158 +{
   1.159 +    typedef detail::BumpChunk BumpChunk;
   1.160 +
   1.161 +    BumpChunk   *first;
   1.162 +    BumpChunk   *latest;
   1.163 +    BumpChunk   *last;
   1.164 +    size_t      markCount;
   1.165 +    size_t      defaultChunkSize_;
   1.166 +    size_t      curSize_;
   1.167 +    size_t      peakSize_;
   1.168 +
   1.169 +    void operator=(const LifoAlloc &) MOZ_DELETE;
   1.170 +    LifoAlloc(const LifoAlloc &) MOZ_DELETE;
   1.171 +
   1.172 +    // Return a BumpChunk that can perform an allocation of at least size |n|
   1.173 +    // and add it to the chain appropriately.
   1.174 +    //
   1.175 +    // Side effect: if retval is non-null, |first| and |latest| are initialized
   1.176 +    // appropriately.
   1.177 +    BumpChunk *getOrCreateChunk(size_t n);
   1.178 +
   1.179 +    void reset(size_t defaultChunkSize) {
   1.180 +        JS_ASSERT(mozilla::RoundUpPow2(defaultChunkSize) == defaultChunkSize);
   1.181 +        first = latest = last = nullptr;
   1.182 +        defaultChunkSize_ = defaultChunkSize;
   1.183 +        markCount = 0;
   1.184 +        curSize_ = 0;
   1.185 +    }
   1.186 +
   1.187 +    // Append unused chunks to the end of this LifoAlloc.
   1.188 +    void appendUnused(BumpChunk *start, BumpChunk *end) {
   1.189 +        JS_ASSERT(start && end);
   1.190 +        if (last)
   1.191 +            last->setNext(start);
   1.192 +        else
   1.193 +            first = latest = start;
   1.194 +        last = end;
   1.195 +    }
   1.196 +
   1.197 +    // Append used chunks to the end of this LifoAlloc. We act as if all the
   1.198 +    // chunks in |this| are used, even if they're not, so memory may be wasted.
   1.199 +    void appendUsed(BumpChunk *start, BumpChunk *latest, BumpChunk *end) {
    1.200 +        JS_ASSERT(start && latest && end);
   1.201 +        if (last)
   1.202 +            last->setNext(start);
   1.203 +        else
   1.204 +            first = latest = start;
   1.205 +        last = end;
   1.206 +        this->latest = latest;
   1.207 +    }
   1.208 +
   1.209 +    void incrementCurSize(size_t size) {
   1.210 +        curSize_ += size;
   1.211 +        if (curSize_ > peakSize_)
   1.212 +            peakSize_ = curSize_;
   1.213 +    }
   1.214 +    void decrementCurSize(size_t size) {
   1.215 +        JS_ASSERT(curSize_ >= size);
   1.216 +        curSize_ -= size;
   1.217 +    }
   1.218 +
   1.219 +  public:
   1.220 +    explicit LifoAlloc(size_t defaultChunkSize)
   1.221 +      : peakSize_(0)
   1.222 +    {
   1.223 +        reset(defaultChunkSize);
   1.224 +    }
   1.225 +
   1.226 +    // Steal allocated chunks from |other|.
   1.227 +    void steal(LifoAlloc *other) {
   1.228 +        JS_ASSERT(!other->markCount);
   1.229 +
   1.230 +        // Copy everything from |other| to |this| except for |peakSize_|, which
   1.231 +        // requires some care.
   1.232 +        size_t oldPeakSize = peakSize_;
   1.233 +        mozilla::PodAssign(this, other);
   1.234 +        peakSize_ = Max(oldPeakSize, curSize_);
   1.235 +
   1.236 +        other->reset(defaultChunkSize_);
   1.237 +    }
   1.238 +
   1.239 +    // Append all chunks from |other|. They are removed from |other|.
   1.240 +    void transferFrom(LifoAlloc *other);
   1.241 +
   1.242 +    // Append unused chunks from |other|. They are removed from |other|.
   1.243 +    void transferUnusedFrom(LifoAlloc *other);
   1.244 +
   1.245 +    ~LifoAlloc() { freeAll(); }
   1.246 +
   1.247 +    size_t defaultChunkSize() const { return defaultChunkSize_; }
   1.248 +
   1.249 +    // Frees all held memory.
   1.250 +    void freeAll();
   1.251 +
   1.252 +    static const unsigned HUGE_ALLOCATION = 50 * 1024 * 1024;
   1.253 +    void freeAllIfHugeAndUnused() {
   1.254 +        if (markCount == 0 && curSize_ > HUGE_ALLOCATION)
   1.255 +            freeAll();
   1.256 +    }
   1.257 +
   1.258 +    MOZ_ALWAYS_INLINE
   1.259 +    void *alloc(size_t n) {
   1.260 +        JS_OOM_POSSIBLY_FAIL();
   1.261 +
   1.262 +        void *result;
   1.263 +        if (latest && (result = latest->tryAlloc(n)))
   1.264 +            return result;
   1.265 +
   1.266 +        if (!getOrCreateChunk(n))
   1.267 +            return nullptr;
   1.268 +
   1.269 +        // Since we just created a large enough chunk, this can't fail.
   1.270 +        result = latest->tryAlloc(n);
   1.271 +        MOZ_ASSERT(result);
   1.272 +        return result;
   1.273 +    }
   1.274 +
   1.275 +    // Ensures that enough space exists to satisfy N bytes worth of
   1.276 +    // allocation requests, not necessarily contiguous. Note that this does
   1.277 +    // not guarantee a successful single allocation of N bytes.
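          +    //
          +    // For example, a request for 64 bytes is considered satisfied by two
          +    // chunks with 32 unused bytes each, even though a later single 64-byte
          +    // allocation would still have to spill into a new chunk.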
   1.278 +    MOZ_ALWAYS_INLINE
   1.279 +    bool ensureUnusedApproximate(size_t n) {
   1.280 +        size_t total = 0;
   1.281 +        for (BumpChunk *chunk = latest; chunk; chunk = chunk->next()) {
   1.282 +            total += chunk->unused();
   1.283 +            if (total >= n)
   1.284 +                return true;
   1.285 +        }
   1.286 +        BumpChunk *latestBefore = latest;
   1.287 +        if (!getOrCreateChunk(n))
   1.288 +            return false;
   1.289 +        if (latestBefore)
   1.290 +            latest = latestBefore;
   1.291 +        return true;
   1.292 +    }
   1.293 +
   1.294 +    template <typename T>
   1.295 +    T *newArray(size_t count) {
   1.296 +        JS_STATIC_ASSERT(mozilla::IsPod<T>::value);
   1.297 +        return newArrayUninitialized<T>(count);
   1.298 +    }
   1.299 +
   1.300 +    // Create an array with uninitialized elements of type |T|.
   1.301 +    // The caller is responsible for initialization.
   1.302 +    template <typename T>
   1.303 +    T *newArrayUninitialized(size_t count) {
   1.304 +        if (count & mozilla::tl::MulOverflowMask<sizeof(T)>::value)
   1.305 +            return nullptr;
   1.306 +        return static_cast<T *>(alloc(sizeof(T) * count));
   1.307 +    }
   1.308 +
   1.309 +    class Mark {
   1.310 +        BumpChunk *chunk;
   1.311 +        void *markInChunk;
   1.312 +        friend class LifoAlloc;
   1.313 +        Mark(BumpChunk *chunk, void *markInChunk) : chunk(chunk), markInChunk(markInChunk) {}
   1.314 +      public:
   1.315 +        Mark() : chunk(nullptr), markInChunk(nullptr) {}
   1.316 +    };
   1.317 +
   1.318 +    Mark mark() {
   1.319 +        markCount++;
   1.320 +        return latest ? Mark(latest, latest->mark()) : Mark();
   1.321 +    }
   1.322 +
   1.323 +    void release(Mark mark) {
   1.324 +        markCount--;
   1.325 +        if (!mark.chunk) {
   1.326 +            latest = first;
   1.327 +            if (latest)
   1.328 +                latest->resetBump();
   1.329 +        } else {
   1.330 +            latest = mark.chunk;
   1.331 +            latest->release(mark.markInChunk);
   1.332 +        }
   1.333 +    }
   1.334 +
   1.335 +    void releaseAll() {
   1.336 +        JS_ASSERT(!markCount);
   1.337 +        latest = first;
   1.338 +        if (latest)
   1.339 +            latest->resetBump();
   1.340 +    }
   1.341 +
   1.342 +    // Get the total "used" (occupied bytes) count for the arena chunks.
   1.343 +    size_t used() const {
   1.344 +        size_t accum = 0;
   1.345 +        for (BumpChunk *chunk = first; chunk; chunk = chunk->next()) {
   1.346 +            accum += chunk->used();
   1.347 +            if (chunk == latest)
   1.348 +                break;
   1.349 +        }
   1.350 +        return accum;
   1.351 +    }
   1.352 +
   1.353 +    // Return true if the LifoAlloc does not currently contain any allocations.
   1.354 +    bool isEmpty() const {
   1.355 +        return !latest || !latest->used();
   1.356 +    }
   1.357 +
    1.358 +    // Return the number of bytes remaining to allocate in the current chunk,
    1.359 +    // i.e. how many bytes we can allocate before needing a new chunk.
   1.360 +    size_t availableInCurrentChunk() const {
   1.361 +        if (!latest)
   1.362 +            return 0;
   1.363 +        return latest->unused();
   1.364 +    }
   1.365 +
   1.366 +    // Get the total size of the arena chunks (including unused space).
   1.367 +    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
   1.368 +        size_t n = 0;
   1.369 +        for (BumpChunk *chunk = first; chunk; chunk = chunk->next())
   1.370 +            n += chunk->sizeOfIncludingThis(mallocSizeOf);
   1.371 +        return n;
   1.372 +    }
   1.373 +
   1.374 +    // Like sizeOfExcludingThis(), but includes the size of the LifoAlloc itself.
   1.375 +    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
   1.376 +        return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
   1.377 +    }
   1.378 +
   1.379 +    // Get the peak size of the arena chunks (including unused space and
   1.380 +    // bookkeeping space).
   1.381 +    size_t peakSizeOfExcludingThis() const { return peakSize_; }
   1.382 +
   1.383 +    // Doesn't perform construction; useful for lazily-initialized POD types.
   1.384 +    template <typename T>
   1.385 +    MOZ_ALWAYS_INLINE
   1.386 +    T *newPod() {
   1.387 +        return static_cast<T *>(alloc(sizeof(T)));
   1.388 +    }
   1.389 +
   1.390 +    JS_DECLARE_NEW_METHODS(new_, alloc, MOZ_ALWAYS_INLINE)
   1.391 +
   1.392 +    // A mutable enumeration of the allocated data.
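          +    //
          +    // A usage sketch, assuming the LifoAlloc |lifo| holds only fixed-size
          +    // POD records of type |T| that were allocated in order:
          +    //
          +    //     for (LifoAlloc::Enum e(lifo); !e.empty(); e.popFront<T>()) {
          +    //         T *record = e.get<T>();
          +    //         // ... inspect or update *record ...
          +    //     }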
   1.393 +    class Enum
   1.394 +    {
   1.395 +        friend class LifoAlloc;
   1.396 +        friend class detail::BumpChunk;
   1.397 +
   1.398 +        LifoAlloc *alloc_;  // The LifoAlloc being traversed.
   1.399 +        BumpChunk *chunk_;  // The current chunk.
   1.400 +        char *position_;    // The current position (must be within chunk_).
   1.401 +
    1.402 +        // If there is not enough room in the current chunk for |size| more
    1.403 +        // bytes, advance to the next chunk and update the position.
   1.404 +        void ensureSpaceAndAlignment(size_t size) {
   1.405 +            JS_ASSERT(!empty());
   1.406 +            char *aligned = detail::AlignPtr(position_);
   1.407 +            if (aligned + size > chunk_->end()) {
   1.408 +                chunk_ = chunk_->next();
   1.409 +                position_ = static_cast<char *>(chunk_->start());
   1.410 +            } else {
   1.411 +                position_ = aligned;
   1.412 +            }
   1.413 +            JS_ASSERT(uintptr_t(position_) + size <= uintptr_t(chunk_->end()));
   1.414 +        }
   1.415 +
   1.416 +      public:
   1.417 +        Enum(LifoAlloc &alloc)
   1.418 +          : alloc_(&alloc),
   1.419 +            chunk_(alloc.first),
   1.420 +            position_(static_cast<char *>(alloc.first ? alloc.first->start() : nullptr))
   1.421 +        {}
   1.422 +
   1.423 +        // Return true if there are no more bytes to enumerate.
   1.424 +        bool empty() {
   1.425 +            return !chunk_ || (chunk_ == alloc_->latest && position_ >= chunk_->mark());
   1.426 +        }
   1.427 +
   1.428 +        // Move the read position forward by the size of one T.
   1.429 +        template <typename T>
   1.430 +        void popFront() {
   1.431 +            popFront(sizeof(T));
   1.432 +        }
   1.433 +
   1.434 +        // Move the read position forward by |size| bytes.
   1.435 +        void popFront(size_t size) {
   1.436 +            ensureSpaceAndAlignment(size);
   1.437 +            position_ = position_ + size;
   1.438 +        }
   1.439 +
   1.440 +        // Update the bytes at the current position with a new value.
   1.441 +        template <typename T>
   1.442 +        void updateFront(const T &t) {
   1.443 +            ensureSpaceAndAlignment(sizeof(T));
   1.444 +            memmove(position_, &t, sizeof(T));
   1.445 +        }
   1.446 +
   1.447 +        // Return a pointer to the item at the current position. This
   1.448 +        // returns a pointer to the inline storage, not a copy.
   1.449 +        template <typename T>
   1.450 +        T *get(size_t size = sizeof(T)) {
   1.451 +            ensureSpaceAndAlignment(size);
   1.452 +            return reinterpret_cast<T *>(position_);
   1.453 +        }
   1.454 +
   1.455 +        // Return a Mark at the current position of the Enum.
   1.456 +        Mark mark() {
   1.457 +            alloc_->markCount++;
   1.458 +            return Mark(chunk_, position_);
   1.459 +        }
   1.460 +    };
   1.461 +};
   1.462 +
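          +// RAII helper around LifoAlloc::mark()/release(): a mark is taken when the
          +// scope is constructed and released when the scope is destroyed, unless
          +// releaseEarly() already released it. A sketch of typical use, assuming a
          +// LifoAlloc instance |lifo|:
          +//
          +//     {
          +//         LifoAllocScope scope(&lifo);
          +//         void *tmp = scope.alloc().alloc(256);  // scratch allocation
          +//         // ... use |tmp| ...
          +//     }   // allocations made inside the scope are released here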
   1.463 +class LifoAllocScope
   1.464 +{
   1.465 +    LifoAlloc       *lifoAlloc;
   1.466 +    LifoAlloc::Mark mark;
   1.467 +    bool            shouldRelease;
   1.468 +    MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
   1.469 +
   1.470 +  public:
   1.471 +    explicit LifoAllocScope(LifoAlloc *lifoAlloc
   1.472 +                            MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
   1.473 +      : lifoAlloc(lifoAlloc),
   1.474 +        mark(lifoAlloc->mark()),
   1.475 +        shouldRelease(true)
   1.476 +    {
   1.477 +        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
   1.478 +    }
   1.479 +
   1.480 +    ~LifoAllocScope() {
   1.481 +        if (shouldRelease)
   1.482 +            lifoAlloc->release(mark);
   1.483 +    }
   1.484 +
   1.485 +    LifoAlloc &alloc() {
   1.486 +        return *lifoAlloc;
   1.487 +    }
   1.488 +
   1.489 +    void releaseEarly() {
   1.490 +        JS_ASSERT(shouldRelease);
   1.491 +        lifoAlloc->release(mark);
   1.492 +        shouldRelease = false;
   1.493 +    }
   1.494 +};
   1.495 +
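          +// Allocation policy that carves every allocation out of a LifoAlloc and is
          +// intended for policy-parameterized containers such as js::Vector. free_()
          +// is a no-op; memory is reclaimed only when the underlying LifoAlloc is
          +// released or freed. A minimal sketch of direct use, assuming a LifoAlloc
          +// instance |lifo| (null checks omitted):
          +//
          +//     LifoAllocPolicy policy(lifo);
          +//     void *p = policy.calloc_(128);  // zeroed bytes drawn from |lifo|
          +//     policy.free_(p);                // no-op; |lifo| reclaims in bulk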
   1.496 +class LifoAllocPolicy
   1.497 +{
   1.498 +    LifoAlloc &alloc_;
   1.499 +
   1.500 +  public:
   1.501 +    LifoAllocPolicy(LifoAlloc &alloc)
   1.502 +      : alloc_(alloc)
   1.503 +    {}
   1.504 +    void *malloc_(size_t bytes) {
   1.505 +        return alloc_.alloc(bytes);
   1.506 +    }
   1.507 +    void *calloc_(size_t bytes) {
   1.508 +        void *p = malloc_(bytes);
   1.509 +        if (p)
   1.510 +            memset(p, 0, bytes);
   1.511 +        return p;
   1.512 +    }
   1.513 +    void *realloc_(void *p, size_t oldBytes, size_t bytes) {
   1.514 +        void *n = malloc_(bytes);
   1.515 +        if (!n)
   1.516 +            return n;
   1.517 +        memcpy(n, p, Min(oldBytes, bytes));
   1.518 +        return n;
   1.519 +    }
    1.520 +    void free_(void *p) {
          +        // Intentionally a no-op: LifoAlloc memory is reclaimed in bulk.
    1.521 +    }
   1.522 +    void reportAllocOverflow() const {
   1.523 +    }
   1.524 +};
   1.525 +
   1.526 +} // namespace js
   1.527 +
   1.528 +#endif /* ds_LifoAlloc_h */
