js/src/yarr/BumpPointerAllocator.h

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Wed, 31 Dec 2014 06:09:35 +0100
changeset    0:6474c204b198
permissions  -rw-r--r--

Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Copyright (C) 2010 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ***** END LICENSE BLOCK ***** */

#ifndef yarr_BumpPointerAllocator_h
#define yarr_BumpPointerAllocator_h

#include "yarr/PageAllocation.h"

namespace WTF {

#if WTF_CPU_SPARC
#define MINIMUM_BUMP_POOL_SIZE 0x2000
#elif WTF_CPU_IA64
#define MINIMUM_BUMP_POOL_SIZE 0x4000
#else
#define MINIMUM_BUMP_POOL_SIZE 0x1000
#endif

class BumpPointerPool {
public:
    // ensureCapacity will check whether the current pool has capacity to
    // allocate 'size' bytes of memory.  If it does not, it will attempt to
    // allocate a new pool (which will be added to this one in a chain).
    //
    // If allocation fails (out of memory) this method will return null.
    // If the return value is non-null, then callers should update any
    // references they have to this current (possibly full) BumpPointerPool
    // to instead point to the newly returned BumpPointerPool.
    BumpPointerPool* ensureCapacity(size_t size)
    {
        void* allocationEnd = static_cast<char*>(m_current) + size;
        ASSERT(allocationEnd > m_current); // check for overflow
        if (allocationEnd <= static_cast<void*>(this))
            return this;
        return ensureCapacityCrossPool(this, size);
    }

    // alloc should only be called after calling ensureCapacity; as such
    // alloc will never fail.
    void* alloc(size_t size)
    {
        void* current = m_current;
        void* allocationEnd = static_cast<char*>(current) + size;
        ASSERT(allocationEnd > current); // check for overflow
        ASSERT(allocationEnd <= static_cast<void*>(this));
        m_current = allocationEnd;
        return current;
    }
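
    // A minimal call-pattern sketch (editor's illustration, not part of the
    // original header); 'bytes' and the error handling are hypothetical:
    //
    //     pool = pool->ensureCapacity(bytes); // may chain and return a new pool
    //     if (!pool)
    //         return 0;                       // out of memory
    //     void* p = pool->alloc(bytes);       // cannot fail after ensureCapacity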

    // The dealloc method releases memory allocated using alloc.  Memory
    // must be released in a LIFO fashion, e.g. if the client calls alloc
    // four times, returning pointers A, B, C, D, then the only valid order
    // in which these may be deallocated is D, C, B, A.
    //
    // The client may optionally skip some deallocations.  In the example
    // above, it would be valid to only explicitly dealloc C, A (D being
    // dealloced along with C, B along with A).
    //
    // If the pointer was not allocated from this pool (or its chain of
    // pools) then dealloc will CRASH().  Callers should update any
    // references they have to this current BumpPointerPool to instead
    // point to the returned BumpPointerPool.
    BumpPointerPool* dealloc(void* position)
    {
        if ((position >= m_start) && (position <= static_cast<void*>(this))) {
            ASSERT(position <= m_current);
            m_current = position;
            return this;
        }
        return deallocCrossPool(this, position);
    }
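
    // Sketch of the LIFO contract above (editor's illustration); assumes
    // ensureCapacity succeeded before each alloc:
    //
    //     void* a = pool->alloc(16);
    //     void* b = pool->alloc(16);
    //     void* c = pool->alloc(16);
    //     void* d = pool->alloc(16);
    //     pool = pool->dealloc(c);  // releases d along with c
    //     pool = pool->dealloc(a);  // releases b along with a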

    // Report the total page memory held by this chain of pools; only valid
    // on the head pool.
    size_t sizeOfNonHeapData() const
    {
        ASSERT(!m_previous);
        size_t n = 0;
        const BumpPointerPool* curr = this;
        while (curr) {
            n += curr->m_allocation.size();
            curr = curr->m_next;
        }
        return n;
    }

private:
    // Placement operator new, returns the last 'size' bytes of the allocation for use as this.
    void* operator new(size_t size, const PageAllocation& allocation)
    {
        ASSERT(size < allocation.size());
        return reinterpret_cast<char*>(reinterpret_cast<intptr_t>(allocation.base()) + allocation.size()) - size;
    }
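
    // Resulting layout of a pool's page allocation (descriptive sketch):
    //
    //   base()                                        base() + size()
    //   |<------ bump-allocated client data ------>|<- BumpPointerPool ->|
    //   ^ m_start, initial m_current                ^ 'this'
    //
    // Since the pool object occupies the final bytes of the allocation,
    // comparing 'allocationEnd <= this' in ensureCapacity/alloc doubles as
    // the capacity check.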

    BumpPointerPool(const PageAllocation& allocation)
        : m_current(allocation.base())
        , m_start(allocation.base())
        , m_next(0)
        , m_previous(0)
        , m_allocation(allocation)
    {
    }

    static BumpPointerPool* create(size_t minimumCapacity = 0)
    {
        // Add size of BumpPointerPool object, check for overflow.
        minimumCapacity += sizeof(BumpPointerPool);
        if (minimumCapacity < sizeof(BumpPointerPool))
            return 0;

        size_t poolSize = MINIMUM_BUMP_POOL_SIZE;
        while (poolSize < minimumCapacity) {
            poolSize <<= 1;
            // The following if check relies on MINIMUM_BUMP_POOL_SIZE being a power of 2!
            ASSERT(!(MINIMUM_BUMP_POOL_SIZE & (MINIMUM_BUMP_POOL_SIZE - 1)));
            if (!poolSize)
                return 0;
        }

        PageAllocation allocation = PageAllocation::allocate(poolSize);
        if (!!allocation)
            return new(allocation) BumpPointerPool(allocation);
        return 0;
    }
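
    // Worked sizing example (editor's illustration): with a 0x1000 minimum,
    // a request for 0x2800 bytes becomes 0x2800 + sizeof(BumpPointerPool),
    // and poolSize doubles 0x1000 -> 0x2000 -> 0x4000 before it fits, so a
    // 16KB page allocation is made.  poolSize wrapping to zero signals that
    // the doubling overflowed size_t, hence the null return.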

    // Unwind the head pool to empty and free any chained pools; called from
    // BumpPointerAllocator::stopAllocator().
    void shrink()
    {
        ASSERT(!m_previous);
        m_current = m_start;
        while (m_next) {
            BumpPointerPool* nextNext = m_next->m_next;
            m_next->destroy();
            m_next = nextNext;
        }
    }

    void destroy()
    {
        m_allocation.deallocate();
    }

    static BumpPointerPool* ensureCapacityCrossPool(BumpPointerPool* previousPool, size_t size)
    {
        // The pool passed should not have capacity, so we'll start with the next one.
        ASSERT(previousPool);
        ASSERT((static_cast<char*>(previousPool->m_current) + size) > previousPool->m_current); // check for overflow
        ASSERT((static_cast<char*>(previousPool->m_current) + size) > static_cast<void*>(previousPool));
        BumpPointerPool* pool = previousPool->m_next;

        while (true) {
            if (!pool) {
                // We've run to the end; allocate a new pool.
                pool = BumpPointerPool::create(size);
                previousPool->m_next = pool;
                if (pool)
                    pool->m_previous = previousPool;
                return pool;
            }

            void* current = pool->m_current;
            void* allocationEnd = static_cast<char*>(current) + size;
            ASSERT(allocationEnd > current); // check for overflow
            if (allocationEnd <= static_cast<void*>(pool))
                return pool;

            // This pool is also full; advance to the next pool in the chain.
            previousPool = pool;
            pool = pool->m_next;
        }
    }

    static BumpPointerPool* deallocCrossPool(BumpPointerPool* pool, void* position)
    {
        // Should only be called if position is not in the current pool.
        ASSERT((position < pool->m_start) || (position > static_cast<void*>(pool)));

        while (true) {
            // Unwind the current pool to the start, move back in the chain to the previous pool.
            pool->m_current = pool->m_start;
            pool = pool->m_previous;

            // position was nowhere in the chain!
            if (!pool)
                CRASH();

            if ((position >= pool->m_start) && (position <= static_cast<void*>(pool))) {
                ASSERT(position <= pool->m_current);
                pool->m_current = position;
                return pool;
            }
        }
    }

    void* m_current;
    void* m_start;
    BumpPointerPool* m_next;
    BumpPointerPool* m_previous;
    PageAllocation m_allocation;

    friend class BumpPointerAllocator;
};

// A BumpPointerAllocator manages a set of BumpPointerPool objects, which
// can be used for LIFO (stack-like) allocation.
//
// To begin allocating using this class call startAllocator().  The result
// of this method will be null if the initial pool allocation fails, or a
// pointer to a BumpPointerPool object that can be used to perform
// allocations.  Whilst running no memory will be released until
// stopAllocator() is called.  At this point all allocations made through
// this allocator will be reaped, and underlying memory may be freed.
//
// (In practice we will still hold on to the initial pool to allow allocation
// to be quickly restarted, but additional pools will be freed.)
//
// This allocator is non-reentrant; it is incumbent on clients to ensure
// startAllocator() is not called again until stopAllocator() has been called.
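//
// A usage sketch (editor's illustration; 'Thing' is a hypothetical type):
//
//     BumpPointerAllocator allocator;
//     BumpPointerPool* pool = allocator.startAllocator();
//     if (!pool)
//         return false;                  // initial pool allocation failed
//     pool = pool->ensureCapacity(sizeof(Thing));
//     if (!pool)
//         return false;                  // out of memory
//     void* p = pool->alloc(sizeof(Thing));
//     // ... use p ...
//     allocator.stopAllocator();         // reap everything at once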
class BumpPointerAllocator {
public:
    BumpPointerAllocator()
        : m_head(0)
    {
    }

    ~BumpPointerAllocator()
    {
        if (m_head)
            m_head->destroy();
    }

    BumpPointerPool* startAllocator()
    {
        if (!m_head)
            m_head = BumpPointerPool::create();
        return m_head;
    }

    void stopAllocator()
    {
        if (m_head)
            m_head->shrink();
    }

    size_t sizeOfNonHeapData() const
    {
        return m_head ? m_head->sizeOfNonHeapData() : 0;
    }

private:
    BumpPointerPool* m_head;
};

}

using WTF::BumpPointerAllocator;

#endif /* yarr_BumpPointerAllocator_h */
