js/src/assembler/jit/ExecutableAllocator.h

/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef assembler_jit_ExecutableAllocator_h
#define assembler_jit_ExecutableAllocator_h

#include <stddef.h> // for ptrdiff_t
#include <limits>

#include "jsalloc.h"

#include "assembler/wtf/Platform.h"
#include "jit/arm/Simulator-arm.h"
#include "js/HashTable.h"
#include "js/Vector.h"

#if WTF_CPU_SPARC
#ifdef linux  // bugzilla 502369
static void sync_instruction_memory(caddr_t v, u_int len)
{
    caddr_t end = v + len;
    caddr_t p = v;
    while (p < end) {
        asm("flush %0" : : "r" (p));
        p += 32;
    }
}
#else
extern "C" void sync_instruction_memory(caddr_t v, u_int len);
#endif
#endif

#if WTF_OS_IOS
#include <libkern/OSCacheControl.h>
#include <sys/mman.h>
#endif

#if WTF_OS_SYMBIAN
#include <e32std.h>
#endif

#if WTF_CPU_MIPS && WTF_OS_LINUX
#include <sys/cachectl.h>
#endif

#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
#else
#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
#endif

namespace JSC {
  enum CodeKind { ION_CODE = 0, BASELINE_CODE, REGEXP_CODE, OTHER_CODE };
}

#if ENABLE_ASSEMBLER

//#define DEBUG_STRESS_JSC_ALLOCATOR

namespace JS {
    struct CodeSizes;
}

namespace JSC {

  class ExecutableAllocator;

  // These are reference-counted. A new one starts with a count of 1.
  class ExecutablePool {

    friend class ExecutableAllocator;
private:
    struct Allocation {
        char* pages;
        size_t size;
#if WTF_OS_SYMBIAN
        RChunk* chunk;
#endif
    };

    ExecutableAllocator* m_allocator;
    char* m_freePtr;
    char* m_end;
    Allocation m_allocation;

    // Reference count for automatic reclamation.
    unsigned m_refCount;

    // Number of bytes currently allocated for each kind of JIT code.
    size_t m_ionCodeBytes;
    size_t m_baselineCodeBytes;
    size_t m_regexpCodeBytes;
    size_t m_otherCodeBytes;

public:
    void release(bool willDestroy = false)
    {
        JS_ASSERT(m_refCount != 0);
        // XXX: disabled, see bug 654820.
        //JS_ASSERT_IF(willDestroy, m_refCount == 1);
        if (--m_refCount == 0)
            js_delete(this);
    }
    void release(size_t n, CodeKind kind)
    {
        switch (kind) {
          case ION_CODE:
            m_ionCodeBytes -= n;
            MOZ_ASSERT(m_ionCodeBytes < m_allocation.size); // Shouldn't underflow.
            break;
          case BASELINE_CODE:
            m_baselineCodeBytes -= n;
            MOZ_ASSERT(m_baselineCodeBytes < m_allocation.size);
            break;
          case REGEXP_CODE:
            m_regexpCodeBytes -= n;
            MOZ_ASSERT(m_regexpCodeBytes < m_allocation.size);
            break;
          case OTHER_CODE:
            m_otherCodeBytes -= n;
            MOZ_ASSERT(m_otherCodeBytes < m_allocation.size);
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("bad code kind");
        }

        release();
    }

    ExecutablePool(ExecutableAllocator* allocator, Allocation a)
      : m_allocator(allocator), m_freePtr(a.pages), m_end(m_freePtr + a.size), m_allocation(a),
        m_refCount(1), m_ionCodeBytes(0), m_baselineCodeBytes(0), m_regexpCodeBytes(0),
        m_otherCodeBytes(0)
    { }

    ~ExecutablePool();

private:
    // It should be impossible for us to roll over, because only small
    // pools have multiple holders, and they have one holder per chunk
    // of generated code, and they only hold 16KB or so of code.
    void addRef()
    {
        JS_ASSERT(m_refCount);
        ++m_refCount;
    }

    void* alloc(size_t n, CodeKind kind)
    {
        JS_ASSERT(n <= available());
        void *result = m_freePtr;
        m_freePtr += n;

        switch (kind) {
          case ION_CODE:      m_ionCodeBytes      += n;        break;
          case BASELINE_CODE: m_baselineCodeBytes += n;        break;
          case REGEXP_CODE:   m_regexpCodeBytes   += n;        break;
          case OTHER_CODE:    m_otherCodeBytes    += n;        break;
          default:            MOZ_ASSUME_UNREACHABLE("bad code kind");
        }
        return result;
    }

    size_t available() const {
        JS_ASSERT(m_end >= m_freePtr);
        return m_end - m_freePtr;
    }

    void toggleAllCodeAsAccessible(bool accessible);

    bool codeContains(char* address) {
        return address >= m_allocation.pages && address < m_freePtr;
    }
};

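// Reference-count walk-through (an illustrative sketch of the code above and
// of poolForSize() below, not additional API): a shared small pool ends up
// holding one reference for the allocator's cache plus one per outstanding
// chunk of generated code.
//
//   pool = createPool(largeAllocSize);  // m_refCount == 1
//   m_smallPools.append(pool);
//   pool->addRef();                     // m_refCount == 2 (cache reference)
//   // ... the original reference is handed to the caller of alloc() ...
//   pool->release(n, kind);             // caller done:   m_refCount == 1
//   purge();                            // cache dropped: m_refCount == 0,
//                                       // so the pool js_delete()s itself
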
class ExecutableAllocator {
    typedef void (*DestroyCallback)(void* addr, size_t size);
    enum ProtectionSetting { Writable, Executable };
    DestroyCallback destroyCallback;

public:
    ExecutableAllocator()
      : destroyCallback(NULL)
    {
        if (!pageSize) {
            pageSize = determinePageSize();
            /*
             * On Windows, VirtualAlloc effectively allocates in 64K chunks.
             * (Technically, it allocates in page chunks, but the starting
             * address is always a multiple of 64K, so each allocation uses up
             * 64K of address space.)  So a size less than that would be
             * pointless.  But it turns out that 64KB is a reasonable size for
             * all platforms.  (This assumes 4KB pages.)
             */
            largeAllocSize = pageSize * 16;
        }

        JS_ASSERT(m_smallPools.empty());
    }

    ~ExecutableAllocator()
    {
        for (size_t i = 0; i < m_smallPools.length(); i++)
            m_smallPools[i]->release(/* willDestroy = */true);

        // If this asserts we have a pool leak.
        JS_ASSERT_IF(m_pools.initialized(), m_pools.empty());
    }

    void purge() {
        for (size_t i = 0; i < m_smallPools.length(); i++)
            m_smallPools[i]->release();

        m_smallPools.clear();
    }

    // alloc() returns a pointer to some memory, and also (by reference) a
    // pointer to a reference-counted pool. The caller owns a reference to the
    // pool; i.e. alloc() increments the count before returning the object.
    void* alloc(size_t n, ExecutablePool** poolp, CodeKind type)
    {
        // Caller must ensure 'n' is word-size aligned. If all allocations are
        // of word sized quantities, then all subsequent allocations will be
        // aligned.
        JS_ASSERT(roundUpAllocationSize(n, sizeof(void*)) == n);

        if (n == OVERSIZE_ALLOCATION) {
            *poolp = NULL;
            return NULL;
        }

        *poolp = poolForSize(n);
        if (!*poolp)
            return NULL;

        // This alloc is infallible because poolForSize() just obtained
        // (found, or created if necessary) a pool that had enough space.
        void *result = (*poolp)->alloc(n, type);
        JS_ASSERT(result);
        return result;
    }

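    // Illustrative caller-side sketch (hypothetical names, not part of this
    // header): the returned pool reference must eventually be dropped with a
    // release() of the same size and kind.
    //
    //   ExecutablePool *pool;
    //   void *code = execAlloc->alloc(nbytes, &pool, BASELINE_CODE);
    //   if (!code)
    //       return false;   // OOM, or nbytes was OVERSIZE_ALLOCATION
    //   // ... emit nbytes of machine code into |code| ...
    //   // ... later, when the code is discarded:
    //   pool->release(nbytes, BASELINE_CODE);
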
    void releasePoolPages(ExecutablePool *pool) {
        JS_ASSERT(pool->m_allocation.pages);
        if (destroyCallback)
            destroyCallback(pool->m_allocation.pages, pool->m_allocation.size);
        systemRelease(pool->m_allocation);
        JS_ASSERT(m_pools.initialized());
        m_pools.remove(m_pools.lookup(pool));   // this asserts if |pool| is not in m_pools
    }

    void addSizeOfCode(JS::CodeSizes *sizes) const;
    void toggleAllCodeAsAccessible(bool accessible);
    bool codeContains(char* address);

    void setDestroyCallback(DestroyCallback destroyCallback) {
        this->destroyCallback = destroyCallback;
    }

private:
    static size_t pageSize;
    static size_t largeAllocSize;
#if WTF_OS_WINDOWS
    static uint64_t rngSeed;
#endif

    static const size_t OVERSIZE_ALLOCATION = size_t(-1);

    static size_t roundUpAllocationSize(size_t request, size_t granularity)
    {
        // Something included via windows.h defines a macro with this name,
        // which causes the function below to fail to compile.
        #ifdef _MSC_VER
        # undef max
        #endif

        if ((std::numeric_limits<size_t>::max() - granularity) <= request)
            return OVERSIZE_ALLOCATION;

        // Round up to the next multiple of the granularity.
        size_t size = request + (granularity - 1);
        size = size & ~(granularity - 1);
        JS_ASSERT(size >= request);
        return size;
    }

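    // Worked example for roundUpAllocationSize(), assuming a 4096-byte
    // granularity: request == 4097 gives 4097 + 4095 == 8192, and
    // 8192 & ~4095 == 8192, the next multiple of the granularity; a request
    // of exactly 4096 stays 4096. This relies on the granularity being a
    // power of two.
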
    // On OOM, this will return an Allocation where pages is NULL.
    ExecutablePool::Allocation systemAlloc(size_t n);
    static void systemRelease(const ExecutablePool::Allocation& alloc);
    void *computeRandomAllocationAddress();

    ExecutablePool* createPool(size_t n)
    {
        size_t allocSize = roundUpAllocationSize(n, pageSize);
        if (allocSize == OVERSIZE_ALLOCATION)
            return NULL;

        if (!m_pools.initialized() && !m_pools.init())
            return NULL;

#ifdef DEBUG_STRESS_JSC_ALLOCATOR
        ExecutablePool::Allocation a = systemAlloc(size_t(4294967291));
#else
        ExecutablePool::Allocation a = systemAlloc(allocSize);
#endif
        if (!a.pages)
            return NULL;

        ExecutablePool *pool = js_new<ExecutablePool>(this, a);
        if (!pool) {
            systemRelease(a);
            return NULL;
        }
        m_pools.put(pool);
        return pool;
    }

public:
    ExecutablePool* poolForSize(size_t n)
    {
#ifndef DEBUG_STRESS_JSC_ALLOCATOR
        // Try to fit in an existing small pool.  Use the pool with the
        // least available space that is big enough (best-fit).  This is the
        // best strategy because (a) it maximizes the chance of the next
        // allocation fitting in a small pool, and (b) it minimizes the
        // potential waste when a small pool is next abandoned.
        ExecutablePool *minPool = NULL;
        for (size_t i = 0; i < m_smallPools.length(); i++) {
            ExecutablePool *pool = m_smallPools[i];
            if (n <= pool->available() && (!minPool || pool->available() < minPool->available()))
                minPool = pool;
        }
        if (minPool) {
            minPool->addRef();
            return minPool;
        }
#endif

        // If the request is large, we just provide an unshared pool.
        if (n > largeAllocSize)
            return createPool(n);

        // Create a new pool.
        ExecutablePool* pool = createPool(largeAllocSize);
        if (!pool)
            return NULL;
        // At this point, local |pool| is the owner.

        if (m_smallPools.length() < maxSmallPools) {
            // We haven't hit the maximum number of live pools;  add the new pool.
            m_smallPools.append(pool);
            pool->addRef();
        } else {
            // Find the pool with the least space.
            int iMin = 0;
            for (size_t i = 1; i < m_smallPools.length(); i++)
                if (m_smallPools[i]->available() <
                    m_smallPools[iMin]->available())
                {
                    iMin = i;
                }

            // If the new pool will have more free space than the small pool
            // with the least space, cache the new pool in its place.
            ExecutablePool *minPool = m_smallPools[iMin];
            if ((pool->available() - n) > minPool->available()) {
                minPool->release();
                m_smallPools[iMin] = pool;
                pool->addRef();
            }
        }

        // Pass ownership to the caller.
        return pool;
    }

#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
    static void makeWritable(void* start, size_t size)
    {
        reprotectRegion(start, size, Writable);
    }

    static void makeExecutable(void* start, size_t size)
    {
        reprotectRegion(start, size, Executable);
    }
#else
    static void makeWritable(void*, size_t) {}
    static void makeExecutable(void*, size_t) {}
#endif

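    // A sketch of the intended W^X patching sequence (illustrative, not a
    // prescribed contract of this header): pages are flipped to read-write
    // for patching, back to read-execute afterwards, and the instruction
    // cache is flushed so stale instructions are not executed.
    //
    //   ExecutableAllocator::makeWritable(start, size);
    //   // ... write or patch machine code in [start, start + size) ...
    //   ExecutableAllocator::makeExecutable(start, size);
    //   ExecutableAllocator::cacheFlush(start, size);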

#if WTF_CPU_X86 || WTF_CPU_X86_64
    static void cacheFlush(void*, size_t)
    {
    }
#elif defined(JS_ARM_SIMULATOR)
    static void cacheFlush(void *code, size_t size)
    {
        js::jit::Simulator::FlushICache(code, size);
    }
#elif WTF_CPU_MIPS
    static void cacheFlush(void* code, size_t size)
    {
#if WTF_COMPILER_GCC && (GCC_VERSION >= 40300)
#if WTF_MIPS_ISA_REV(2) && (GCC_VERSION < 40403)
        int lineSize;
        asm("rdhwr %0, $1" : "=r" (lineSize));
        //
        // Modify "start" and "end" to avoid a GCC 4.3.0-4.4.2 bug in
        // mips_expand_synci_loop that may execute synci one more time.
        // "start" points to the first byte of the cache line.
        // "end" points to the last byte of the line before the last cache line.
        // Because size is always a multiple of 4, it is safe to set
        // "end" to the last byte.
        //
        intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
        intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
        __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
#else
        intptr_t end = reinterpret_cast<intptr_t>(code) + size;
        __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
#endif
#else
        _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
#endif
    }
#elif WTF_CPU_ARM && WTF_OS_IOS
    static void cacheFlush(void* code, size_t size)
    {
        sys_dcache_flush(code, size);
        sys_icache_invalidate(code, size);
    }
#elif WTF_CPU_ARM_THUMB2 && WTF_IOS
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push    {r7}\n"
            "mov     r0, %0\n"
            "mov     r1, %1\n"
            "movw    r7, #0x2\n"
            "movt    r7, #0xf\n"
            "movs    r2, #0x0\n"
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif WTF_OS_SYMBIAN
    static void cacheFlush(void* code, size_t size)
    {
        User::IMB_Range(code, static_cast<char*>(code) + size);
    }
#elif WTF_CPU_ARM_TRADITIONAL && WTF_OS_LINUX && WTF_COMPILER_RVCT
    static __asm void cacheFlush(void* code, size_t size);
#elif WTF_CPU_ARM_TRADITIONAL && (WTF_OS_LINUX || WTF_OS_ANDROID) && WTF_COMPILER_GCC
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push    {r7}\n"
            "mov     r0, %0\n"
            "mov     r1, %1\n"
            "mov     r7, #0xf0000\n"
            "add     r7, r7, #0x2\n"
            "mov     r2, #0x0\n"
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif WTF_CPU_SPARC
    static void cacheFlush(void* code, size_t size)
    {
        sync_instruction_memory((caddr_t)code, size);
    }
#endif

private:

#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
    static void reprotectRegion(void*, size_t, ProtectionSetting);
#endif

    // These are strong references;  they keep pools alive.
    static const size_t maxSmallPools = 4;
    typedef js::Vector<ExecutablePool *, maxSmallPools, js::SystemAllocPolicy> SmallExecPoolVector;
    SmallExecPoolVector m_smallPools;

    // All live pools are recorded here, just for stats purposes.  These are
    // weak references;  they don't keep pools alive.  When a pool is destroyed
    // its reference is removed from m_pools.
    typedef js::HashSet<ExecutablePool *, js::DefaultHasher<ExecutablePool *>, js::SystemAllocPolicy>
            ExecPoolHashSet;
    ExecPoolHashSet m_pools;    // All pools, just for stats purposes.

    static size_t determinePageSize();
};

}

#endif // ENABLE_ASSEMBLER

#endif /* assembler_jit_ExecutableAllocator_h */
