/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef assembler_jit_ExecutableAllocator_h
#define assembler_jit_ExecutableAllocator_h

#include <stddef.h> // for ptrdiff_t
#include <limits>

#include "jsalloc.h"

#include "assembler/wtf/Platform.h"
#include "jit/arm/Simulator-arm.h"
#include "js/HashTable.h"
#include "js/Vector.h"

#if WTF_CPU_SPARC
#ifdef linux  // bugzilla 502369
static void sync_instruction_memory(caddr_t v, u_int len)
{
    caddr_t end = v + len;
    caddr_t p = v;
    while (p < end) {
        asm("flush %0" : : "r" (p));
        p += 32;
    }
}
#else
extern "C" void sync_instruction_memory(caddr_t v, u_int len);
#endif
#endif

#if WTF_OS_IOS
#include <libkern/OSCacheControl.h>
#include <sys/mman.h>
#endif

#if WTF_OS_SYMBIAN
#include <e32std.h>
#endif

#if WTF_CPU_MIPS && WTF_OS_LINUX
#include <sys/cachectl.h>
#endif

#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
#else
#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
#endif

namespace JSC {
  enum CodeKind { ION_CODE = 0, BASELINE_CODE, REGEXP_CODE, OTHER_CODE };
}

#if ENABLE_ASSEMBLER

//#define DEBUG_STRESS_JSC_ALLOCATOR

namespace JS {
    struct CodeSizes;
}

namespace JSC {

class ExecutableAllocator;
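// Illustrative usage sketch of the two classes below. This is a hedged
// example, not a verbatim call site from the engine: the real callers are the
// JIT back-ends, and the size and code kind here are made up.
//
//   ExecutableAllocator allocator;
//   ExecutablePool *pool;                                  // out-param, set by alloc()
//   void *code = allocator.alloc(1024, &pool, ION_CODE);   // caller now owns one pool reference
//   if (code) {
//       // ...copy machine code into |code|, reprotect and flush as needed...
//       pool->release(1024, ION_CODE);                     // return the bytes and drop the reference
//   }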
// These are reference-counted. A new one starts with a count of 1.
class ExecutablePool {

    friend class ExecutableAllocator;
  private:
    struct Allocation {
        char* pages;
        size_t size;
#if WTF_OS_SYMBIAN
        RChunk* chunk;
#endif
    };

    ExecutableAllocator* m_allocator;
    char* m_freePtr;
    char* m_end;
    Allocation m_allocation;

    // Reference count for automatic reclamation.
    unsigned m_refCount;

    // Number of bytes currently used for each kind of JIT code.
    size_t m_ionCodeBytes;
    size_t m_baselineCodeBytes;
    size_t m_regexpCodeBytes;
    size_t m_otherCodeBytes;

  public:
    void release(bool willDestroy = false)
    {
        JS_ASSERT(m_refCount != 0);
        // XXX: disabled, see bug 654820.
        //JS_ASSERT_IF(willDestroy, m_refCount == 1);
        if (--m_refCount == 0)
            js_delete(this);
    }

    void release(size_t n, CodeKind kind)
    {
        switch (kind) {
          case ION_CODE:
            m_ionCodeBytes -= n;
            MOZ_ASSERT(m_ionCodeBytes < m_allocation.size); // Shouldn't underflow.
            break;
          case BASELINE_CODE:
            m_baselineCodeBytes -= n;
            MOZ_ASSERT(m_baselineCodeBytes < m_allocation.size);
            break;
          case REGEXP_CODE:
            m_regexpCodeBytes -= n;
            MOZ_ASSERT(m_regexpCodeBytes < m_allocation.size);
            break;
          case OTHER_CODE:
            m_otherCodeBytes -= n;
            MOZ_ASSERT(m_otherCodeBytes < m_allocation.size);
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("bad code kind");
        }

        release();
    }

    ExecutablePool(ExecutableAllocator* allocator, Allocation a)
      : m_allocator(allocator), m_freePtr(a.pages), m_end(m_freePtr + a.size), m_allocation(a),
        m_refCount(1), m_ionCodeBytes(0), m_baselineCodeBytes(0), m_regexpCodeBytes(0),
        m_otherCodeBytes(0)
    { }

    ~ExecutablePool();
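    // Byte-accounting sketch with made-up numbers (the allocator, as a friend,
    // is the only caller of the private alloc() below): after
    //
    //   pool->alloc(256, BASELINE_CODE);   // m_baselineCodeBytes: 0 -> 256
    //   pool->alloc(512, REGEXP_CODE);     // m_regexpCodeBytes:   0 -> 512
    //   pool->release(256, BASELINE_CODE); // m_baselineCodeBytes: 256 -> 0
    //
    // the pool still owns all of its pages; m_freePtr only moves forward, so
    // released bytes are not reused within a pool. The per-kind counters feed
    // memory reporting via ExecutableAllocator::addSizeOfCode().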
  private:
    // It should be impossible for us to roll over, because only small
    // pools have multiple holders, and they have one holder per chunk
    // of generated code, and they only hold 16KB or so of code.
    void addRef()
    {
        JS_ASSERT(m_refCount);
        ++m_refCount;
    }

    void* alloc(size_t n, CodeKind kind)
    {
        JS_ASSERT(n <= available());
        void *result = m_freePtr;
        m_freePtr += n;

        switch (kind) {
          case ION_CODE:      m_ionCodeBytes      += n; break;
          case BASELINE_CODE: m_baselineCodeBytes += n; break;
          case REGEXP_CODE:   m_regexpCodeBytes   += n; break;
          case OTHER_CODE:    m_otherCodeBytes    += n; break;
          default:            MOZ_ASSUME_UNREACHABLE("bad code kind");
        }

        return result;
    }

    size_t available() const {
        JS_ASSERT(m_end >= m_freePtr);
        return m_end - m_freePtr;
    }

    void toggleAllCodeAsAccessible(bool accessible);

    bool codeContains(char* address) {
        return address >= m_allocation.pages && address < m_freePtr;
    }
};

class ExecutableAllocator {
    typedef void (*DestroyCallback)(void* addr, size_t size);
    enum ProtectionSetting { Writable, Executable };
    DestroyCallback destroyCallback;

  public:
    ExecutableAllocator()
      : destroyCallback(NULL)
    {
        if (!pageSize) {
            pageSize = determinePageSize();
            /*
             * On Windows, VirtualAlloc effectively allocates in 64K chunks.
             * (Technically, it allocates in page chunks, but the starting
             * address is always a multiple of 64K, so each allocation uses up
             * 64K of address space.)  So a size less than that would be
             * pointless.  But it turns out that 64KB is a reasonable size for
             * all platforms.  (This assumes 4KB pages.)
             */
            largeAllocSize = pageSize * 16;
        }

        JS_ASSERT(m_smallPools.empty());
    }

    ~ExecutableAllocator()
    {
        for (size_t i = 0; i < m_smallPools.length(); i++)
            m_smallPools[i]->release(/* willDestroy = */true);

        // If this asserts, we have a pool leak.
        JS_ASSERT_IF(m_pools.initialized(), m_pools.empty());
    }
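    // purge() below drops only the allocator's own cached references to its
    // small pools; a pool whose code is still referenced elsewhere survives
    // until that holder calls release(). Illustrative effect:
    //
    //   allocator.purge();   // m_smallPools is emptied; pools with no other
    //                        // holders are freed, the rest live on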
    void purge() {
        for (size_t i = 0; i < m_smallPools.length(); i++)
            m_smallPools[i]->release();

        m_smallPools.clear();
    }

    // alloc() returns a pointer to some memory, and also (by reference) a
    // pointer to a reference-counted pool. The caller owns a reference to the
    // pool; i.e. alloc() increments the count before returning the object.
    void* alloc(size_t n, ExecutablePool** poolp, CodeKind type)
    {
        // Caller must ensure 'n' is word-size aligned. If all allocations are
        // of word sized quantities, then all subsequent allocations will be
        // aligned.
        JS_ASSERT(roundUpAllocationSize(n, sizeof(void*)) == n);

        if (n == OVERSIZE_ALLOCATION) {
            *poolp = NULL;
            return NULL;
        }

        *poolp = poolForSize(n);
        if (!*poolp)
            return NULL;

        // This alloc is infallible because poolForSize() just obtained
        // (found, or created if necessary) a pool that had enough space.
        void *result = (*poolp)->alloc(n, type);
        JS_ASSERT(result);
        return result;
    }

    void releasePoolPages(ExecutablePool *pool) {
        JS_ASSERT(pool->m_allocation.pages);
        if (destroyCallback)
            destroyCallback(pool->m_allocation.pages, pool->m_allocation.size);
        systemRelease(pool->m_allocation);
        JS_ASSERT(m_pools.initialized());
        m_pools.remove(m_pools.lookup(pool));   // this asserts if |pool| is not in m_pools
    }

    void addSizeOfCode(JS::CodeSizes *sizes) const;
    void toggleAllCodeAsAccessible(bool accessible);
    bool codeContains(char* address);

    void setDestroyCallback(DestroyCallback destroyCallback) {
        this->destroyCallback = destroyCallback;
    }

  private:
    static size_t pageSize;
    static size_t largeAllocSize;
#if WTF_OS_WINDOWS
    static uint64_t rngSeed;
#endif

    static const size_t OVERSIZE_ALLOCATION = size_t(-1);

    static size_t roundUpAllocationSize(size_t request, size_t granularity)
    {
        // Something included via windows.h defines a macro named |max|, which
        // would keep the call to std::numeric_limits<size_t>::max() below from
        // compiling.
#ifdef _MSC_VER
# undef max
#endif

        if ((std::numeric_limits<size_t>::max() - granularity) <= request)
            return OVERSIZE_ALLOCATION;

        // Round up to the next multiple of the granularity.
        size_t size = request + (granularity - 1);
        size = size & ~(granularity - 1);
        JS_ASSERT(size >= request);
        return size;
    }

    // On OOM, this will return an Allocation where pages is NULL.
    ExecutablePool::Allocation systemAlloc(size_t n);
    static void systemRelease(const ExecutablePool::Allocation& alloc);
    void *computeRandomAllocationAddress();

    ExecutablePool* createPool(size_t n)
    {
        size_t allocSize = roundUpAllocationSize(n, pageSize);
        if (allocSize == OVERSIZE_ALLOCATION)
            return NULL;

        if (!m_pools.initialized() && !m_pools.init())
            return NULL;

#ifdef DEBUG_STRESS_JSC_ALLOCATOR
        ExecutablePool::Allocation a = systemAlloc(size_t(4294967291));
#else
        ExecutablePool::Allocation a = systemAlloc(allocSize);
#endif
        if (!a.pages)
            return NULL;

        ExecutablePool *pool = js_new<ExecutablePool>(this, a);
        if (!pool) {
            systemRelease(a);
            return NULL;
        }

        m_pools.put(pool);
        return pool;
    }
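    // Worked example for roundUpAllocationSize() above, assuming a 4096-byte
    // granularity (the granularity must be a power of two): a request of 5000
    // bytes becomes (5000 + 4095) & ~4095 = 8192, i.e. two pages. Requests of
    // SIZE_MAX - granularity or larger are rejected as OVERSIZE_ALLOCATION up
    // front, so the addition can never overflow.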
  public:
    ExecutablePool* poolForSize(size_t n)
    {
#ifndef DEBUG_STRESS_JSC_ALLOCATOR
        // Try to fit in an existing small allocator. Use the pool with the
        // least available space that is big enough (best-fit). This is the
        // best strategy because (a) it maximizes the chance of the next
        // allocation fitting in a small pool, and (b) it minimizes the
        // potential waste when a small pool is next abandoned.
        ExecutablePool *minPool = NULL;
        for (size_t i = 0; i < m_smallPools.length(); i++) {
            ExecutablePool *pool = m_smallPools[i];
            if (n <= pool->available() && (!minPool || pool->available() < minPool->available()))
                minPool = pool;
        }
        if (minPool) {
            minPool->addRef();
            return minPool;
        }
#endif

        // If the request is large, we just provide an unshared allocator.
        if (n > largeAllocSize)
            return createPool(n);

        // Create a new allocator.
        ExecutablePool* pool = createPool(largeAllocSize);
        if (!pool)
            return NULL;
        // At this point, local |pool| is the owner.

        if (m_smallPools.length() < maxSmallPools) {
            // We haven't hit the maximum number of live pools; add the new pool.
            m_smallPools.append(pool);
            pool->addRef();
        } else {
            // Find the pool with the least space.
            int iMin = 0;
            for (size_t i = 1; i < m_smallPools.length(); i++) {
                if (m_smallPools[i]->available() < m_smallPools[iMin]->available())
                    iMin = i;
            }

            // If the new allocator will result in more free space than the
            // small pool with the least space, then we will use it instead.
            ExecutablePool *minPool = m_smallPools[iMin];
            if ((pool->available() - n) > minPool->available()) {
                minPool->release();
                m_smallPools[iMin] = pool;
                pool->addRef();
            }
        }

        // Pass ownership to the caller.
        return pool;
    }

#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
    static void makeWritable(void* start, size_t size)
    {
        reprotectRegion(start, size, Writable);
    }

    static void makeExecutable(void* start, size_t size)
    {
        reprotectRegion(start, size, Executable);
    }
#else
    static void makeWritable(void*, size_t) {}
    static void makeExecutable(void*, size_t) {}
#endif
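    // Illustrative patching sequence using the helpers above and the
    // cacheFlush() overloads below (hedged: the actual call sites are in the
    // JIT back-ends). Under ENABLE_ASSEMBLER_WX_EXCLUSIVE the name of
    // INITIAL_PROTECTION_FLAGS above suggests fresh code memory starts out
    // read + execute, so it has to be flipped writable around the copy:
    //
    //   ExecutableAllocator::makeWritable(code, size);
    //   memcpy(code, buffer, size);                  // emit the machine code
    //   ExecutableAllocator::makeExecutable(code, size);
    //   ExecutableAllocator::cacheFlush(code, size); // keep the i-cache coherent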
#if WTF_CPU_X86 || WTF_CPU_X86_64
    static void cacheFlush(void*, size_t)
    {
    }
#elif defined(JS_ARM_SIMULATOR)
    static void cacheFlush(void *code, size_t size)
    {
        js::jit::Simulator::FlushICache(code, size);
    }
#elif WTF_CPU_MIPS
    static void cacheFlush(void* code, size_t size)
    {
#if WTF_COMPILER_GCC && (GCC_VERSION >= 40300)
#if WTF_MIPS_ISA_REV(2) && (GCC_VERSION < 40403)
        int lineSize;
        asm("rdhwr %0, $1" : "=r" (lineSize));
        //
        // Modify "start" and "end" to avoid a GCC 4.3.0-4.4.2 bug in
        // mips_expand_synci_loop that may execute synci one more time.
        // "start" points to the first byte of the cache line.
        // "end" points to the last byte of the line before the last cache line.
        // Because size is always a multiple of 4, it is safe to set
        // "end" to the last byte.
        //
        intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
        intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
        __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
#else
        intptr_t end = reinterpret_cast<intptr_t>(code) + size;
        __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
#endif
#else
        _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
#endif
    }
#elif WTF_CPU_ARM && WTF_OS_IOS
    static void cacheFlush(void* code, size_t size)
    {
        sys_dcache_flush(code, size);
        sys_icache_invalidate(code, size);
    }
#elif WTF_CPU_ARM_THUMB2 && WTF_IOS
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push    {r7}\n"
            "mov     r0, %0\n"
            "mov     r1, %1\n"
            "movw    r7, #0x2\n"
            "movt    r7, #0xf\n"
            "movs    r2, #0x0\n"
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif WTF_OS_SYMBIAN
    static void cacheFlush(void* code, size_t size)
    {
        User::IMB_Range(code, static_cast<char*>(code) + size);
    }
#elif WTF_CPU_ARM_TRADITIONAL && WTF_OS_LINUX && WTF_COMPILER_RVCT
    static __asm void cacheFlush(void* code, size_t size);
#elif WTF_CPU_ARM_TRADITIONAL && (WTF_OS_LINUX || WTF_OS_ANDROID) && WTF_COMPILER_GCC
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push    {r7}\n"
            "mov     r0, %0\n"
            "mov     r1, %1\n"
            "mov     r7, #0xf0000\n"
            "add     r7, r7, #0x2\n"
            "mov     r2, #0x0\n"
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif WTF_CPU_SPARC
    static void cacheFlush(void* code, size_t size)
    {
        sync_instruction_memory((caddr_t)code, size);
    }
#endif

  private:

#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
    static void reprotectRegion(void*, size_t, ProtectionSetting);
#endif

    // These are strong references; they keep pools alive.
    static const size_t maxSmallPools = 4;
    typedef js::Vector<ExecutablePool*, maxSmallPools, js::SystemAllocPolicy> SmallExecPoolVector;
    SmallExecPoolVector m_smallPools;

    // All live pools are recorded here, just for stats purposes. These are
    // weak references; they don't keep pools alive. When a pool is destroyed
    // its reference is removed from m_pools.
    typedef js::HashSet<ExecutablePool*, js::DefaultHasher<ExecutablePool*>, js::SystemAllocPolicy>
            ExecPoolHashSet;
    ExecPoolHashSet m_pools;    // All pools, just for stats purposes.

    static size_t determinePageSize();
};

}

#endif // ENABLE_ASSEMBLER

#endif /* assembler_jit_ExecutableAllocator_h */