js/src/assembler/jit/ExecutableAllocator.h

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Wed, 31 Dec 2014 06:09:35 +0100
changeset    0:6474c204b198
permissions  -rw-r--r--

Cloned from the upstream tor-browser origin at tor-browser-31.3.0esr-4.5-1-build1
(revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f) for hacking purposes.

/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef assembler_jit_ExecutableAllocator_h
#define assembler_jit_ExecutableAllocator_h

#include <stddef.h> // for ptrdiff_t
#include <limits>

#include "jsalloc.h"

#include "assembler/wtf/Platform.h"
#include "jit/arm/Simulator-arm.h"
#include "js/HashTable.h"
#include "js/Vector.h"

#if WTF_CPU_SPARC
#ifdef linux // bugzilla 502369
static void sync_instruction_memory(caddr_t v, u_int len)
{
    caddr_t end = v + len;
    caddr_t p = v;
    while (p < end) {
        asm("flush %0" : : "r" (p));
        p += 32;
    }
}
#else
extern "C" void sync_instruction_memory(caddr_t v, u_int len);
#endif
#endif

#if WTF_OS_IOS
#include <libkern/OSCacheControl.h>
#include <sys/mman.h>
#endif

#if WTF_OS_SYMBIAN
#include <e32std.h>
#endif

#if WTF_CPU_MIPS && WTF_OS_LINUX
#include <sys/cachectl.h>
#endif

#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
#else
#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
#endif
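
// Under ENABLE_ASSEMBLER_WX_EXCLUSIVE, pool pages start out readable and
// executable but not writable, and are flipped to read+write around code
// patching via ExecutableAllocator::makeWritable()/makeExecutable() below.
// Without it, pool pages stay readable, writable, and executable for their
// whole lifetime.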

namespace JSC {
  enum CodeKind { ION_CODE = 0, BASELINE_CODE, REGEXP_CODE, OTHER_CODE };
}
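
// Each chunk of JIT code handed out from a pool is tagged with one of the
// CodeKind values above; the pool keeps per-kind byte counts, which
// ExecutableAllocator::addSizeOfCode() can report through JS::CodeSizes.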

#if ENABLE_ASSEMBLER

//#define DEBUG_STRESS_JSC_ALLOCATOR

namespace JS {
    struct CodeSizes;
}

namespace JSC {

class ExecutableAllocator;

// These are reference-counted. A new one starts with a count of 1.
class ExecutablePool {

    friend class ExecutableAllocator;

  private:
    struct Allocation {
        char* pages;
        size_t size;
#if WTF_OS_SYMBIAN
        RChunk* chunk;
#endif
    };

    ExecutableAllocator* m_allocator;
    char* m_freePtr;
    char* m_end;
    Allocation m_allocation;

    // Reference count for automatic reclamation.
    unsigned m_refCount;

    // Number of bytes currently used for each kind of JIT code.
    size_t m_ionCodeBytes;
    size_t m_baselineCodeBytes;
    size_t m_regexpCodeBytes;
    size_t m_otherCodeBytes;

  public:
    void release(bool willDestroy = false)
    {
        JS_ASSERT(m_refCount != 0);
        // XXX: disabled, see bug 654820.
        //JS_ASSERT_IF(willDestroy, m_refCount == 1);
        if (--m_refCount == 0)
            js_delete(this);
    }

    void release(size_t n, CodeKind kind)
    {
        switch (kind) {
          case ION_CODE:
            m_ionCodeBytes -= n;
            MOZ_ASSERT(m_ionCodeBytes < m_allocation.size); // Shouldn't underflow.
            break;
          case BASELINE_CODE:
            m_baselineCodeBytes -= n;
            MOZ_ASSERT(m_baselineCodeBytes < m_allocation.size);
            break;
          case REGEXP_CODE:
            m_regexpCodeBytes -= n;
            MOZ_ASSERT(m_regexpCodeBytes < m_allocation.size);
            break;
          case OTHER_CODE:
            m_otherCodeBytes -= n;
            MOZ_ASSERT(m_otherCodeBytes < m_allocation.size);
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("bad code kind");
        }

        release();
    }

    ExecutablePool(ExecutableAllocator* allocator, Allocation a)
      : m_allocator(allocator), m_freePtr(a.pages), m_end(m_freePtr + a.size), m_allocation(a),
        m_refCount(1), m_ionCodeBytes(0), m_baselineCodeBytes(0), m_regexpCodeBytes(0),
        m_otherCodeBytes(0)
    { }

    ~ExecutablePool();

  private:
    // It should be impossible for us to roll over, because only small
    // pools have multiple holders, and they have one holder per chunk
    // of generated code, and they only hold 16KB or so of code.
    void addRef()
    {
        JS_ASSERT(m_refCount);
        ++m_refCount;
    }

    void* alloc(size_t n, CodeKind kind)
    {
        JS_ASSERT(n <= available());
        void *result = m_freePtr;
        m_freePtr += n;

        switch (kind) {
          case ION_CODE:      m_ionCodeBytes      += n; break;
          case BASELINE_CODE: m_baselineCodeBytes += n; break;
          case REGEXP_CODE:   m_regexpCodeBytes   += n; break;
          case OTHER_CODE:    m_otherCodeBytes    += n; break;
          default:            MOZ_ASSUME_UNREACHABLE("bad code kind");
        }
        return result;
    }

    size_t available() const {
        JS_ASSERT(m_end >= m_freePtr);
        return m_end - m_freePtr;
    }

    void toggleAllCodeAsAccessible(bool accessible);

    bool codeContains(char* address) {
        return address >= m_allocation.pages && address < m_freePtr;
    }
};

class ExecutableAllocator {
    typedef void (*DestroyCallback)(void* addr, size_t size);
    enum ProtectionSetting { Writable, Executable };
    DestroyCallback destroyCallback;

  public:
    ExecutableAllocator()
      : destroyCallback(NULL)
    {
        if (!pageSize) {
            pageSize = determinePageSize();
            /*
             * On Windows, VirtualAlloc effectively allocates in 64K chunks.
             * (Technically, it allocates in page chunks, but the starting
             * address is always a multiple of 64K, so each allocation uses up
             * 64K of address space.) So a size less than that would be
             * pointless. But it turns out that 64KB is a reasonable size for
             * all platforms. (This assumes 4KB pages.)
             */
            largeAllocSize = pageSize * 16;
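            // With 4 KiB pages this comes out to 16 * 4096 bytes = 64 KiB,
            // matching the VirtualAlloc granularity described above.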
        }

        JS_ASSERT(m_smallPools.empty());
    }

    ~ExecutableAllocator()
    {
        for (size_t i = 0; i < m_smallPools.length(); i++)
            m_smallPools[i]->release(/* willDestroy = */true);

        // If this assertion fails, we have a pool leak.
        JS_ASSERT_IF(m_pools.initialized(), m_pools.empty());
    }

    void purge() {
        for (size_t i = 0; i < m_smallPools.length(); i++)
            m_smallPools[i]->release();

        m_smallPools.clear();
    }

    // alloc() returns a pointer to some memory, and also (by reference) a
    // pointer to a reference-counted pool. The caller owns a reference to the
    // pool; i.e. alloc() increments the count before returning the object.
    void* alloc(size_t n, ExecutablePool** poolp, CodeKind type)
    {
        // Caller must ensure 'n' is word-size aligned. If all allocations are
        // of word-sized quantities, then all subsequent allocations will be
        // aligned.
        JS_ASSERT(roundUpAllocationSize(n, sizeof(void*)) == n);

        if (n == OVERSIZE_ALLOCATION) {
            *poolp = NULL;
            return NULL;
        }

        *poolp = poolForSize(n);
        if (!*poolp)
            return NULL;

        // This alloc is infallible because poolForSize() just obtained
        // (found, or created if necessary) a pool that had enough space.
        void *result = (*poolp)->alloc(n, type);
        JS_ASSERT(result);
        return result;
    }
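
    // For illustration, a typical (hypothetical) caller of alloc() looks like
    // this; |allocator| and |nbytes| are placeholder names, and |nbytes| must
    // already be word-size aligned:
    //
    //   ExecutablePool *pool;
    //   void *code = allocator.alloc(nbytes, &pool, ION_CODE);
    //   if (!code)
    //       return false;                  // OOM: *pool was set to NULL.
    //   ... emit machine code into |code|, keeping |pool| alive while it runs ...
    //   pool->release(nbytes, ION_CODE);   // Finally, drop the reference alloc() took.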

    void releasePoolPages(ExecutablePool *pool) {
        JS_ASSERT(pool->m_allocation.pages);
        if (destroyCallback)
            destroyCallback(pool->m_allocation.pages, pool->m_allocation.size);
        systemRelease(pool->m_allocation);
        JS_ASSERT(m_pools.initialized());
        m_pools.remove(m_pools.lookup(pool)); // this asserts if |pool| is not in m_pools
    }

    void addSizeOfCode(JS::CodeSizes *sizes) const;
    void toggleAllCodeAsAccessible(bool accessible);
    bool codeContains(char* address);

    void setDestroyCallback(DestroyCallback destroyCallback) {
        this->destroyCallback = destroyCallback;
    }

  private:
    static size_t pageSize;
    static size_t largeAllocSize;
#if WTF_OS_WINDOWS
    static uint64_t rngSeed;
#endif

    static const size_t OVERSIZE_ALLOCATION = size_t(-1);

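    // Round |request| up to a multiple of |granularity| (a power of two); for
    // example, a 5000-byte request with 4096-byte granularity becomes
    // (5000 + 4095) & ~4095 = 8192. Returns OVERSIZE_ALLOCATION if the rounded
    // size would overflow.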
    static size_t roundUpAllocationSize(size_t request, size_t granularity)
    {
        // Something included via windows.h defines a macro with this name,
        // which causes the function below to fail to compile.
#ifdef _MSC_VER
# undef max
#endif

        if ((std::numeric_limits<size_t>::max() - granularity) <= request)
            return OVERSIZE_ALLOCATION;

        // Round up to the next granularity boundary.
        size_t size = request + (granularity - 1);
        size = size & ~(granularity - 1);
        JS_ASSERT(size >= request);
        return size;
    }

    // On OOM, this will return an Allocation where pages is NULL.
    ExecutablePool::Allocation systemAlloc(size_t n);
    static void systemRelease(const ExecutablePool::Allocation& alloc);
    void *computeRandomAllocationAddress();

    ExecutablePool* createPool(size_t n)
    {
        size_t allocSize = roundUpAllocationSize(n, pageSize);
        if (allocSize == OVERSIZE_ALLOCATION)
            return NULL;

        if (!m_pools.initialized() && !m_pools.init())
            return NULL;

#ifdef DEBUG_STRESS_JSC_ALLOCATOR
        ExecutablePool::Allocation a = systemAlloc(size_t(4294967291));
#else
        ExecutablePool::Allocation a = systemAlloc(allocSize);
#endif
        if (!a.pages)
            return NULL;

        ExecutablePool *pool = js_new<ExecutablePool>(this, a);
        if (!pool) {
            systemRelease(a);
            return NULL;
        }
        m_pools.put(pool);
        return pool;
    }

  public:
    ExecutablePool* poolForSize(size_t n)
    {
#ifndef DEBUG_STRESS_JSC_ALLOCATOR
        // Try to fit in an existing small allocator. Use the pool with the
        // least available space that is big enough (best-fit). This is the
        // best strategy because (a) it maximizes the chance of the next
        // allocation fitting in a small pool, and (b) it minimizes the
        // potential waste when a small pool is next abandoned.
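        // For example, if the live small pools have 3 KiB, 10 KiB, and 40 KiB
        // free and n is 2 KiB, the 3 KiB pool is chosen.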
        ExecutablePool *minPool = NULL;
        for (size_t i = 0; i < m_smallPools.length(); i++) {
            ExecutablePool *pool = m_smallPools[i];
            if (n <= pool->available() && (!minPool || pool->available() < minPool->available()))
                minPool = pool;
        }
        if (minPool) {
            minPool->addRef();
            return minPool;
        }
#endif

        // If the request is large, we just provide an unshared pool.
        if (n > largeAllocSize)
            return createPool(n);

        // Create a new small pool.
        ExecutablePool* pool = createPool(largeAllocSize);
        if (!pool)
            return NULL;
        // At this point, local |pool| is the owner.

        if (m_smallPools.length() < maxSmallPools) {
            // We haven't hit the maximum number of live pools; add the new pool.
            m_smallPools.append(pool);
            pool->addRef();
        } else {
            // Find the pool with the least space.
            int iMin = 0;
            for (size_t i = 1; i < m_smallPools.length(); i++)
                if (m_smallPools[i]->available() <
                    m_smallPools[iMin]->available())
                {
                    iMin = i;
                }

            // If the new pool will end up with more free space than the small
            // pool with the least space, use it instead.
            ExecutablePool *minPool = m_smallPools[iMin];
            if ((pool->available() - n) > minPool->available()) {
                minPool->release();
                m_smallPools[iMin] = pool;
                pool->addRef();
            }
        }

        // Pass ownership to the caller.
        return pool;
    }

#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
    static void makeWritable(void* start, size_t size)
    {
        reprotectRegion(start, size, Writable);
    }

    static void makeExecutable(void* start, size_t size)
    {
        reprotectRegion(start, size, Executable);
    }
#else
    static void makeWritable(void*, size_t) {}
    static void makeExecutable(void*, size_t) {}
#endif

#if WTF_CPU_X86 || WTF_CPU_X86_64
    static void cacheFlush(void*, size_t)
    {
    }
#elif defined(JS_ARM_SIMULATOR)
    static void cacheFlush(void *code, size_t size)
    {
        js::jit::Simulator::FlushICache(code, size);
    }
#elif WTF_CPU_MIPS
    static void cacheFlush(void* code, size_t size)
    {
#if WTF_COMPILER_GCC && (GCC_VERSION >= 40300)
#if WTF_MIPS_ISA_REV(2) && (GCC_VERSION < 40403)
        int lineSize;
        asm("rdhwr %0, $1" : "=r" (lineSize));
        //
        // Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in
        // mips_expand_synci_loop that may execute synci one more time.
        // "start" points to the first byte of the cache line.
        // "end" points to the last byte of the line before the last cache line.
        // Because size is always a multiple of 4, it is safe to set
        // "end" to the last byte.
        //
        intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
        intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
        __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
#else
        intptr_t end = reinterpret_cast<intptr_t>(code) + size;
        __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
#endif
#else
        _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
#endif
    }
#elif WTF_CPU_ARM && WTF_OS_IOS
    static void cacheFlush(void* code, size_t size)
    {
        sys_dcache_flush(code, size);
        sys_icache_invalidate(code, size);
    }
#elif WTF_CPU_ARM_THUMB2 && WTF_IOS
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push {r7}\n"
            "mov r0, %0\n"
            "mov r1, %1\n"
            "movw r7, #0x2\n"
            "movt r7, #0xf\n"
            "movs r2, #0x0\n"
            "svc 0x0\n"
            "pop {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif WTF_OS_SYMBIAN
    static void cacheFlush(void* code, size_t size)
    {
        User::IMB_Range(code, static_cast<char*>(code) + size);
    }
#elif WTF_CPU_ARM_TRADITIONAL && WTF_OS_LINUX && WTF_COMPILER_RVCT
    static __asm void cacheFlush(void* code, size_t size);
#elif WTF_CPU_ARM_TRADITIONAL && (WTF_OS_LINUX || WTF_OS_ANDROID) && WTF_COMPILER_GCC
    static void cacheFlush(void* code, size_t size)
    {
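        // This invokes the ARM-specific cacheflush syscall (__ARM_NR_cacheflush,
        // number 0x0f0002, loaded into r7) with r0 = start of the region,
        // r1 = end of the region, and r2 = 0.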
        asm volatile (
            "push {r7}\n"
            "mov r0, %0\n"
            "mov r1, %1\n"
            "mov r7, #0xf0000\n"
            "add r7, r7, #0x2\n"
            "mov r2, #0x0\n"
            "svc 0x0\n"
            "pop {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif WTF_CPU_SPARC
    static void cacheFlush(void* code, size_t size)
    {
        sync_instruction_memory((caddr_t)code, size);
    }
#endif

  private:

#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
    static void reprotectRegion(void*, size_t, ProtectionSetting);
#endif

    // These are strong references; they keep pools alive.
    static const size_t maxSmallPools = 4;
    typedef js::Vector<ExecutablePool *, maxSmallPools, js::SystemAllocPolicy> SmallExecPoolVector;
    SmallExecPoolVector m_smallPools;

    // All live pools are recorded here, just for stats purposes. These are
    // weak references; they don't keep pools alive. When a pool is destroyed
    // its reference is removed from m_pools.
    typedef js::HashSet<ExecutablePool *, js::DefaultHasher<ExecutablePool *>, js::SystemAllocPolicy>
        ExecPoolHashSet;
    ExecPoolHashSet m_pools; // All pools, just for stats purposes.

    static size_t determinePageSize();
};

}

#endif // ENABLE_ASSEMBLER

#endif /* assembler_jit_ExecutableAllocator_h */
