Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef assembler_jit_ExecutableAllocator_h
#define assembler_jit_ExecutableAllocator_h

#include <stddef.h> // for ptrdiff_t
#include <limits>

#include "jsalloc.h"

#include "assembler/wtf/Platform.h"
#include "jit/arm/Simulator-arm.h"
#include "js/HashTable.h"
#include "js/Vector.h"

#if WTF_CPU_SPARC
#ifdef linux // bugzilla 502369
static void sync_instruction_memory(caddr_t v, u_int len)
{
    caddr_t end = v + len;
    caddr_t p = v;
    while (p < end) {
        asm("flush %0" : : "r" (p));
        p += 32;
    }
}
#else
extern "C" void sync_instruction_memory(caddr_t v, u_int len);
#endif
#endif

#if WTF_OS_IOS
#include <libkern/OSCacheControl.h>
#include <sys/mman.h>
#endif

#if WTF_OS_SYMBIAN
#include <e32std.h>
#endif

#if WTF_CPU_MIPS && WTF_OS_LINUX
#include <sys/cachectl.h>
#endif

#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
#else
#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
#endif
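
// Under ENABLE_ASSEMBLER_WX_EXCLUSIVE, code pages are never writable and
// executable at the same time (W^X): they start out read-execute and are
// flipped to read-write around patching via makeWritable()/makeExecutable()
// below. Without it, JIT pages stay read-write-execute for their lifetime.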

namespace JSC {
  enum CodeKind { ION_CODE = 0, BASELINE_CODE, REGEXP_CODE, OTHER_CODE };
}

#if ENABLE_ASSEMBLER

//#define DEBUG_STRESS_JSC_ALLOCATOR

namespace JS {
  struct CodeSizes;
}

namespace JSC {

class ExecutableAllocator;

// These are reference-counted. A new one starts with a count of 1.
class ExecutablePool {

    friend class ExecutableAllocator;
private:
    struct Allocation {
        char* pages;
        size_t size;
#if WTF_OS_SYMBIAN
        RChunk* chunk;
#endif
    };

    ExecutableAllocator* m_allocator;
    char* m_freePtr;
    char* m_end;
    Allocation m_allocation;

    // Reference count for automatic reclamation.
    unsigned m_refCount;

    // Number of bytes currently used for each kind of JIT code.
    size_t m_ionCodeBytes;
    size_t m_baselineCodeBytes;
    size_t m_regexpCodeBytes;
    size_t m_otherCodeBytes;

public:
    void release(bool willDestroy = false)
    {
        JS_ASSERT(m_refCount != 0);
        // XXX: disabled, see bug 654820.
        //JS_ASSERT_IF(willDestroy, m_refCount == 1);
        if (--m_refCount == 0)
            js_delete(this);
    }

    void release(size_t n, CodeKind kind)
    {
        switch (kind) {
          case ION_CODE:
            m_ionCodeBytes -= n;
            MOZ_ASSERT(m_ionCodeBytes < m_allocation.size); // Shouldn't underflow.
            break;
          case BASELINE_CODE:
            m_baselineCodeBytes -= n;
            MOZ_ASSERT(m_baselineCodeBytes < m_allocation.size);
            break;
          case REGEXP_CODE:
            m_regexpCodeBytes -= n;
            MOZ_ASSERT(m_regexpCodeBytes < m_allocation.size);
            break;
          case OTHER_CODE:
            m_otherCodeBytes -= n;
            MOZ_ASSERT(m_otherCodeBytes < m_allocation.size);
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("bad code kind");
        }

        release();
    }

    ExecutablePool(ExecutableAllocator* allocator, Allocation a)
      : m_allocator(allocator), m_freePtr(a.pages), m_end(m_freePtr + a.size), m_allocation(a),
        m_refCount(1), m_ionCodeBytes(0), m_baselineCodeBytes(0), m_regexpCodeBytes(0),
        m_otherCodeBytes(0)
    { }

    ~ExecutablePool();

private:
    // It should be impossible for us to roll over, because only small
    // pools have multiple holders, and they have one holder per chunk
    // of generated code, and they only hold 16KB or so of code.
    void addRef()
    {
        JS_ASSERT(m_refCount);
        ++m_refCount;
    }

    void* alloc(size_t n, CodeKind kind)
    {
        JS_ASSERT(n <= available());
        void *result = m_freePtr;
        m_freePtr += n;

        switch (kind) {
          case ION_CODE:      m_ionCodeBytes      += n; break;
          case BASELINE_CODE: m_baselineCodeBytes += n; break;
          case REGEXP_CODE:   m_regexpCodeBytes   += n; break;
          case OTHER_CODE:    m_otherCodeBytes    += n; break;
          default:            MOZ_ASSUME_UNREACHABLE("bad code kind");
        }
        return result;
    }

    size_t available() const {
        JS_ASSERT(m_end >= m_freePtr);
        return m_end - m_freePtr;
    }

    void toggleAllCodeAsAccessible(bool accessible);

    bool codeContains(char* address) {
        return address >= m_allocation.pages && address < m_freePtr;
    }
};

class ExecutableAllocator {
    typedef void (*DestroyCallback)(void* addr, size_t size);
    enum ProtectionSetting { Writable, Executable };
    DestroyCallback destroyCallback;

public:
    ExecutableAllocator()
      : destroyCallback(NULL)
    {
        if (!pageSize) {
            pageSize = determinePageSize();
            /*
             * On Windows, VirtualAlloc effectively allocates in 64K chunks.
             * (Technically, it allocates in page chunks, but the starting
             * address is always a multiple of 64K, so each allocation uses up
             * 64K of address space.) So a size less than that would be
             * pointless. But it turns out that 64KB is a reasonable size for
             * all platforms. (This assumes 4KB pages.)
             */
            largeAllocSize = pageSize * 16;
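            // With the common 4 KB page size this is 16 pages = 64 KB,
            // matching the VirtualAlloc granularity described above.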
        }

        JS_ASSERT(m_smallPools.empty());
    }

    ~ExecutableAllocator()
    {
        for (size_t i = 0; i < m_smallPools.length(); i++)
            m_smallPools[i]->release(/* willDestroy = */true);

        // If this asserts we have a pool leak.
        JS_ASSERT_IF(m_pools.initialized(), m_pools.empty());
    }

    void purge() {
        for (size_t i = 0; i < m_smallPools.length(); i++)
            m_smallPools[i]->release();

        m_smallPools.clear();
    }

    // alloc() returns a pointer to some memory, and also (by reference) a
    // pointer to the reference-counted pool. The caller owns a reference to
    // the pool; i.e. alloc() increments the count before returning the object.
    void* alloc(size_t n, ExecutablePool** poolp, CodeKind type)
    {
        // Caller must ensure 'n' is word-size aligned. If all allocations are
        // of word-sized quantities, then all subsequent allocations will be
        // aligned.
        JS_ASSERT(roundUpAllocationSize(n, sizeof(void*)) == n);

        if (n == OVERSIZE_ALLOCATION) {
            *poolp = NULL;
            return NULL;
        }

        *poolp = poolForSize(n);
        if (!*poolp)
            return NULL;

        // This alloc is infallible because poolForSize() just obtained
        // (found, or created if necessary) a pool that had enough space.
        void *result = (*poolp)->alloc(n, type);
        JS_ASSERT(result);
        return result;
    }
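
    // A minimal caller sketch (hypothetical; emitCodeInto() is illustrative
    // only, everything else is this header's API):
    //
    //   ExecutablePool *pool;
    //   size_t n = 256; // must be word-size aligned
    //   void *code = allocator.alloc(n, &pool, ION_CODE);
    //   if (code) {
    //       ExecutableAllocator::makeWritable(code, n);
    //       emitCodeInto(code, n);
    //       ExecutableAllocator::makeExecutable(code, n);
    //       ExecutableAllocator::cacheFlush(code, n);
    //       // ... run the code; when it is no longer needed:
    //       pool->release(n, ION_CODE); // drops the byte count and our reference
    //   }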

    void releasePoolPages(ExecutablePool *pool) {
        JS_ASSERT(pool->m_allocation.pages);
        if (destroyCallback)
            destroyCallback(pool->m_allocation.pages, pool->m_allocation.size);
        systemRelease(pool->m_allocation);
        JS_ASSERT(m_pools.initialized());
        m_pools.remove(m_pools.lookup(pool)); // this asserts if |pool| is not in m_pools
    }

    void addSizeOfCode(JS::CodeSizes *sizes) const;
    void toggleAllCodeAsAccessible(bool accessible);
    bool codeContains(char* address);

    void setDestroyCallback(DestroyCallback destroyCallback) {
        this->destroyCallback = destroyCallback;
    }

private:
    static size_t pageSize;
    static size_t largeAllocSize;
#if WTF_OS_WINDOWS
    static uint64_t rngSeed;
#endif

    static const size_t OVERSIZE_ALLOCATION = size_t(-1);

    static size_t roundUpAllocationSize(size_t request, size_t granularity)
    {
        // Something included via windows.h defines a macro with this name,
        // which causes the function below to fail to compile.
#ifdef _MSC_VER
# undef max
#endif

        if ((std::numeric_limits<size_t>::max() - granularity) <= request)
            return OVERSIZE_ALLOCATION;

        // Round up to the next multiple of the granularity, which must be a
        // power of two for the mask below to work.
        size_t size = request + (granularity - 1);
        size = size & ~(granularity - 1);
        JS_ASSERT(size >= request);
        return size;
    }
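
    // Worked example: request = 5000 and granularity = 4096 gives
    // (5000 + 4095) & ~4095 = 9095 & ~4095 = 8192, i.e. two 4 KB pages.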

    // On OOM, this will return an Allocation where pages is NULL.
    ExecutablePool::Allocation systemAlloc(size_t n);
    static void systemRelease(const ExecutablePool::Allocation& alloc);
    void *computeRandomAllocationAddress();

    ExecutablePool* createPool(size_t n)
    {
        size_t allocSize = roundUpAllocationSize(n, pageSize);
        if (allocSize == OVERSIZE_ALLOCATION)
            return NULL;

        if (!m_pools.initialized() && !m_pools.init())
            return NULL;

#ifdef DEBUG_STRESS_JSC_ALLOCATOR
        ExecutablePool::Allocation a = systemAlloc(size_t(4294967291));
#else
        ExecutablePool::Allocation a = systemAlloc(allocSize);
#endif
        if (!a.pages)
            return NULL;

        ExecutablePool *pool = js_new<ExecutablePool>(this, a);
        if (!pool) {
            systemRelease(a);
            return NULL;
        }

        m_pools.put(pool);
        return pool;
    }

public:
    ExecutablePool* poolForSize(size_t n)
    {
#ifndef DEBUG_STRESS_JSC_ALLOCATOR
        // Try to fit in an existing small allocator. Use the pool with the
        // least available space that is big enough (best-fit). This is the
        // best strategy because (a) it maximizes the chance of the next
        // allocation fitting in a small pool, and (b) it minimizes the
        // potential waste when a small pool is next abandoned.
        ExecutablePool *minPool = NULL;
        for (size_t i = 0; i < m_smallPools.length(); i++) {
            ExecutablePool *pool = m_smallPools[i];
            if (n <= pool->available() && (!minPool || pool->available() < minPool->available()))
                minPool = pool;
        }
        if (minPool) {
            minPool->addRef();
            return minPool;
        }
#endif

        // If the request is large, we just provide an unshared allocator.
        if (n > largeAllocSize)
            return createPool(n);

        // Create a new allocator.
        ExecutablePool* pool = createPool(largeAllocSize);
        if (!pool)
            return NULL;
        // At this point, local |pool| is the owner.

        if (m_smallPools.length() < maxSmallPools) {
            // We haven't hit the maximum number of live pools; add the new pool.
            m_smallPools.append(pool);
            pool->addRef();
        } else {
            // Find the pool with the least space.
            int iMin = 0;
            for (size_t i = 1; i < m_smallPools.length(); i++) {
                if (m_smallPools[i]->available() < m_smallPools[iMin]->available())
                    iMin = i;
            }

            // If the new allocator will end up with more free space than the
            // small pool with the least space, use it instead.
            ExecutablePool *minPool = m_smallPools[iMin];
            if ((pool->available() - n) > minPool->available()) {
                minPool->release();
                m_smallPools[iMin] = pool;
                pool->addRef();
            }
        }

        // Pass ownership to the caller.
        return pool;
    }
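
    // Best-fit illustration (numbers hypothetical): with small pools offering
    // 1 KB, 3 KB and 8 KB free and a request of n = 2 KB, the 3 KB pool wins:
    // it is the smallest that still fits, leaving only 1 KB of slack here and
    // keeping the 8 KB pool intact for a larger future request.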

#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
    static void makeWritable(void* start, size_t size)
    {
        reprotectRegion(start, size, Writable);
    }

    static void makeExecutable(void* start, size_t size)
    {
        reprotectRegion(start, size, Executable);
    }
#else
    static void makeWritable(void*, size_t) {}
    static void makeExecutable(void*, size_t) {}
#endif

#if WTF_CPU_X86 || WTF_CPU_X86_64
    static void cacheFlush(void*, size_t)
    {
    }
#elif defined(JS_ARM_SIMULATOR)
    static void cacheFlush(void *code, size_t size)
    {
        js::jit::Simulator::FlushICache(code, size);
    }
#elif WTF_CPU_MIPS
    static void cacheFlush(void* code, size_t size)
    {
#if WTF_COMPILER_GCC && (GCC_VERSION >= 40300)
#if WTF_MIPS_ISA_REV(2) && (GCC_VERSION < 40403)
        int lineSize;
        asm("rdhwr %0, $1" : "=r" (lineSize));
        //
        // Modify "start" and "end" to avoid a GCC 4.3.0-4.4.2 bug in
        // mips_expand_synci_loop that may execute synci one more time.
        // "start" points to the first byte of the cache line.
        // "end" points to the last byte of the line before the last cache line.
        // Because size is always a multiple of 4, it is safe to set
        // "end" to the last byte.
        //
        intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
        intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
        __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
#else
        intptr_t end = reinterpret_cast<intptr_t>(code) + size;
        __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
#endif
#else
        _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
#endif
    }
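    // Worked example for the buggy-GCC path above (hypothetical values): with
    // lineSize = 32, code = 0x1010 and size = 0x40, start = 0x1010 & -32 =
    // 0x1000 and end = ((0x1010 + 0x40 - 1) & -32) - 1 = 0x1040 - 1 = 0x103f,
    // so the extra synci the bug may emit flushes the true last cache line
    // (0x1040..0x105f) rather than a line past the region.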
#elif WTF_CPU_ARM && WTF_OS_IOS
    static void cacheFlush(void* code, size_t size)
    {
        sys_dcache_flush(code, size);
        sys_icache_invalidate(code, size);
    }
#elif WTF_CPU_ARM_THUMB2 && WTF_IOS
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push    {r7}\n"
            "mov     r0, %0\n"
            "mov     r1, %1\n"
            "movw    r7, #0x2\n"
            "movt    r7, #0xf\n"
            "movs    r2, #0x0\n"
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif WTF_OS_SYMBIAN
    static void cacheFlush(void* code, size_t size)
    {
        User::IMB_Range(code, static_cast<char*>(code) + size);
    }
#elif WTF_CPU_ARM_TRADITIONAL && WTF_OS_LINUX && WTF_COMPILER_RVCT
    static __asm void cacheFlush(void* code, size_t size);
#elif WTF_CPU_ARM_TRADITIONAL && (WTF_OS_LINUX || WTF_OS_ANDROID) && WTF_COMPILER_GCC
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push    {r7}\n"
            "mov     r0, %0\n"
            "mov     r1, %1\n"
            "mov     r7, #0xf0000\n"
            "add     r7, r7, #0x2\n"
            "mov     r2, #0x0\n"
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
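    // Both inline-asm blocks above build the same call: r7 = 0x000f0002,
    // which is __ARM_NR_cacheflush on ARM Linux; r0 and r1 carry the start
    // and end addresses, r2 = 0 is the flags argument, and "svc 0x0" traps
    // into the kernel. r7 is saved and restored around the call because
    // Thumb code uses it as the frame pointer.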
#elif WTF_CPU_SPARC
    static void cacheFlush(void* code, size_t size)
    {
        sync_instruction_memory((caddr_t)code, size);
    }
#endif

private:

#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
    static void reprotectRegion(void*, size_t, ProtectionSetting);
#endif

    // These are strong references; they keep pools alive.
    static const size_t maxSmallPools = 4;
    typedef js::Vector<ExecutablePool *, maxSmallPools, js::SystemAllocPolicy> SmallExecPoolVector;
    SmallExecPoolVector m_smallPools;

    // All live pools are recorded here, just for stats purposes. These are
    // weak references; they don't keep pools alive. When a pool is destroyed
    // its reference is removed from m_pools.
    typedef js::HashSet<ExecutablePool *, js::DefaultHasher<ExecutablePool *>, js::SystemAllocPolicy>
            ExecPoolHashSet;
    ExecPoolHashSet m_pools; // All pools, just for stats purposes.

    static size_t determinePageSize();
};

}

#endif // ENABLE_ASSEMBLER

#endif /* assembler_jit_ExecutableAllocator_h */