js/src/jit/JitCompartment.h

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_JitCompartment_h
#define jit_JitCompartment_h

#ifdef JS_ION

#include "mozilla/MemoryReporting.h"

#include "jsweakcache.h"

#include "jit/CompileInfo.h"
#include "jit/IonCode.h"
#include "jit/IonFrames.h"
#include "jit/shared/Assembler-shared.h"
#include "js/Value.h"
#include "vm/Stack.h"

namespace js {
namespace jit {

class FrameSizeClass;

enum EnterJitType {
    EnterJitBaseline = 0,
    EnterJitOptimized = 1
};

struct EnterJitData
{
    explicit EnterJitData(JSContext *cx)
      : scopeChain(cx),
        result(cx)
    {}

    uint8_t *jitcode;
    InterpreterFrame *osrFrame;

    void *calleeToken;

    Value *maxArgv;
    unsigned maxArgc;
    unsigned numActualArgs;
    unsigned osrNumStackValues;

    RootedObject scopeChain;
    RootedValue result;

    bool constructing;
};

typedef void (*EnterJitCode)(void *code, unsigned argc, Value *argv, InterpreterFrame *fp,
                             CalleeToken calleeToken, JSObject *scopeChain,
                             size_t numStackValues, Value *vp);
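
// Illustrative sketch (not part of the original header): how a caller might
// fill in EnterJitData and invoke the trampoline returned by
// JitRuntime::enterIon(). Only the EnterJitCode signature above is taken
// from this file; how |jitcode| and |calleeToken| are obtained is assumed.
//
//   EnterJitData data(cx);
//   data.jitcode = rawJitEntry;        // hypothetical code pointer
//   data.calleeToken = token;          // hypothetical CalleeToken
//   data.maxArgv = argv;
//   data.maxArgc = argc;
//   data.numActualArgs = argc - 1;     // argv[0] is |this| in this sketch
//   data.osrFrame = nullptr;           // not doing OSR here
//   data.osrNumStackValues = 0;
//   data.constructing = false;
//   data.scopeChain = scopeChainObj;   // hypothetical JSObject *
//
//   EnterJitCode enter = cx->runtime()->jitRuntime()->enterIon();
//   enter(data.jitcode, data.maxArgc, data.maxArgv, data.osrFrame,
//         data.calleeToken, data.scopeChain, data.osrNumStackValues,
//         data.result.address());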

class IonBuilder;

// ICStubSpace is an abstraction for allocation policy and storage for stub data.
// There are two kinds of stubs: optimized stubs and fallback stubs (the latter
// also includes stubs that can make non-tail calls that can GC).
//
// Optimized stubs are allocated per-compartment and are always purged when
// JIT-code is discarded. Fallback stubs are allocated per BaselineScript and
// are only destroyed when the BaselineScript is destroyed.
class ICStubSpace
{
  protected:
    LifoAlloc allocator_;

    explicit ICStubSpace(size_t chunkSize)
      : allocator_(chunkSize)
    {}

  public:
    inline void *alloc(size_t size) {
        return allocator_.alloc(size);
    }

    JS_DECLARE_NEW_METHODS(allocate, alloc, inline)

    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return allocator_.sizeOfExcludingThis(mallocSizeOf);
    }
};
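
// Illustrative sketch (the behavior of JS_DECLARE_NEW_METHODS is assumed
// here, not defined in this file): |allocate<T>(...)| obtains raw memory from
// alloc() and constructs a T in it, so creating a stub for some hypothetical
// stub type MyStub looks roughly like:
//
//   MyStub *stub = space->allocate<MyStub>(ctorArg1, ctorArg2);
//   if (!stub)
//       return nullptr;   // LifoAlloc allocation can fail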

// Space for optimized stubs. Every JitCompartment has a single
// OptimizedICStubSpace.
struct OptimizedICStubSpace : public ICStubSpace
{
    static const size_t STUB_DEFAULT_CHUNK_SIZE = 4 * 1024;

  public:
    OptimizedICStubSpace()
      : ICStubSpace(STUB_DEFAULT_CHUNK_SIZE)
    {}

    void free() {
        allocator_.freeAll();
    }
};

// Space for fallback stubs. Every BaselineScript has a
// FallbackICStubSpace.
struct FallbackICStubSpace : public ICStubSpace
{
    static const size_t STUB_DEFAULT_CHUNK_SIZE = 256;

  public:
    FallbackICStubSpace()
      : ICStubSpace(STUB_DEFAULT_CHUNK_SIZE)
    {}

    inline void adoptFrom(FallbackICStubSpace *other) {
        allocator_.steal(&(other->allocator_));
    }
};
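
// Illustrative sketch (hypothetical call site): adoptFrom() steals the other
// allocator's chunks without copying, which is how fallback stubs can follow
// a script across a recompile, e.g. when a new BaselineScript replaces an
// on-stack one in debug mode:
//
//   FallbackICStubSpace newSpace;
//   newSpace.adoptFrom(oldScript->fallbackStubSpace());  // hypothetical accessor
//   // other's allocator is now empty; the stubs live on in newSpace.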

// Information about a loop backedge in the runtime, which can be set to
// point to either the loop header or to an OOL interrupt checking stub,
// if signal handlers are being used to implement interrupts.
class PatchableBackedge : public InlineListNode<PatchableBackedge>
{
    friend class JitRuntime;

    CodeLocationJump backedge;
    CodeLocationLabel loopHeader;
    CodeLocationLabel interruptCheck;

  public:
    PatchableBackedge(CodeLocationJump backedge,
                      CodeLocationLabel loopHeader,
                      CodeLocationLabel interruptCheck)
      : backedge(backedge), loopHeader(loopHeader), interruptCheck(interruptCheck)
    {}
};
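
// Illustrative sketch (hypothetical code-generation site): after emitting a
// loop, a compiler records the backedge jump together with its two possible
// targets and registers it with the runtime so patchIonBackedges() can
// repoint it later. The CodeLocation values below are placeholders.
//
//   PatchableBackedge *pb =
//       new PatchableBackedge(backedgeJump, loopHeaderLabel, interruptCheckLabel);
//   rt->jitRuntime()->addPatchableBackedge(pb);
//   ...
//   rt->jitRuntime()->removePatchableBackedge(pb);  // when the code is discarded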

class JitRuntime
{
    friend class JitCompartment;

    // Executable allocator for all code except the main code in an IonScript.
    // Shared with the runtime.
    JSC::ExecutableAllocator *execAlloc_;

    // Executable allocator used for allocating the main code in an IonScript.
    // All accesses to this allocator must be protected by the runtime's
    // interrupt lock, as the executable memory may be protected() when
    // requesting an interrupt to force a fault in the Ion code and avoid the
    // need for explicit interrupt checks.
    JSC::ExecutableAllocator *ionAlloc_;

    // Shared post-exception-handler tail.
    JitCode *exceptionTail_;

    // Shared post-bailout-handler tail.
    JitCode *bailoutTail_;

    // Trampoline for entering JIT code. Contains the OSR prologue.
    JitCode *enterJIT_;

    // Trampoline for entering baseline JIT code.
    JitCode *enterBaselineJIT_;

    // Vector mapping frame class sizes to bailout tables.
    Vector<JitCode*, 4, SystemAllocPolicy> bailoutTables_;

    // Generic bailout table; used if the bailout table overflows.
    JitCode *bailoutHandler_;

    // Argument-rectifying thunk, used when insufficient arguments are passed
    // to a function call site.
    JitCode *argumentsRectifier_;
    void *argumentsRectifierReturnAddr_;

    // Arguments-rectifying thunk which loads |parallelIon| instead of |ion|.
    JitCode *parallelArgumentsRectifier_;

    // Thunk that invalidates an (Ion compiled) caller on the Ion stack.
    JitCode *invalidator_;

    // Thunk that calls the GC pre-barrier.
    JitCode *valuePreBarrier_;
    JitCode *shapePreBarrier_;

    // Thunk used by the debugger for breakpoint and step mode.
    JitCode *debugTrapHandler_;

    // Stub used to inline the ForkJoinGetSlice intrinsic.
    JitCode *forkJoinGetSliceStub_;

    // Thunk used to fix up on-stack recompiles of baseline scripts.
    JitCode *baselineDebugModeOSRHandler_;
    void *baselineDebugModeOSRHandlerNoFrameRegPopAddr_;

    // Map VMFunction addresses to the JitCode of the wrapper.
    typedef WeakCache<const VMFunction *, JitCode *> VMWrapperMap;
    VMWrapperMap *functionWrappers_;

    // Buffer for OSR from baseline to Ion. To avoid holding on to this for
    // too long, it's also freed in JitCompartment::mark and in EnterBaseline
    // (after returning from JIT code).
    uint8_t *osrTempData_;

    // Whether all Ion code in the runtime is protected, and will fault if it
    // is accessed.
    bool ionCodeProtected_;

    // If signal handlers are installed, this contains all loop backedges for
    // IonScripts in the runtime.
    InlineList<PatchableBackedge> backedgeList_;

  private:
    JitCode *generateExceptionTailStub(JSContext *cx);
    JitCode *generateBailoutTailStub(JSContext *cx);
    JitCode *generateEnterJIT(JSContext *cx, EnterJitType type);
    JitCode *generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void **returnAddrOut);
    JitCode *generateBailoutTable(JSContext *cx, uint32_t frameClass);
    JitCode *generateBailoutHandler(JSContext *cx);
    JitCode *generateInvalidator(JSContext *cx);
    JitCode *generatePreBarrier(JSContext *cx, MIRType type);
    JitCode *generateDebugTrapHandler(JSContext *cx);
    JitCode *generateForkJoinGetSliceStub(JSContext *cx);
    JitCode *generateBaselineDebugModeOSRHandler(JSContext *cx, uint32_t *noFrameRegPopOffsetOut);
    JitCode *generateVMWrapper(JSContext *cx, const VMFunction &f);

    JSC::ExecutableAllocator *createIonAlloc(JSContext *cx);

  public:
    JitRuntime();
    ~JitRuntime();
    bool initialize(JSContext *cx);

    uint8_t *allocateOsrTempData(size_t size);
    void freeOsrTempData();

    static void Mark(JSTracer *trc);

    JSC::ExecutableAllocator *execAlloc() const {
        return execAlloc_;
    }

    JSC::ExecutableAllocator *getIonAlloc(JSContext *cx) {
        JS_ASSERT(cx->runtime()->currentThreadOwnsInterruptLock());
        return ionAlloc_ ? ionAlloc_ : createIonAlloc(cx);
    }

    JSC::ExecutableAllocator *ionAlloc(JSRuntime *rt) {
        JS_ASSERT(rt->currentThreadOwnsInterruptLock());
        return ionAlloc_;
    }
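
    // Illustrative sketch (the guard type is an assumption, not declared in
    // this file): both accessors above assert that the interrupt lock is
    // held, so callers are expected to bracket their use of the allocator
    // with the runtime's interrupt lock:
    //
    //   JSRuntime::AutoLockForInterrupt lock(rt);   // assumed RAII guard
    //   JSC::ExecutableAllocator *alloc = rt->jitRuntime()->ionAlloc(rt);
    //   // ...allocate Ion code while the lock is held...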

    bool ionCodeProtected() {
        return ionCodeProtected_;
    }

    void addPatchableBackedge(PatchableBackedge *backedge) {
        backedgeList_.pushFront(backedge);
    }
    void removePatchableBackedge(PatchableBackedge *backedge) {
        backedgeList_.remove(backedge);
    }

    enum BackedgeTarget {
        BackedgeLoopHeader,
        BackedgeInterruptCheck
    };

    void ensureIonCodeProtected(JSRuntime *rt);
    void ensureIonCodeAccessible(JSRuntime *rt);
    void patchIonBackedges(JSRuntime *rt, BackedgeTarget target);
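
    // Illustrative sketch (hypothetical interrupt-request path): to trigger
    // an interrupt without explicit checks, backedges are repointed at the
    // OOL interrupt check, then restored once the interrupt is serviced:
    //
    //   rt->jitRuntime()->patchIonBackedges(rt, JitRuntime::BackedgeInterruptCheck);
    //   // ...Ion code reaches the interrupt check on its next backedge...
    //   rt->jitRuntime()->patchIonBackedges(rt, JitRuntime::BackedgeLoopHeader);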

    bool handleAccessViolation(JSRuntime *rt, void *faultingAddress);

    JitCode *getVMWrapper(const VMFunction &f) const;
    JitCode *debugTrapHandler(JSContext *cx);
    JitCode *getBaselineDebugModeOSRHandler(JSContext *cx);
    void *getBaselineDebugModeOSRHandlerAddress(JSContext *cx, bool popFrameReg);

    JitCode *getGenericBailoutHandler() const {
        return bailoutHandler_;
    }

    JitCode *getExceptionTail() const {
        return exceptionTail_;
    }

    JitCode *getBailoutTail() const {
        return bailoutTail_;
    }

    JitCode *getBailoutTable(const FrameSizeClass &frameClass) const;

    JitCode *getArgumentsRectifier(ExecutionMode mode) const {
        switch (mode) {
          case SequentialExecution: return argumentsRectifier_;
          case ParallelExecution:   return parallelArgumentsRectifier_;
          default: MOZ_ASSUME_UNREACHABLE("No such execution mode");
        }
    }

    void *getArgumentsRectifierReturnAddr() const {
        return argumentsRectifierReturnAddr_;
    }

    JitCode *getInvalidationThunk() const {
        return invalidator_;
    }

    EnterJitCode enterIon() const {
        return enterJIT_->as<EnterJitCode>();
    }

    EnterJitCode enterBaseline() const {
        return enterBaselineJIT_->as<EnterJitCode>();
    }

    JitCode *valuePreBarrier() const {
        return valuePreBarrier_;
    }

    JitCode *shapePreBarrier() const {
        return shapePreBarrier_;
    }

    bool ensureForkJoinGetSliceStubExists(JSContext *cx);
    JitCode *forkJoinGetSliceStub() const {
        return forkJoinGetSliceStub_;
    }
};

class JitZone
{
    // Allocated space for optimized baseline stubs.
    OptimizedICStubSpace optimizedStubSpace_;

  public:
    OptimizedICStubSpace *optimizedStubSpace() {
        return &optimizedStubSpace_;
    }
};

class JitCompartment
{
    friend class JitActivation;

    // Map ICStub keys to ICStub shared code objects.
    typedef WeakValueCache<uint32_t, ReadBarriered<JitCode> > ICStubCodeMap;
    ICStubCodeMap *stubCodes_;

    // Keeps track of offsets into various baseline stubs' code at the
    // return point from a called script.
    void *baselineCallReturnFromIonAddr_;
    void *baselineGetPropReturnFromIonAddr_;
    void *baselineSetPropReturnFromIonAddr_;

    // Same as above, but used for returns from a baseline stub. This is
    // needed for recompiles of on-stack baseline scripts (e.g., for debug
    // mode).
    void *baselineCallReturnFromStubAddr_;
    void *baselineGetPropReturnFromStubAddr_;
    void *baselineSetPropReturnFromStubAddr_;

    // Stub to concatenate two strings inline. Note that it can't be
    // stored in JitRuntime because masm.newGCString bakes in zone-specific
    // pointers. This has to be a weak pointer to avoid keeping the whole
    // compartment alive.
    ReadBarriered<JitCode> stringConcatStub_;
    ReadBarriered<JitCode> parallelStringConcatStub_;

    // Set of JSScripts invoked by ForkJoin (i.e. the entry script). These
    // scripts are marked if their respective parallel IonScripts' age is less
    // than a certain amount. See IonScript::parallelAge_.
    typedef HashSet<EncapsulatedPtrScript> ScriptSet;
    ScriptSet *activeParallelEntryScripts_;

    JitCode *generateStringConcatStub(JSContext *cx, ExecutionMode mode);

  public:
    JitCode *getStubCode(uint32_t key) {
        ICStubCodeMap::AddPtr p = stubCodes_->lookupForAdd(key);
        if (p)
            return p->value();
        return nullptr;
    }
    bool putStubCode(uint32_t key, Handle<JitCode *> stubCode) {
        // Make sure to do a lookupForAdd(key) and then insert into that slot,
        // because lookupForAdd can trigger a GC; if stubCode is moved by that
        // GC, we still end up writing the updated pointer.
        JS_ASSERT(!stubCodes_->has(key));
        ICStubCodeMap::AddPtr p = stubCodes_->lookupForAdd(key);
        return stubCodes_->add(p, key, stubCode.get());
    }
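
    // Illustrative sketch (hypothetical caller): the intended pattern is a
    // lookup followed by compile-and-insert on a miss, so stubs sharing a key
    // share one JitCode. compileStubCode() is a placeholder.
    //
    //   JitCode *code = jitCompartment->getStubCode(key);
    //   if (!code) {
    //       Rooted<JitCode *> newCode(cx, compileStubCode(cx));
    //       if (!newCode || !jitCompartment->putStubCode(key, newCode))
    //           return nullptr;
    //       code = newCode;
    //   }
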
    void initBaselineCallReturnFromIonAddr(void *addr) {
        JS_ASSERT(baselineCallReturnFromIonAddr_ == nullptr);
        baselineCallReturnFromIonAddr_ = addr;
    }
    void *baselineCallReturnFromIonAddr() {
        JS_ASSERT(baselineCallReturnFromIonAddr_ != nullptr);
        return baselineCallReturnFromIonAddr_;
    }
    void initBaselineGetPropReturnFromIonAddr(void *addr) {
        JS_ASSERT(baselineGetPropReturnFromIonAddr_ == nullptr);
        baselineGetPropReturnFromIonAddr_ = addr;
    }
    void *baselineGetPropReturnFromIonAddr() {
        JS_ASSERT(baselineGetPropReturnFromIonAddr_ != nullptr);
        return baselineGetPropReturnFromIonAddr_;
    }
    void initBaselineSetPropReturnFromIonAddr(void *addr) {
        JS_ASSERT(baselineSetPropReturnFromIonAddr_ == nullptr);
        baselineSetPropReturnFromIonAddr_ = addr;
    }
    void *baselineSetPropReturnFromIonAddr() {
        JS_ASSERT(baselineSetPropReturnFromIonAddr_ != nullptr);
        return baselineSetPropReturnFromIonAddr_;
    }

    void initBaselineCallReturnFromStubAddr(void *addr) {
        JS_ASSERT(baselineCallReturnFromStubAddr_ == nullptr);
        baselineCallReturnFromStubAddr_ = addr;
    }
    void *baselineCallReturnFromStubAddr() {
        JS_ASSERT(baselineCallReturnFromStubAddr_ != nullptr);
        return baselineCallReturnFromStubAddr_;
    }
    void initBaselineGetPropReturnFromStubAddr(void *addr) {
        JS_ASSERT(baselineGetPropReturnFromStubAddr_ == nullptr);
        baselineGetPropReturnFromStubAddr_ = addr;
    }
    void *baselineGetPropReturnFromStubAddr() {
        JS_ASSERT(baselineGetPropReturnFromStubAddr_ != nullptr);
        return baselineGetPropReturnFromStubAddr_;
    }
    void initBaselineSetPropReturnFromStubAddr(void *addr) {
        JS_ASSERT(baselineSetPropReturnFromStubAddr_ == nullptr);
        baselineSetPropReturnFromStubAddr_ = addr;
    }
    void *baselineSetPropReturnFromStubAddr() {
        JS_ASSERT(baselineSetPropReturnFromStubAddr_ != nullptr);
        return baselineSetPropReturnFromStubAddr_;
    }

    bool notifyOfActiveParallelEntryScript(JSContext *cx, HandleScript script);

    void toggleBaselineStubBarriers(bool enabled);

    JSC::ExecutableAllocator *createIonAlloc();

  public:
    JitCompartment();
    ~JitCompartment();

    bool initialize(JSContext *cx);

    // Initialize code stubs only used by Ion, not Baseline.
    bool ensureIonStubsExist(JSContext *cx);

    void mark(JSTracer *trc, JSCompartment *compartment);
    void sweep(FreeOp *fop);

    JitCode *stringConcatStub(ExecutionMode mode) const {
        switch (mode) {
          case SequentialExecution: return stringConcatStub_;
          case ParallelExecution:   return parallelStringConcatStub_;
          default: MOZ_ASSUME_UNREACHABLE("No such execution mode");
        }
    }
};

// Called from JSCompartment::discardJitCode().
void InvalidateAll(FreeOp *fop, JS::Zone *zone);
template <ExecutionMode mode>
void FinishInvalidation(FreeOp *fop, JSScript *script);

inline bool
ShouldPreserveParallelJITCode(JSRuntime *rt, JSScript *script, bool increase = false)
{
    IonScript *parallelIon = script->parallelIonScript();
    uint32_t age = increase ? parallelIon->increaseParallelAge() : parallelIon->parallelAge();
    return age < jit::IonScript::MAX_PARALLEL_AGE && !rt->gcShouldCleanUpEverything;
}
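
// Illustrative sketch (hypothetical sweep-time caller): a script's parallel
// IonScript is kept only while its age is below MAX_PARALLEL_AGE and the GC
// is not tearing everything down:
//
//   if (script->hasParallelIonScript() &&        // assumed accessor
//       !ShouldPreserveParallelJITCode(rt, script))
//   {
//       FinishInvalidation<ParallelExecution>(fop, script);
//   }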

// On Windows systems, really large frames need to be incrementally touched.
// The following constant defines the minimum increment of the touch.
#ifdef XP_WIN
const unsigned WINDOWS_BIG_FRAME_TOUCH_INCREMENT = 4096 - 1;
#endif
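
// Illustrative sketch (an assumption about the intended use; the masm helper
// is a placeholder): Windows commits stack guard pages one page at a time, so
// a frame larger than a page must be touched at intervals no larger than a
// page before the stack pointer moves past it:
//
//   #ifdef XP_WIN
//   for (uint32_t offset = WINDOWS_BIG_FRAME_TOUCH_INCREMENT;
//        offset < frameSize;
//        offset += WINDOWS_BIG_FRAME_TOUCH_INCREMENT)
//   {
//       masm.touchFrameValue(offset);   // hypothetical: write to sp - offset
//   }
//   #endif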

} // namespace jit
} // namespace js

#endif // JS_ION

#endif /* jit_JitCompartment_h */
