Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f for hacking purposes.
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #ifndef vm_Runtime_h
8 #define vm_Runtime_h
10 #include "mozilla/Atomics.h"
11 #include "mozilla/Attributes.h"
12 #include "mozilla/LinkedList.h"
13 #include "mozilla/MemoryReporting.h"
14 #include "mozilla/PodOperations.h"
15 #include "mozilla/Scoped.h"
16 #include "mozilla/ThreadLocal.h"
18 #include <setjmp.h>
20 #include "jsatom.h"
21 #include "jsclist.h"
22 #include "jsgc.h"
23 #ifdef DEBUG
24 # include "jsproxy.h"
25 #endif
26 #include "jsscript.h"
28 #include "ds/FixedSizeHash.h"
29 #include "frontend/ParseMaps.h"
30 #ifdef JSGC_GENERATIONAL
31 # include "gc/Nursery.h"
32 #endif
33 #include "gc/Statistics.h"
34 #ifdef JSGC_GENERATIONAL
35 # include "gc/StoreBuffer.h"
36 #endif
37 #include "gc/Tracer.h"
38 #ifdef XP_MACOSX
39 # include "jit/AsmJSSignalHandlers.h"
40 #endif
41 #include "js/HashTable.h"
42 #include "js/Vector.h"
43 #include "vm/CommonPropertyNames.h"
44 #include "vm/DateTime.h"
45 #include "vm/MallocProvider.h"
46 #include "vm/SPSProfiler.h"
47 #include "vm/Stack.h"
48 #include "vm/ThreadPool.h"
50 #ifdef _MSC_VER
51 #pragma warning(push)
52 #pragma warning(disable:4100) /* Silence unreferenced formal parameter warnings */
53 #endif
55 namespace js {
57 class PerThreadData;
58 class ThreadSafeContext;
59 class AutoKeepAtoms;
60 #ifdef JS_TRACE_LOGGING
61 class TraceLogger;
62 #endif
64 /* Thread Local Storage slot for storing the runtime for a thread. */
65 extern mozilla::ThreadLocal<PerThreadData*> TlsPerThreadData;
67 } // namespace js
69 struct DtoaState;
71 extern void
72 js_ReportOutOfMemory(js::ThreadSafeContext *cx);
74 extern void
75 js_ReportAllocationOverflow(js::ThreadSafeContext *cx);
77 extern void
78 js_ReportOverRecursed(js::ThreadSafeContext *cx);
80 namespace JSC { class ExecutableAllocator; }
82 namespace WTF { class BumpPointerAllocator; }
84 namespace js {
86 typedef Rooted<JSLinearString*> RootedLinearString;
88 class Activation;
89 class ActivationIterator;
90 class AsmJSActivation;
91 class MathCache;
93 namespace jit {
94 class JitRuntime;
95 class JitActivation;
96 struct PcScriptCache;
97 class Simulator;
98 class SimulatorRuntime;
99 class AutoFlushICache;
100 }
102 /*
103 * GetSrcNote cache to avoid O(n^2) growth in finding a source note for a
104 * given pc in a script. We use the script->code pointer to tag the cache,
105 * instead of the script address itself, so that source notes are always found
106 * by offset from the bytecode with which they were generated.
107 */
108 struct GSNCache {
109 typedef HashMap<jsbytecode *,
110 jssrcnote *,
111 PointerHasher<jsbytecode *, 0>,
112 SystemAllocPolicy> Map;
114 jsbytecode *code;
115 Map map;
117 GSNCache() : code(nullptr) { }
119 void purge();
120 };
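/*
 * Illustrative sketch (not part of the upstream header): a GetSrcNote-style
 * lookup would consult this cache roughly as follows, re-tagging and
 * refilling the map whenever a different script's bytecode is queried.
 * LookupSrcNote() and FillCacheForScript() are hypothetical names, and the
 * exact HashMap entry accessors may differ between releases.
 *
 *   jssrcnote *LookupSrcNote(js::GSNCache &cache, JSScript *script, jsbytecode *pc) {
 *       if (cache.code != script->code()) {
 *           cache.purge();                        // drop entries for the old script
 *           FillCacheForScript(cache, script);    // populate cache.map, set cache.code
 *       }
 *       js::GSNCache::Map::Ptr p = cache.map.lookup(pc);
 *       return p ? p->value() : nullptr;
 *   }
 */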
122 /*
123 * ScopeCoordinateName cache to avoid O(n^2) growth in finding the name
124 * associated with a given aliasedvar operation.
125 */
126 struct ScopeCoordinateNameCache {
127 typedef HashMap<uint32_t,
128 jsid,
129 DefaultHasher<uint32_t>,
130 SystemAllocPolicy> Map;
132 Shape *shape;
133 Map map;
135 ScopeCoordinateNameCache() : shape(nullptr) {}
136 void purge();
137 };
139 typedef Vector<ScriptAndCounts, 0, SystemAllocPolicy> ScriptAndCountsVector;
141 struct ConservativeGCData
142 {
143 /*
144 * The GC scans conservatively between ThreadData::nativeStackBase and
145 * nativeStackTop unless the latter is nullptr.
146 */
147 uintptr_t *nativeStackTop;
149 union {
150 jmp_buf jmpbuf;
151 uintptr_t words[JS_HOWMANY(sizeof(jmp_buf), sizeof(uintptr_t))];
152 } registerSnapshot;
154 ConservativeGCData() {
155 mozilla::PodZero(this);
156 }
158 ~ConservativeGCData() {
159 #ifdef JS_THREADSAFE
160 /*
161 * The conservative GC scanner should be disabled when the thread leaves
162 * the last request.
163 */
164 JS_ASSERT(!hasStackToScan());
165 #endif
166 }
168 MOZ_NEVER_INLINE void recordStackTop();
170 #ifdef JS_THREADSAFE
171 void updateForRequestEnd() {
172 nativeStackTop = nullptr;
173 }
174 #endif
176 bool hasStackToScan() const {
177 return !!nativeStackTop;
178 }
179 };
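/*
 * Illustrative sketch (not part of the upstream header): the intended
 * protocol, as described above, is to record the current stack extent before
 * running code that may trigger a GC and to clear it when the thread leaves
 * its last request. |cgcData| stands for the runtime's ConservativeGCData
 * instance (see JSRuntime::conservativeGC below); the surrounding call sites
 * are only sketched.
 *
 *   cgcData.recordStackTop();          // conservative scanner may now walk the stack
 *   // ... run code that can allocate and trigger GC ...
 *   cgcData.updateForRequestEnd();     // nativeStackTop = nullptr
 *   JS_ASSERT(!cgcData.hasStackToScan());
 */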
181 struct EvalCacheEntry
182 {
183 JSScript *script;
184 JSScript *callerScript;
185 jsbytecode *pc;
186 };
188 struct EvalCacheLookup
189 {
190 EvalCacheLookup(JSContext *cx) : str(cx), callerScript(cx) {}
191 RootedLinearString str;
192 RootedScript callerScript;
193 JSVersion version;
194 jsbytecode *pc;
195 };
197 struct EvalCacheHashPolicy
198 {
199 typedef EvalCacheLookup Lookup;
201 static HashNumber hash(const Lookup &l);
202 static bool match(const EvalCacheEntry &entry, const EvalCacheLookup &l);
203 };
205 typedef HashSet<EvalCacheEntry, EvalCacheHashPolicy, SystemAllocPolicy> EvalCache;
207 struct LazyScriptHashPolicy
208 {
209 struct Lookup {
210 JSContext *cx;
211 LazyScript *lazy;
213 Lookup(JSContext *cx, LazyScript *lazy)
214 : cx(cx), lazy(lazy)
215 {}
216 };
218 static const size_t NumHashes = 3;
220 static void hash(const Lookup &lookup, HashNumber hashes[NumHashes]);
221 static bool match(JSScript *script, const Lookup &lookup);
223 // Alternate methods for use when removing scripts from the hash without an
224 // explicit LazyScript lookup.
225 static void hash(JSScript *script, HashNumber hashes[NumHashes]);
226 static bool match(JSScript *script, JSScript *lookup) { return script == lookup; }
228 static void clear(JSScript **pscript) { *pscript = nullptr; }
229 static bool isCleared(JSScript *script) { return !script; }
230 };
232 typedef FixedSizeHashSet<JSScript *, LazyScriptHashPolicy, 769> LazyScriptCache;
234 class PropertyIteratorObject;
236 class NativeIterCache
237 {
238 static const size_t SIZE = size_t(1) << 8;
240 /* Cached native iterators. */
241 PropertyIteratorObject *data[SIZE];
243 static size_t getIndex(uint32_t key) {
244 return size_t(key) % SIZE;
245 }
247 public:
248 /* Native iterator most recently started. */
249 PropertyIteratorObject *last;
251 NativeIterCache()
252 : last(nullptr)
253 {
254 mozilla::PodArrayZero(data);
255 }
257 void purge() {
258 last = nullptr;
259 mozilla::PodArrayZero(data);
260 }
262 PropertyIteratorObject *get(uint32_t key) const {
263 return data[getIndex(key)];
264 }
266 void set(uint32_t key, PropertyIteratorObject *iterobj) {
267 data[getIndex(key)] = iterobj;
268 }
269 };
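/*
 * Illustrative sketch (not part of the upstream header): callers key this
 * cache on a small hash derived from the iterated object (|key| below), and
 * fall back to building a fresh iterator on a miss. NewPropertyIterator() is
 * a hypothetical stand-in for that slow path.
 *
 *   js::NativeIterCache &cache = cx->runtime()->nativeIterCache;
 *   PropertyIteratorObject *iterobj = cache.get(key);
 *   if (!iterobj) {
 *       iterobj = NewPropertyIterator(cx, obj);   // hypothetical slow path
 *       if (!iterobj)
 *           return nullptr;
 *       cache.set(key, iterobj);
 *   }
 *   cache.last = iterobj;
 */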
271 /*
272 * Cache for speeding up repetitive creation of objects in the VM.
273 * When an object is created which matches the criteria in the 'key' section
274 * below, an entry is filled with the resulting object.
275 */
276 class NewObjectCache
277 {
278 /* Statically asserted to be equal to sizeof(JSObject_Slots16) */
279 static const unsigned MAX_OBJ_SIZE = 4 * sizeof(void*) + 16 * sizeof(Value);
281 static void staticAsserts() {
282 JS_STATIC_ASSERT(NewObjectCache::MAX_OBJ_SIZE == sizeof(JSObject_Slots16));
283 JS_STATIC_ASSERT(gc::FINALIZE_OBJECT_LAST == gc::FINALIZE_OBJECT16_BACKGROUND);
284 }
286 struct Entry
287 {
288 /* Class of the constructed object. */
289 const Class *clasp;
291 /*
292 * Key with one of three possible values:
293 *
294 * - Global for the object. The object must have a standard class for
295 * which the global's prototype can be determined, and the object's
296 * parent will be the global.
297 *
298 * - Prototype for the object (cannot be global). The object's parent
299 * will be the prototype's parent.
300 *
301 * - Type for the object. The object's parent will be the type's
302 * prototype's parent.
303 */
304 gc::Cell *key;
306 /* Allocation kind for the constructed object. */
307 gc::AllocKind kind;
309 /* Number of bytes to copy from the template object. */
310 uint32_t nbytes;
312 /*
313 * Template object to copy from, with the initial values of fields,
314 * fixed slots (undefined) and private data (nullptr).
315 */
316 char templateObject[MAX_OBJ_SIZE];
317 };
319 Entry entries[41]; // TODO: reconsider size
321 public:
323 typedef int EntryIndex;
325 NewObjectCache() { mozilla::PodZero(this); }
326 void purge() { mozilla::PodZero(this); }
328 /* Remove any cached items keyed on moved objects. */
329 void clearNurseryObjects(JSRuntime *rt);
331 /*
332 * Get the entry index for the given lookup, and return whether there was
333 * a hit on an existing entry.
334 */
335 inline bool lookupProto(const Class *clasp, JSObject *proto, gc::AllocKind kind, EntryIndex *pentry);
336 inline bool lookupGlobal(const Class *clasp, js::GlobalObject *global, gc::AllocKind kind,
337 EntryIndex *pentry);
339 bool lookupType(js::types::TypeObject *type, gc::AllocKind kind, EntryIndex *pentry) {
340 return lookup(type->clasp(), type, kind, pentry);
341 }
343 /*
344 * Return a new object from a cache hit produced by a lookup method, or
345 * nullptr if returning the object could possibly trigger GC (does not
346 * indicate failure).
347 */
348 template <AllowGC allowGC>
349 inline JSObject *newObjectFromHit(JSContext *cx, EntryIndex entry, js::gc::InitialHeap heap);
351 /* Fill an entry after a cache miss. */
352 void fillProto(EntryIndex entry, const Class *clasp, js::TaggedProto proto, gc::AllocKind kind, JSObject *obj);
354 inline void fillGlobal(EntryIndex entry, const Class *clasp, js::GlobalObject *global,
355 gc::AllocKind kind, JSObject *obj);
357 void fillType(EntryIndex entry, js::types::TypeObject *type, gc::AllocKind kind,
358 JSObject *obj)
359 {
360 JS_ASSERT(obj->type() == type);
361 return fill(entry, type->clasp(), type, kind, obj);
362 }
364 /* Invalidate any entries which might produce an object with shape/proto. */
365 void invalidateEntriesForShape(JSContext *cx, HandleShape shape, HandleObject proto);
367 private:
368 bool lookup(const Class *clasp, gc::Cell *key, gc::AllocKind kind, EntryIndex *pentry) {
369 uintptr_t hash = (uintptr_t(clasp) ^ uintptr_t(key)) + kind;
370 *pentry = hash % mozilla::ArrayLength(entries);
372 Entry *entry = &entries[*pentry];
374 /* N.B. Lookups with the same clasp/key but different kinds map to different entries. */
375 return entry->clasp == clasp && entry->key == key;
376 }
378 void fill(EntryIndex entry_, const Class *clasp, gc::Cell *key, gc::AllocKind kind, JSObject *obj) {
379 JS_ASSERT(unsigned(entry_) < mozilla::ArrayLength(entries));
380 Entry *entry = &entries[entry_];
382 JS_ASSERT(!obj->hasDynamicSlots() && !obj->hasDynamicElements());
384 entry->clasp = clasp;
385 entry->key = key;
386 entry->kind = kind;
388 entry->nbytes = gc::Arena::thingSize(kind);
389 js_memcpy(&entry->templateObject, obj, entry->nbytes);
390 }
392 static void copyCachedToObject(JSObject *dst, JSObject *src, gc::AllocKind kind) {
393 js_memcpy(dst, src, gc::Arena::thingSize(kind));
394 #ifdef JSGC_GENERATIONAL
395 Shape::writeBarrierPost(dst->shape_, &dst->shape_);
396 types::TypeObject::writeBarrierPost(dst->type_, &dst->type_);
397 #endif
398 }
399 };
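/*
 * Illustrative sketch (not part of the upstream header): the expected
 * lookup/fill protocol for this cache, using the proto-keyed variant. On a
 * hit the new object is copied from the cached template; on a miss the entry
 * is (re)filled once the normal slow path has created an object.
 * NewObjectSlowPath() is a hypothetical stand-in for that slow path.
 *
 *   js::NewObjectCache &cache = cx->runtime()->newObjectCache;
 *   js::NewObjectCache::EntryIndex entry = -1;
 *   if (cache.lookupProto(clasp, proto, kind, &entry)) {
 *       if (JSObject *obj = cache.newObjectFromHit<js::CanGC>(cx, entry, heap))
 *           return obj;
 *   }
 *   JSObject *obj = NewObjectSlowPath(cx, clasp, proto, kind);
 *   if (obj)
 *       cache.fillProto(entry, clasp, js::TaggedProto(proto), kind, obj);
 *   return obj;
 */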
401 /*
402 * A FreeOp can do one thing: free memory. For convenience, it also has
403 * delete_ methods that call destructors before freeing.
404 *
405 * FreeOp is passed to finalizers and other sweep-phase hooks so that we do not
406 * need to pass a JSContext to those hooks.
407 */
408 class FreeOp : public JSFreeOp {
409 bool shouldFreeLater_;
411 public:
412 static FreeOp *get(JSFreeOp *fop) {
413 return static_cast<FreeOp *>(fop);
414 }
416 FreeOp(JSRuntime *rt, bool shouldFreeLater)
417 : JSFreeOp(rt),
418 shouldFreeLater_(shouldFreeLater)
419 {
420 }
422 bool shouldFreeLater() const {
423 return shouldFreeLater_;
424 }
426 inline void free_(void *p);
428 template <class T>
429 inline void delete_(T *p) {
430 if (p) {
431 p->~T();
432 free_(p);
433 }
434 }
436 static void staticAsserts() {
437 /*
438 * Check that JSFreeOp is the first base class for FreeOp and we can
439 * reinterpret a pointer to JSFreeOp as a pointer to FreeOp without
440 * any offset adjustments. JSClass::finalize <-> Class::finalize depends
441 * on this.
442 */
443 JS_STATIC_ASSERT(offsetof(FreeOp, shouldFreeLater_) == sizeof(JSFreeOp));
444 }
445 };
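/*
 * Illustrative sketch (not part of the upstream header): a typical finalizer
 * receives the FreeOp and releases per-object data through it, so the memory
 * is freed either immediately or later on the background sweep thread.
 * MyPrivate and the finalizer itself are hypothetical.
 *
 *   static void MyFinalize(FreeOp *fop, JSObject *obj) {
 *       MyPrivate *priv = static_cast<MyPrivate *>(JS_GetPrivate(obj));
 *       fop->delete_(priv);   // runs ~MyPrivate(), then free_()
 *   }
 */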
447 } /* namespace js */
449 namespace JS {
450 struct RuntimeSizes;
451 }
453 /* Various built-in or commonly-used names pinned on first context. */
454 struct JSAtomState
455 {
456 #define PROPERTYNAME_FIELD(idpart, id, text) js::FixedHeapPtr<js::PropertyName> id;
457 FOR_EACH_COMMON_PROPERTYNAME(PROPERTYNAME_FIELD)
458 #undef PROPERTYNAME_FIELD
459 #define PROPERTYNAME_FIELD(name, code, init, clasp) js::FixedHeapPtr<js::PropertyName> name;
460 JS_FOR_EACH_PROTOTYPE(PROPERTYNAME_FIELD)
461 #undef PROPERTYNAME_FIELD
462 };
464 namespace js {
466 #define NAME_OFFSET(name) offsetof(JSAtomState, name)
468 inline HandlePropertyName
469 AtomStateOffsetToName(const JSAtomState &atomState, size_t offset)
470 {
471 return *(js::FixedHeapPtr<js::PropertyName>*)((char*)&atomState + offset);
472 }
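/*
 * Illustrative sketch (not part of the upstream header): NAME_OFFSET and
 * AtomStateOffsetToName let code that only carries a byte offset (for
 * example, offsets baked into static tables) recover the interned
 * PropertyName. Assuming "length" is among the common property names, the
 * round trip looks like:
 *
 *   size_t offset = NAME_OFFSET(length);
 *   HandlePropertyName name = AtomStateOffsetToName(*rt->commonNames, offset);
 */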
474 // There are several coarse locks in the enum below. These may be either
475 // per-runtime or per-process. When acquiring more than one of these locks,
476 // the acquisition must be done in the order below to avoid deadlocks.
477 enum RuntimeLock {
478 ExclusiveAccessLock,
479 WorkerThreadStateLock,
480 InterruptLock,
481 GCLock
482 };
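/*
 * Illustrative sketch (not part of the upstream header): code that needs both
 * the exclusive-access lock and the GC lock must take them in the order given
 * above, e.g. (constructor arguments are sketched, not exact signatures):
 *
 *   js::AutoLockForExclusiveAccess exclusiveLock(rt);   // ExclusiveAccessLock first
 *   js::AutoLockGC gcLock(rt);                          // GCLock last
 *
 * Taking them in the opposite order on two different threads could deadlock;
 * AssertCurrentThreadCanLock() checks the ordering in debug builds.
 */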
484 #ifdef DEBUG
485 void AssertCurrentThreadCanLock(RuntimeLock which);
486 #else
487 inline void AssertCurrentThreadCanLock(RuntimeLock which) {}
488 #endif
490 /*
491 * Encapsulates portions of the runtime/context that are tied to a
492 * single active thread. Instances of this structure can occur for
493 * the main thread as |JSRuntime::mainThread|, for select operations
494 * performed off thread, such as parsing, and for Parallel JS worker
495 * threads.
496 */
497 class PerThreadData : public PerThreadDataFriendFields
498 {
499 /*
500 * Backpointer to the full shared JSRuntime* with which this
501 * thread is associated. This is private because accessing the
502 * fields of this runtime can provoke race conditions, so the
503 * intention is that access will be mediated through safe
504 * functions like |runtimeFromMainThread| and |associatedWith()| below.
505 */
506 JSRuntime *runtime_;
508 public:
509 /*
510 * We save all conservatively scanned roots in this vector so that
511 * conservative scanning can be "replayed" deterministically. In DEBUG mode,
512 * this allows us to run a non-incremental GC after every incremental GC to
513 * ensure that no objects were missed.
514 */
515 #ifdef DEBUG
516 struct SavedGCRoot {
517 void *thing;
518 JSGCTraceKind kind;
520 SavedGCRoot(void *thing, JSGCTraceKind kind) : thing(thing), kind(kind) {}
521 };
522 js::Vector<SavedGCRoot, 0, js::SystemAllocPolicy> gcSavedRoots;
523 #endif
525 /*
526 * If Ion code is on the stack, and has called into C++, this will be
527 * aligned to an Ion exit frame.
528 */
529 uint8_t *ionTop;
531 /*
532 * The current JSContext when entering JIT code. This field may only be used
533 * from JIT code and C++ directly called by JIT code (otherwise it may refer
534 * to the wrong JSContext).
535 */
536 JSContext *jitJSContext;
538 /*
539 * The stack limit checked by JIT code. This stack limit may be temporarily
540 * set to null to force JIT code to exit (e.g., for the operation callback).
541 */
542 uintptr_t jitStackLimit;
544 inline void setJitStackLimit(uintptr_t limit);
546 #ifdef JS_TRACE_LOGGING
547 TraceLogger *traceLogger;
548 #endif
550 /*
551 * asm.js maintains a stack of AsmJSModule activations (see AsmJS.h). This
552 * stack is used by JSRuntime::requestInterrupt to stop long-running asm.js
553 * without requiring dynamic polling operations in the generated
554 * code. Since requestInterrupt may run on a thread other than the
555 * JSRuntime's owner thread, all reads/writes must be synchronized (by
556 * rt->interruptLock).
557 */
558 private:
559 friend class js::Activation;
560 friend class js::ActivationIterator;
561 friend class js::jit::JitActivation;
562 friend class js::AsmJSActivation;
563 #ifdef DEBUG
564 friend void js::AssertCurrentThreadCanLock(RuntimeLock which);
565 #endif
567 /*
568 * Points to the most recent activation running on the thread.
569 * See Activation comment in vm/Stack.h.
570 */
571 js::Activation *activation_;
573 /* See AsmJSActivation comment. Protected by rt->interruptLock. */
574 js::AsmJSActivation *asmJSActivationStack_;
576 /* Pointer to the current AutoFlushICache. */
577 js::jit::AutoFlushICache *autoFlushICache_;
579 #ifdef JS_ARM_SIMULATOR
580 js::jit::Simulator *simulator_;
581 uintptr_t simulatorStackLimit_;
582 #endif
584 public:
585 js::Activation *const *addressOfActivation() const {
586 return &activation_;
587 }
588 static unsigned offsetOfAsmJSActivationStackReadOnly() {
589 return offsetof(PerThreadData, asmJSActivationStack_);
590 }
591 static unsigned offsetOfActivation() {
592 return offsetof(PerThreadData, activation_);
593 }
595 js::AsmJSActivation *asmJSActivationStackFromAnyThread() const {
596 return asmJSActivationStack_;
597 }
598 js::AsmJSActivation *asmJSActivationStackFromOwnerThread() const {
599 return asmJSActivationStack_;
600 }
602 js::Activation *activation() const {
603 return activation_;
604 }
606 /* State used by jsdtoa.cpp. */
607 DtoaState *dtoaState;
609 /*
610 * When this flag is non-zero, any attempt to GC will be skipped. It is used
611 * to suppress GC when reporting an OOM (see js_ReportOutOfMemory) and in
612 * debugging facilities that cannot tolerate a GC and would rather OOM
613 * immediately, such as utilities exposed to GDB. Setting this flag is
614 * extremely dangerous and should only be used when in an OOM situation or
615 * in non-exposed debugging facilities.
616 */
617 int32_t suppressGC;
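/*
 * Illustrative sketch (not part of the upstream header): debugging utilities
 * bump this counter around regions that must not GC, e.g.
 *
 *   cx->runtime()->mainThread.suppressGC++;
 *   // ... inspect heap objects without risking a collection ...
 *   cx->runtime()->mainThread.suppressGC--;
 *
 * In practice an RAII helper in the GC headers is used instead of touching
 * the counter directly; both the accessor path and the helper are assumptions
 * here, not part of this header.
 */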
619 // Number of active bytecode compilations on this thread.
620 unsigned activeCompilations;
622 PerThreadData(JSRuntime *runtime);
623 ~PerThreadData();
625 bool init();
627 bool associatedWith(const JSRuntime *rt) { return runtime_ == rt; }
628 inline JSRuntime *runtimeFromMainThread();
629 inline JSRuntime *runtimeIfOnOwnerThread();
631 inline bool exclusiveThreadsPresent();
632 inline void addActiveCompilation();
633 inline void removeActiveCompilation();
635 // For threads which may be associated with different runtimes, depending
636 // on the work they are doing.
637 class MOZ_STACK_CLASS AutoEnterRuntime
638 {
639 PerThreadData *pt;
641 public:
642 AutoEnterRuntime(PerThreadData *pt, JSRuntime *rt)
643 : pt(pt)
644 {
645 JS_ASSERT(!pt->runtime_);
646 pt->runtime_ = rt;
647 }
649 ~AutoEnterRuntime() {
650 pt->runtime_ = nullptr;
651 }
652 };
654 js::jit::AutoFlushICache *autoFlushICache() const;
655 void setAutoFlushICache(js::jit::AutoFlushICache *afc);
657 #ifdef JS_ARM_SIMULATOR
658 js::jit::Simulator *simulator() const;
659 void setSimulator(js::jit::Simulator *sim);
660 js::jit::SimulatorRuntime *simulatorRuntime() const;
661 uintptr_t *addressOfSimulatorStackLimit();
662 #endif
663 };
665 namespace gc {
666 class MarkingValidator;
667 } // namespace gc
669 typedef Vector<JS::Zone *, 4, SystemAllocPolicy> ZoneVector;
671 class AutoLockForExclusiveAccess;
673 void RecomputeStackLimit(JSRuntime *rt, StackKind kind);
675 } // namespace js
677 struct JSRuntime : public JS::shadow::Runtime,
678 public js::MallocProvider<JSRuntime>
679 {
680 /*
681 * Per-thread data for the main thread that is associated with
682 * this JSRuntime, as opposed to any worker threads used in
683 * parallel sections. See definition of |PerThreadData| struct
684 * above for more details.
685 *
686 * NB: This field is statically asserted to be at offset
687 * sizeof(js::shadow::Runtime). See
688 * PerThreadDataFriendFields::getMainThread.
689 */
690 js::PerThreadData mainThread;
692 /*
693 * If non-null, another runtime guaranteed to outlive this one and whose
694 * permanent data may be used by this one where possible.
695 */
696 JSRuntime *parentRuntime;
698 /*
699 * If true, we've been asked to call the interrupt callback as soon as
700 * possible.
701 */
702 mozilla::Atomic<bool, mozilla::Relaxed> interrupt;
704 #if defined(JS_THREADSAFE) && defined(JS_ION)
705 /*
706 * If non-zero, ForkJoin should service an interrupt. This is a separate
707 * flag from |interrupt| because we cannot use the mprotect trick with PJS
708 * code and ignore the TriggerCallbackAnyThreadDontStopIon trigger.
709 */
710 mozilla::Atomic<bool, mozilla::Relaxed> interruptPar;
711 #endif
713 /* Set when handling a signal for a thread associated with this runtime. */
714 bool handlingSignal;
716 JSInterruptCallback interruptCallback;
718 #ifdef DEBUG
719 void assertCanLock(js::RuntimeLock which);
720 #else
721 void assertCanLock(js::RuntimeLock which) {}
722 #endif
724 private:
725 /*
726 * Lock taken when triggering an interrupt from another thread.
727 * Protects all data that is touched in this process.
728 */
729 #ifdef JS_THREADSAFE
730 PRLock *interruptLock;
731 PRThread *interruptLockOwner;
732 #else
733 bool interruptLockTaken;
734 #endif // JS_THREADSAFE
735 public:
737 class AutoLockForInterrupt {
738 JSRuntime *rt;
739 public:
740 AutoLockForInterrupt(JSRuntime *rt MOZ_GUARD_OBJECT_NOTIFIER_PARAM) : rt(rt) {
741 MOZ_GUARD_OBJECT_NOTIFIER_INIT;
742 rt->assertCanLock(js::InterruptLock);
743 #ifdef JS_THREADSAFE
744 PR_Lock(rt->interruptLock);
745 rt->interruptLockOwner = PR_GetCurrentThread();
746 #else
747 rt->interruptLockTaken = true;
748 #endif // JS_THREADSAFE
749 }
750 ~AutoLockForInterrupt() {
751 JS_ASSERT(rt->currentThreadOwnsInterruptLock());
752 #ifdef JS_THREADSAFE
753 rt->interruptLockOwner = nullptr;
754 PR_Unlock(rt->interruptLock);
755 #else
756 rt->interruptLockTaken = false;
757 #endif // JS_THREADSAFE
758 }
760 MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
761 };
763 bool currentThreadOwnsInterruptLock() {
764 #if defined(JS_THREADSAFE)
765 return interruptLockOwner == PR_GetCurrentThread();
766 #else
767 return interruptLockTaken;
768 #endif
769 }
771 #ifdef JS_THREADSAFE
773 private:
774 /*
775 * Lock taken when using per-runtime or per-zone data that could otherwise
776 * be accessed simultaneously by both the main thread and another thread
777 * with an ExclusiveContext.
778 *
779 * Locking this only occurs if there is actually a thread other than the
780 * main thread with an ExclusiveContext which could access such data.
781 */
782 PRLock *exclusiveAccessLock;
783 mozilla::DebugOnly<PRThread *> exclusiveAccessOwner;
784 mozilla::DebugOnly<bool> mainThreadHasExclusiveAccess;
786 /* Number of non-main threads with an ExclusiveContext. */
787 size_t numExclusiveThreads;
789 friend class js::AutoLockForExclusiveAccess;
791 public:
792 void setUsedByExclusiveThread(JS::Zone *zone);
793 void clearUsedByExclusiveThread(JS::Zone *zone);
795 #endif // JS_THREADSAFE
797 #ifdef DEBUG
798 bool currentThreadHasExclusiveAccess() {
799 #ifdef JS_THREADSAFE
800 return (!numExclusiveThreads && mainThreadHasExclusiveAccess) ||
801 exclusiveAccessOwner == PR_GetCurrentThread();
802 #else
803 return true;
804 #endif
805 }
806 #endif // DEBUG
808 bool exclusiveThreadsPresent() const {
809 #ifdef JS_THREADSAFE
810 return numExclusiveThreads > 0;
811 #else
812 return false;
813 #endif
814 }
816 /* Embedders can use this zone however they wish. */
817 JS::Zone *systemZone;
819 /* List of compartments and zones (protected by the GC lock). */
820 js::ZoneVector zones;
822 /* How many compartments there are across all zones. */
823 size_t numCompartments;
825 /* Locale-specific callbacks for string conversion. */
826 JSLocaleCallbacks *localeCallbacks;
828 /* Default locale for Internationalization API */
829 char *defaultLocale;
831 /* Default JSVersion. */
832 JSVersion defaultVersion_;
834 #ifdef JS_THREADSAFE
835 private:
836 /* See comment for JS_AbortIfWrongThread in jsapi.h. */
837 void *ownerThread_;
838 friend bool js::CurrentThreadCanAccessRuntime(JSRuntime *rt);
839 public:
840 #endif
842 /* Temporary arena pool used while compiling and decompiling. */
843 static const size_t TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE = 4 * 1024;
844 js::LifoAlloc tempLifoAlloc;
846 /*
847 * Free LIFO blocks are transferred to this allocator before being freed on
848 * the background GC thread.
849 */
850 js::LifoAlloc freeLifoAlloc;
852 private:
853 /*
854 * Both of these allocators are used for regular expression code, which is
855 * shared at the thread-data level.
856 */
857 JSC::ExecutableAllocator *execAlloc_;
858 WTF::BumpPointerAllocator *bumpAlloc_;
859 js::jit::JitRuntime *jitRuntime_;
861 /*
862 * Self-hosting state cloned on demand into other compartments. Shared with the parent
863 * runtime if there is one.
864 */
865 JSObject *selfHostingGlobal_;
867 /* Space for interpreter frames. */
868 js::InterpreterStack interpreterStack_;
870 JSC::ExecutableAllocator *createExecutableAllocator(JSContext *cx);
871 WTF::BumpPointerAllocator *createBumpPointerAllocator(JSContext *cx);
872 js::jit::JitRuntime *createJitRuntime(JSContext *cx);
874 public:
875 JSC::ExecutableAllocator *getExecAlloc(JSContext *cx) {
876 return execAlloc_ ? execAlloc_ : createExecutableAllocator(cx);
877 }
878 JSC::ExecutableAllocator &execAlloc() {
879 JS_ASSERT(execAlloc_);
880 return *execAlloc_;
881 }
882 JSC::ExecutableAllocator *maybeExecAlloc() {
883 return execAlloc_;
884 }
885 WTF::BumpPointerAllocator *getBumpPointerAllocator(JSContext *cx) {
886 return bumpAlloc_ ? bumpAlloc_ : createBumpPointerAllocator(cx);
887 }
888 js::jit::JitRuntime *getJitRuntime(JSContext *cx) {
889 return jitRuntime_ ? jitRuntime_ : createJitRuntime(cx);
890 }
891 js::jit::JitRuntime *jitRuntime() const {
892 return jitRuntime_;
893 }
894 bool hasJitRuntime() const {
895 return !!jitRuntime_;
896 }
897 js::InterpreterStack &interpreterStack() {
898 return interpreterStack_;
899 }
901 //-------------------------------------------------------------------------
902 // Self-hosting support
903 //-------------------------------------------------------------------------
905 bool initSelfHosting(JSContext *cx);
906 void finishSelfHosting();
907 void markSelfHostingGlobal(JSTracer *trc);
908 bool isSelfHostingGlobal(JSObject *global) {
909 return global == selfHostingGlobal_;
910 }
911 bool isSelfHostingCompartment(JSCompartment *comp);
912 bool cloneSelfHostedFunctionScript(JSContext *cx, js::Handle<js::PropertyName*> name,
913 js::Handle<JSFunction*> targetFun);
914 bool cloneSelfHostedValue(JSContext *cx, js::Handle<js::PropertyName*> name,
915 js::MutableHandleValue vp);
917 //-------------------------------------------------------------------------
918 // Locale information
919 //-------------------------------------------------------------------------
921 /*
922 * Set the default locale for the ECMAScript Internationalization API
923 * (Intl.Collator, Intl.NumberFormat, Intl.DateTimeFormat).
924 * Note that the Internationalization API encourages clients to
925 * specify their own locales.
926 * The locale string remains owned by the caller.
927 */
928 bool setDefaultLocale(const char *locale);
930 /* Reset the default locale to OS defaults. */
931 void resetDefaultLocale();
933 /* Gets current default locale. String remains owned by context. */
934 const char *getDefaultLocale();
936 JSVersion defaultVersion() { return defaultVersion_; }
937 void setDefaultVersion(JSVersion v) { defaultVersion_ = v; }
939 /* Base address of the native stack for the current thread. */
940 uintptr_t nativeStackBase;
942 /* The native stack size limit that runtime should not exceed. */
943 size_t nativeStackQuota[js::StackKindCount];
945 /* Context create/destroy callback. */
946 JSContextCallback cxCallback;
947 void *cxCallbackData;
949 /* Compartment destroy callback. */
950 JSDestroyCompartmentCallback destroyCompartmentCallback;
952 /* Zone destroy callback. */
953 JSZoneCallback destroyZoneCallback;
955 /* Zone sweep callback. */
956 JSZoneCallback sweepZoneCallback;
958 /* Call this to get the name of a compartment. */
959 JSCompartmentNameCallback compartmentNameCallback;
961 js::ActivityCallback activityCallback;
962 void *activityCallbackArg;
963 void triggerActivityCallback(bool active);
965 #ifdef JS_THREADSAFE
966 /* The request depth for this thread. */
967 unsigned requestDepth;
969 # ifdef DEBUG
970 unsigned checkRequestDepth;
971 # endif
972 #endif
974 #ifdef DEBUG
975 /*
976 * To help embedders enforce their invariants, we allow them to specify in
977 * advance which JSContext should be passed to JSAPI calls. If this is set
978 * to a non-null value, the assertSameCompartment machinery does double-
979 * duty (in debug builds) to verify that it matches the cx being used.
980 */
981 JSContext *activeContext;
982 #endif
984 /* Garbage collector state, used by jsgc.cpp. */
986 /* Garbage collector state has been successfully initialized. */
987 bool gcInitialized;
989 /*
990 * Set of all GC chunks with at least one allocated thing. The
991 * conservative GC uses it to quickly check if a possible GC thing points
992 * into an allocated chunk.
993 */
994 js::GCChunkSet gcChunkSet;
996 /*
997 * Doubly-linked lists of chunks from user and system compartments. The GC
998 * allocates its arenas from the corresponding list and when all arenas
999 * in the list head are taken, the chunk is removed from the list.
1000 * During the GC when all arenas in a chunk become free, that chunk is
1001 * removed from the list and scheduled for release.
1002 */
1003 js::gc::Chunk *gcSystemAvailableChunkListHead;
1004 js::gc::Chunk *gcUserAvailableChunkListHead;
1005 js::gc::ChunkPool gcChunkPool;
1007 js::RootedValueMap gcRootsHash;
1009 /* This is updated by both the main and GC helper threads. */
1010 mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gcBytes;
1012 size_t gcMaxBytes;
1013 size_t gcMaxMallocBytes;
1015 /*
1016 * Number of committed arenas in all GC chunks, including empty chunks.
1017 */
1018 mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> gcNumArenasFreeCommitted;
1019 js::GCMarker gcMarker;
1020 void *gcVerifyPreData;
1021 void *gcVerifyPostData;
1022 bool gcChunkAllocationSinceLastGC;
1023 int64_t gcNextFullGCTime;
1024 int64_t gcLastGCTime;
1025 int64_t gcJitReleaseTime;
1026 private:
1027 JSGCMode gcMode_;
1029 public:
1030 JSGCMode gcMode() const { return gcMode_; }
1031 void setGCMode(JSGCMode mode) {
1032 gcMode_ = mode;
1033 gcMarker.setGCMode(mode);
1034 }
1036 size_t gcAllocationThreshold;
1037 bool gcHighFrequencyGC;
1038 uint64_t gcHighFrequencyTimeThreshold;
1039 uint64_t gcHighFrequencyLowLimitBytes;
1040 uint64_t gcHighFrequencyHighLimitBytes;
1041 double gcHighFrequencyHeapGrowthMax;
1042 double gcHighFrequencyHeapGrowthMin;
1043 double gcLowFrequencyHeapGrowth;
1044 bool gcDynamicHeapGrowth;
1045 bool gcDynamicMarkSlice;
1046 uint64_t gcDecommitThreshold;
1048 /* During shutdown, the GC needs to clean up every possible object. */
1049 bool gcShouldCleanUpEverything;
1051 /*
1052 * The gray bits can become invalid if UnmarkGray overflows the stack. A
1053 * full GC will reset this bit, since it fills in all the gray bits.
1054 */
1055 bool gcGrayBitsValid;
1057 /*
1058 * These flags must be kept separate so that a thread requesting a
1059 * compartment GC doesn't cancel another thread's concurrent request for a
1060 * full GC.
1061 */
1062 volatile uintptr_t gcIsNeeded;
1064 js::gcstats::Statistics gcStats;
1066 /* Incremented on every GC slice. */
1067 uint64_t gcNumber;
1069 /* The gcNumber at the time of the most recent GC's first slice. */
1070 uint64_t gcStartNumber;
1072 /* Whether the currently running GC can finish in multiple slices. */
1073 bool gcIsIncremental;
1075 /* Whether all compartments are being collected in first GC slice. */
1076 bool gcIsFull;
1078 /* The reason that an interrupt-triggered GC should be called. */
1079 JS::gcreason::Reason gcTriggerReason;
1081 /*
1082 * If this is true, all marked objects must belong to a compartment being
1083 * GCed. This is used to look for compartment bugs.
1084 */
1085 bool gcStrictCompartmentChecking;
1087 #ifdef DEBUG
1088 /*
1089 * If this is 0, all cross-compartment proxies must be registered in the
1090 * wrapper map. This checking must be disabled temporarily while creating
1091 * new wrappers. When non-zero, this records the recursion depth of wrapper
1092 * creation.
1093 */
1094 uintptr_t gcDisableStrictProxyCheckingCount;
1095 #else
1096 uintptr_t unused1;
1097 #endif
1099 /*
1100 * The current incremental GC phase. This is also used internally in
1101 * non-incremental GC.
1102 */
1103 js::gc::State gcIncrementalState;
1105 /* Indicates that the last incremental slice exhausted the mark stack. */
1106 bool gcLastMarkSlice;
1108 /* Whether any sweeping will take place in the separate GC helper thread. */
1109 bool gcSweepOnBackgroundThread;
1111 /* Whether any black->gray edges were found during marking. */
1112 bool gcFoundBlackGrayEdges;
1114 /* List head of zones to be swept in the background. */
1115 JS::Zone *gcSweepingZones;
1117 /* Index of current zone group (for stats). */
1118 unsigned gcZoneGroupIndex;
1120 /*
1121 * Incremental sweep state.
1122 */
1123 JS::Zone *gcZoneGroups;
1124 JS::Zone *gcCurrentZoneGroup;
1125 int gcSweepPhase;
1126 JS::Zone *gcSweepZone;
1127 int gcSweepKindIndex;
1128 bool gcAbortSweepAfterCurrentGroup;
1130 /*
1131 * List head of arenas allocated during the sweep phase.
1132 */
1133 js::gc::ArenaHeader *gcArenasAllocatedDuringSweep;
1135 #ifdef DEBUG
1136 js::gc::MarkingValidator *gcMarkingValidator;
1137 #endif
1139 /*
1140 * Indicates that a GC slice has taken place in the middle of an animation
1141 * frame, rather than at the beginning. In this case, the next slice will be
1142 * delayed so that we don't get back-to-back slices.
1143 */
1144 volatile uintptr_t gcInterFrameGC;
1146 /* Default budget for incremental GC slice. See SliceBudget in jsgc.h. */
1147 int64_t gcSliceBudget;
1149 /*
1150 * We disable incremental GC if we encounter a js::Class with a trace hook
1151 * that does not implement write barriers.
1152 */
1153 bool gcIncrementalEnabled;
1155 /*
1156 * GGC can be enabled from the command line while testing.
1157 */
1158 unsigned gcGenerationalDisabled;
1160 /*
1161 * This is true if we are in the middle of a brain transplant (e.g.,
1162 * JS_TransplantObject) or some other operation that can manipulate
1163 * dead zones.
1164 */
1165 bool gcManipulatingDeadZones;
1167 /*
1168 * This field is incremented each time we mark an object inside a
1169 * zone with no incoming cross-compartment pointers. Typically if
1170 * this happens it signals that an incremental GC is marking too much
1171 * stuff. At various times we check this counter and, if it has changed, we
1172 * run an immediate, non-incremental GC to clean up the dead
1173 * zones. This should happen very rarely.
1174 */
1175 unsigned gcObjectsMarkedInDeadZones;
1177 bool gcPoke;
1179 volatile js::HeapState heapState;
1181 bool isHeapBusy() { return heapState != js::Idle; }
1182 bool isHeapMajorCollecting() { return heapState == js::MajorCollecting; }
1183 bool isHeapMinorCollecting() { return heapState == js::MinorCollecting; }
1184 bool isHeapCollecting() { return isHeapMajorCollecting() || isHeapMinorCollecting(); }
1186 #ifdef JSGC_GENERATIONAL
1187 js::Nursery gcNursery;
1188 js::gc::StoreBuffer gcStoreBuffer;
1189 #endif
1191 /*
1192 * These options control the zealousness of the GC. The fundamental values
1193 * are gcNextScheduled and gcDebugCompartmentGC. At every allocation,
1194 * gcNextScheduled is decremented. When it reaches zero, we do either a
1195 * full or a compartmental GC, based on gcDebugCompartmentGC.
1196 *
1197 * At this point, if gcZeal_ is one of the types that trigger periodic
1198 * collection, then gcNextScheduled is reset to the value of
1199 * gcZealFrequency. Otherwise, no additional GCs take place.
1200 *
1201 * You can control these values in several ways:
1202 * - Pass the -Z flag to the shell (see the usage info for details)
1203 * - Call gczeal() or schedulegc() from inside shell-executed JS code
1204 * (see the help for details)
1205 *
1206 * If gcZeal_ == 1 then we perform GCs in select places (during MaybeGC and
1207 * whenever a GC poke happens). This option is mainly useful to embedders.
1208 *
1209 * We use gcZeal_ == 4 to enable write barrier verification. See the comment
1210 * in jsgc.cpp for more information about this.
1211 *
1212 * gcZeal_ values from 8 to 10 periodically run different types of
1213 * incremental GC.
1214 */
1215 #ifdef JS_GC_ZEAL
1216 int gcZeal_;
1217 int gcZealFrequency;
1218 int gcNextScheduled;
1219 bool gcDeterministicOnly;
1220 int gcIncrementalLimit;
1222 js::Vector<JSObject *, 0, js::SystemAllocPolicy> gcSelectedForMarking;
1224 int gcZeal() { return gcZeal_; }
1226 bool upcomingZealousGC() {
1227 return gcNextScheduled == 1;
1228 }
1230 bool needZealousGC() {
1231 if (gcNextScheduled > 0 && --gcNextScheduled == 0) {
1232 if (gcZeal() == js::gc::ZealAllocValue ||
1233 gcZeal() == js::gc::ZealGenerationalGCValue ||
1234 (gcZeal() >= js::gc::ZealIncrementalRootsThenFinish &&
1235 gcZeal() <= js::gc::ZealIncrementalMultipleSlices))
1236 {
1237 gcNextScheduled = gcZealFrequency;
1238 }
1239 return true;
1240 }
1241 return false;
1242 }
1243 #else
1244 int gcZeal() { return 0; }
1245 bool upcomingZealousGC() { return false; }
1246 bool needZealousGC() { return false; }
1247 #endif
1249 bool gcValidate;
1250 bool gcFullCompartmentChecks;
1252 JSGCCallback gcCallback;
1253 JS::GCSliceCallback gcSliceCallback;
1254 JSFinalizeCallback gcFinalizeCallback;
1256 void *gcCallbackData;
1258 private:
1259 /*
1260 * Malloc counter to measure memory pressure for GC scheduling. It runs
1261 * from gcMaxMallocBytes down to zero.
1262 */
1263 mozilla::Atomic<ptrdiff_t, mozilla::ReleaseAcquire> gcMallocBytes;
1265 /*
1266 * Whether a GC has been triggered as a result of gcMallocBytes falling
1267 * below zero.
1268 */
1269 mozilla::Atomic<bool, mozilla::ReleaseAcquire> gcMallocGCTriggered;
1271 #ifdef JS_ARM_SIMULATOR
1272 js::jit::SimulatorRuntime *simulatorRuntime_;
1273 #endif
1275 public:
1276 void setNeedsBarrier(bool needs) {
1277 needsBarrier_ = needs;
1278 }
1280 struct ExtraTracer {
1281 JSTraceDataOp op;
1282 void *data;
1284 ExtraTracer()
1285 : op(nullptr), data(nullptr)
1286 {}
1287 ExtraTracer(JSTraceDataOp op, void *data)
1288 : op(op), data(data)
1289 {}
1290 };
1292 #ifdef JS_ARM_SIMULATOR
1293 js::jit::SimulatorRuntime *simulatorRuntime() const;
1294 void setSimulatorRuntime(js::jit::SimulatorRuntime *srt);
1295 #endif
1297 /*
1298 * The trace operations to trace embedding-specific GC roots. One is for
1299 * tracing through black roots and the other is for tracing through gray
1300 * roots. The black/gray distinction is only relevant to the cycle
1301 * collector.
1302 */
1303 typedef js::Vector<ExtraTracer, 4, js::SystemAllocPolicy> ExtraTracerVector;
1304 ExtraTracerVector gcBlackRootTracers;
1305 ExtraTracer gcGrayRootTracer;
1307 /*
1308 * The GC can only safely decommit memory when the page size of the
1309 * running process matches the compiled arena size.
1310 */
1311 size_t gcSystemPageSize;
1313 /* The OS allocation granularity may not match the page size. */
1314 size_t gcSystemAllocGranularity;
1316 /* Strong references on scripts held for PCCount profiling API. */
1317 js::ScriptAndCountsVector *scriptAndCountsVector;
1319 /* Well-known numbers held for use by this runtime's contexts. */
1320 const js::Value NaNValue;
1321 const js::Value negativeInfinityValue;
1322 const js::Value positiveInfinityValue;
1324 js::PropertyName *emptyString;
1326 /* List of active contexts sharing this runtime. */
1327 mozilla::LinkedList<JSContext> contextList;
1329 bool hasContexts() const {
1330 return !contextList.isEmpty();
1331 }
1333 mozilla::ScopedDeletePtr<js::SourceHook> sourceHook;
1335 /* Per runtime debug hooks -- see js/OldDebugAPI.h. */
1336 JSDebugHooks debugHooks;
1338 /* If true, new compartments are initially in debug mode. */
1339 bool debugMode;
1341 /* SPS profiling metadata */
1342 js::SPSProfiler spsProfiler;
1344 /* If true, new scripts must be created with PC counter information. */
1345 bool profilingScripts;
1347 /* Always preserve JIT code during GCs, for testing. */
1348 bool alwaysPreserveCode;
1350 /* Had an out-of-memory error which did not populate an exception. */
1351 bool hadOutOfMemory;
1353 /* A context has been created on this runtime. */
1354 bool haveCreatedContext;
1356 /* Linked list of all Debugger objects in the runtime. */
1357 mozilla::LinkedList<js::Debugger> debuggerList;
1359 /*
1360 * Head of circular list of all enabled Debuggers that have
1361 * onNewGlobalObject handler methods established.
1362 */
1363 JSCList onNewGlobalObjectWatchers;
1365 /* Client opaque pointers */
1366 void *data;
1368 private:
1369 /* Synchronize GC heap access between main thread and GCHelperThread. */
1370 PRLock *gcLock;
1371 mozilla::DebugOnly<PRThread *> gcLockOwner;
1373 friend class js::GCHelperThread;
1374 public:
1376 void lockGC() {
1377 #ifdef JS_THREADSAFE
1378 assertCanLock(js::GCLock);
1379 PR_Lock(gcLock);
1380 JS_ASSERT(!gcLockOwner);
1381 #ifdef DEBUG
1382 gcLockOwner = PR_GetCurrentThread();
1383 #endif
1384 #endif
1385 }
1387 void unlockGC() {
1388 #ifdef JS_THREADSAFE
1389 JS_ASSERT(gcLockOwner == PR_GetCurrentThread());
1390 gcLockOwner = nullptr;
1391 PR_Unlock(gcLock);
1392 #endif
1393 }
1395 js::GCHelperThread gcHelperThread;
1397 #if defined(XP_MACOSX) && defined(JS_ION)
1398 js::AsmJSMachExceptionHandler asmJSMachExceptionHandler;
1399 #endif
1401 // Whether asm.js signal handlers have been installed and can be used for
1402 // performing interrupt checks in loops.
1403 private:
1404 bool signalHandlersInstalled_;
1405 public:
1406 bool signalHandlersInstalled() const {
1407 return signalHandlersInstalled_;
1408 }
1410 private:
1411 js::FreeOp defaultFreeOp_;
1413 public:
1414 js::FreeOp *defaultFreeOp() {
1415 return &defaultFreeOp_;
1416 }
1418 uint32_t debuggerMutations;
1420 const JSSecurityCallbacks *securityCallbacks;
1421 const js::DOMCallbacks *DOMcallbacks;
1422 JSDestroyPrincipalsOp destroyPrincipals;
1424 /* Structured data callbacks are runtime-wide. */
1425 const JSStructuredCloneCallbacks *structuredCloneCallbacks;
1427 /* Call this to accumulate telemetry data. */
1428 JSAccumulateTelemetryDataCallback telemetryCallback;
1430 /* AsmJSCache callbacks are runtime-wide. */
1431 JS::AsmJSCacheOps asmJSCacheOps;
1433 /*
1434 * The propertyRemovals counter is incremented for every JSObject::clear,
1435 * and for each JSObject::remove method call that frees a slot in the given
1436 * object. See js_NativeGet and js_NativeSet in jsobj.cpp.
1437 */
1438 uint32_t propertyRemovals;
1440 #if !EXPOSE_INTL_API
1441 /* Number localization, used by jsnum.cpp. */
1442 const char *thousandsSeparator;
1443 const char *decimalSeparator;
1444 const char *numGrouping;
1445 #endif
1447 private:
1448 js::MathCache *mathCache_;
1449 js::MathCache *createMathCache(JSContext *cx);
1450 public:
1451 js::MathCache *getMathCache(JSContext *cx) {
1452 return mathCache_ ? mathCache_ : createMathCache(cx);
1453 }
1454 js::MathCache *maybeGetMathCache() {
1455 return mathCache_;
1456 }
1458 js::GSNCache gsnCache;
1459 js::ScopeCoordinateNameCache scopeCoordinateNameCache;
1460 js::NewObjectCache newObjectCache;
1461 js::NativeIterCache nativeIterCache;
1462 js::SourceDataCache sourceDataCache;
1463 js::EvalCache evalCache;
1464 js::LazyScriptCache lazyScriptCache;
1466 js::DateTimeInfo dateTimeInfo;
1468 js::ConservativeGCData conservativeGC;
1470 // Pool of maps used during parse/emit. This may be modified by threads
1471 // with an ExclusiveContext and requires a lock. Active compilations
1472 // prevent the pool from being purged during GCs.
1473 private:
1474 js::frontend::ParseMapPool parseMapPool_;
1475 unsigned activeCompilations_;
1476 public:
1477 js::frontend::ParseMapPool &parseMapPool() {
1478 JS_ASSERT(currentThreadHasExclusiveAccess());
1479 return parseMapPool_;
1480 }
1481 bool hasActiveCompilations() {
1482 return activeCompilations_ != 0;
1483 }
1484 void addActiveCompilation() {
1485 JS_ASSERT(currentThreadHasExclusiveAccess());
1486 activeCompilations_++;
1487 }
1488 void removeActiveCompilation() {
1489 JS_ASSERT(currentThreadHasExclusiveAccess());
1490 activeCompilations_--;
1491 }
1493 // Count of AutoKeepAtoms instances on the main thread's stack. When any
1494 // instances exist, atoms in the runtime will not be collected. Threads
1495 // with an ExclusiveContext do not increment this value, but the presence
1496 // of any such threads also inhibits collection of atoms. We don't scan the
1497 // stacks of exclusive threads, so we need to avoid collecting their
1498 // objects in another way. The only GC thing pointers they have are to
1499 // their exclusive compartment (which is not collected) or to the atoms
1500 // compartment. Therefore, we avoid collecting the atoms compartment when
1501 // exclusive threads are running.
1502 private:
1503 unsigned keepAtoms_;
1504 friend class js::AutoKeepAtoms;
1505 public:
1506 bool keepAtoms() {
1507 JS_ASSERT(CurrentThreadCanAccessRuntime(this));
1508 return keepAtoms_ != 0 || exclusiveThreadsPresent();
1509 }
1511 private:
1512 const JSPrincipals *trustedPrincipals_;
1513 public:
1514 void setTrustedPrincipals(const JSPrincipals *p) { trustedPrincipals_ = p; }
1515 const JSPrincipals *trustedPrincipals() const { return trustedPrincipals_; }
1517 private:
1518 bool beingDestroyed_;
1519 public:
1520 bool isBeingDestroyed() const {
1521 return beingDestroyed_;
1522 }
1524 private:
1525 // Set of all atoms other than those in permanentAtoms and staticStrings.
1526 // This may be modified by threads with an ExclusiveContext and requires
1527 // a lock.
1528 js::AtomSet *atoms_;
1530 // Compartment and associated zone containing all atoms in the runtime,
1531 // as well as runtime wide IonCode stubs. The contents of this compartment
1532 // may be modified by threads with an ExclusiveContext and requires a lock.
1533 JSCompartment *atomsCompartment_;
1535 public:
1536 bool initializeAtoms(JSContext *cx);
1537 void finishAtoms();
1539 void sweepAtoms();
1541 js::AtomSet &atoms() {
1542 JS_ASSERT(currentThreadHasExclusiveAccess());
1543 return *atoms_;
1544 }
1545 JSCompartment *atomsCompartment() {
1546 JS_ASSERT(currentThreadHasExclusiveAccess());
1547 return atomsCompartment_;
1548 }
1550 bool isAtomsCompartment(JSCompartment *comp) {
1551 return comp == atomsCompartment_;
1552 }
1554 // The atoms compartment is the only one in its zone.
1555 inline bool isAtomsZone(JS::Zone *zone);
1557 bool activeGCInAtomsZone();
1559 // Permanent atoms are fixed during initialization of the runtime and are
1560 // not modified or collected until the runtime is destroyed. These may be
1561 // shared with another, longer-lived runtime through |parentRuntime| and
1562 // can be freely accessed with no locking necessary.
1564 // Permanent atoms pre-allocated for general use.
1565 js::StaticStrings *staticStrings;
1567 // Cached pointers to various permanent property names.
1568 JSAtomState *commonNames;
1570 // All permanent atoms in the runtime, other than those in staticStrings.
1571 js::AtomSet *permanentAtoms;
1573 bool transformToPermanentAtoms();
1575 const JSWrapObjectCallbacks *wrapObjectCallbacks;
1576 js::PreserveWrapperCallback preserveWrapperCallback;
1578 // Table of bytecode and other data that may be shared across scripts
1579 // within the runtime. This may be modified by threads with an
1580 // ExclusiveContext and requires a lock.
1581 private:
1582 js::ScriptDataTable scriptDataTable_;
1583 public:
1584 js::ScriptDataTable &scriptDataTable() {
1585 JS_ASSERT(currentThreadHasExclusiveAccess());
1586 return scriptDataTable_;
1587 }
1589 #ifdef DEBUG
1590 size_t noGCOrAllocationCheck;
1591 #endif
1593 bool jitSupportsFloatingPoint;
1595 // Used to reset stack limit after a signaled interrupt (i.e. jitStackLimit_ = -1)
1596 // has been noticed by Ion/Baseline.
1597 void resetJitStackLimit();
1599 // Cache for jit::GetPcScript().
1600 js::jit::PcScriptCache *ionPcScriptCache;
1602 js::ThreadPool threadPool;
1604 js::DefaultJSContextCallback defaultJSContextCallback;
1606 js::CTypesActivityCallback ctypesActivityCallback;
1608 // Non-zero if this is a ForkJoin warmup execution. See
1609 // js::ForkJoin() for more information.
1610 uint32_t forkJoinWarmup;
1612 private:
1613 // In certain cases, we want to optimize certain opcodes to typed instructions,
1614 // to avoid carrying an extra register to feed into an unbox. Unfortunately,
1615 // that's not always possible. For example, a GetPropertyCacheT could return a
1616 // typed double, but if it takes its out-of-line path, it could return an
1617 // object, and trigger invalidation. The invalidation bailout will consider the
1618 // return value to be a double, and create a garbage Value.
1619 //
1620 // To allow the GetPropertyCacheT optimization, we allow the ability for
1621 // GetPropertyCache to override the return value at the top of the stack - the
1622 // value that will be temporarily corrupt. This special override value is set
1623 // only in callVM() targets that are about to return *and* have invalidated
1624 // their callee.
1625 js::Value ionReturnOverride_;
1627 #ifdef JS_THREADSAFE
1628 static mozilla::Atomic<size_t> liveRuntimesCount;
1629 #else
1630 static size_t liveRuntimesCount;
1631 #endif
1633 public:
1634 static bool hasLiveRuntimes() {
1635 return liveRuntimesCount > 0;
1636 }
1638 bool hasIonReturnOverride() const {
1639 return !ionReturnOverride_.isMagic();
1640 }
1641 js::Value takeIonReturnOverride() {
1642 js::Value v = ionReturnOverride_;
1643 ionReturnOverride_ = js::MagicValue(JS_ARG_POISON);
1644 return v;
1645 }
1646 void setIonReturnOverride(const js::Value &v) {
1647 JS_ASSERT(!hasIonReturnOverride());
1648 ionReturnOverride_ = v;
1649 }
1651 JSRuntime(JSRuntime *parentRuntime, JSUseHelperThreads useHelperThreads);
1652 ~JSRuntime();
1654 bool init(uint32_t maxbytes);
1656 JSRuntime *thisFromCtor() { return this; }
1658 void setGCMaxMallocBytes(size_t value);
1660 void resetGCMallocBytes() {
1661 gcMallocBytes = ptrdiff_t(gcMaxMallocBytes);
1662 gcMallocGCTriggered = false;
1663 }
1665 /*
1666 * Call this after allocating memory held by GC things, to update memory
1667 * pressure counters or report the OOM error if necessary. If oomError and
1668 * cx are not null, the function also reports the OOM error.
1669 *
1670 * The function must be called outside the GC lock. In case of an OOM error,
1671 * the caller must ensure that no deadlock is possible during OOM reporting.
1672 */
1673 void updateMallocCounter(size_t nbytes);
1674 void updateMallocCounter(JS::Zone *zone, size_t nbytes);
1676 void reportAllocationOverflow() { js_ReportAllocationOverflow(nullptr); }
1678 bool isTooMuchMalloc() const {
1679 return gcMallocBytes <= 0;
1680 }
1682 /*
1683 * The function must be called outside the GC lock.
1684 */
1685 JS_FRIEND_API(void) onTooMuchMalloc();
1687 /*
1688 * This should be called after system malloc/realloc returns nullptr to try
1689 * to recover some memory or to report an error. Failures in malloc and
1690 * calloc are signaled by p == null and p == reinterpret_cast<void *>(1).
1691 * Other values of p mean a realloc failure.
1692 *
1693 * The function must be called outside the GC lock.
1694 */
1695 JS_FRIEND_API(void *) onOutOfMemory(void *p, size_t nbytes);
1696 JS_FRIEND_API(void *) onOutOfMemory(void *p, size_t nbytes, JSContext *cx);
1698 // Ways in which the interrupt callback on the runtime can be triggered,
1699 // varying based on which thread is triggering the callback.
1700 enum InterruptMode {
1701 RequestInterruptMainThread,
1702 RequestInterruptAnyThread,
1703 RequestInterruptAnyThreadDontStopIon,
1704 RequestInterruptAnyThreadForkJoin
1705 };
1707 void requestInterrupt(InterruptMode mode);
1709 void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf, JS::RuntimeSizes *runtime);
1711 private:
1712 JS::RuntimeOptions options_;
1714 JSUseHelperThreads useHelperThreads_;
1716 // Settings for how helper threads can be used.
1717 bool parallelIonCompilationEnabled_;
1718 bool parallelParsingEnabled_;
1720 // True iff this is a DOM Worker runtime.
1721 bool isWorkerRuntime_;
1723 public:
1725 // This controls whether the JSRuntime is allowed to create any helper
1726 // threads at all. This means both specific threads (background GC thread)
1727 // and the general JS worker thread pool.
1728 bool useHelperThreads() const {
1729 #ifdef JS_THREADSAFE
1730 return useHelperThreads_ == JS_USE_HELPER_THREADS;
1731 #else
1732 return false;
1733 #endif
1734 }
1736 // Note: these values may be toggled dynamically (in response to about:config
1737 // prefs changing).
1738 void setParallelIonCompilationEnabled(bool value) {
1739 parallelIonCompilationEnabled_ = value;
1740 }
1741 bool canUseParallelIonCompilation() const {
1742 return useHelperThreads() &&
1743 parallelIonCompilationEnabled_;
1744 }
1745 void setParallelParsingEnabled(bool value) {
1746 parallelParsingEnabled_ = value;
1747 }
1748 bool canUseParallelParsing() const {
1749 return useHelperThreads() &&
1750 parallelParsingEnabled_;
1751 }
1753 void setIsWorkerRuntime() {
1754 isWorkerRuntime_ = true;
1755 }
1756 bool isWorkerRuntime() const {
1757 return isWorkerRuntime_;
1758 }
1760 const JS::RuntimeOptions &options() const {
1761 return options_;
1762 }
1763 JS::RuntimeOptions &options() {
1764 return options_;
1765 }
1767 #ifdef DEBUG
1768 public:
1769 js::AutoEnterPolicy *enteredPolicy;
1770 #endif
1772 /* See comment for JS::SetLargeAllocationFailureCallback in jsapi.h. */
1773 JS::LargeAllocationFailureCallback largeAllocationFailureCallback;
1774 /* See comment for JS::SetOutOfMemoryCallback in jsapi.h. */
1775 JS::OutOfMemoryCallback oomCallback;
1777 /*
1778 * These variations of malloc/calloc/realloc will call the
1779 * large-allocation-failure callback on OOM and retry the allocation.
1780 */
1782 static const unsigned LARGE_ALLOCATION = 25 * 1024 * 1024;
1784 void *callocCanGC(size_t bytes) {
1785 void *p = calloc_(bytes);
1786 if (MOZ_LIKELY(!!p))
1787 return p;
1788 if (!largeAllocationFailureCallback || bytes < LARGE_ALLOCATION)
1789 return nullptr;
1790 largeAllocationFailureCallback();
1791 return onOutOfMemory(reinterpret_cast<void *>(1), bytes);
1792 }
1794 void *reallocCanGC(void *p, size_t bytes) {
1795 void *p2 = realloc_(p, bytes);
1796 if (MOZ_LIKELY(!!p2))
1797 return p2;
1798 if (!largeAllocationFailureCallback || bytes < LARGE_ALLOCATION)
1799 return nullptr;
1800 largeAllocationFailureCallback();
1801 return onOutOfMemory(p, bytes);
1802 }
1803 };
1805 namespace js {
1807 // When entering JIT code, the calling JSContext* is stored into the thread's
1808 // PerThreadData. This function retrieves the JSContext with the pre-condition
1809 // that the caller is JIT code or C++ called directly from JIT code. This
1810 // function should not be called from arbitrary locations since the JSContext
1811 // may be the wrong one.
1812 static inline JSContext *
1813 GetJSContextFromJitCode()
1814 {
1815 JSContext *cx = TlsPerThreadData.get()->jitJSContext;
1816 JS_ASSERT(cx);
1817 return cx;
1818 }
1820 /*
1821 * Flags accompany script version data so that a) dynamically created scripts
1822 * can inherit their caller's compile-time properties and b) scripts can be
1823 * appropriately compared in the eval cache across global option changes. An
1824 * example of the latter is enabling the top-level-anonymous-function-is-error
1825 * option: subsequent evals of the same, previously-valid script text may have
1826 * become invalid.
1827 */
1828 namespace VersionFlags {
1829 static const unsigned MASK = 0x0FFF; /* see JSVersion in jspubtd.h */
1830 } /* namespace VersionFlags */
1832 static inline JSVersion
1833 VersionNumber(JSVersion version)
1834 {
1835 return JSVersion(uint32_t(version) & VersionFlags::MASK);
1836 }
1838 static inline JSVersion
1839 VersionExtractFlags(JSVersion version)
1840 {
1841 return JSVersion(uint32_t(version) & ~VersionFlags::MASK);
1842 }
1844 static inline void
1845 VersionCopyFlags(JSVersion *version, JSVersion from)
1846 {
1847 *version = JSVersion(VersionNumber(*version) | VersionExtractFlags(from));
1848 }
1850 static inline bool
1851 VersionHasFlags(JSVersion version)
1852 {
1853 return !!VersionExtractFlags(version);
1854 }
1856 static inline bool
1857 VersionIsKnown(JSVersion version)
1858 {
1859 return VersionNumber(version) != JSVERSION_UNKNOWN;
1860 }
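/*
 * Illustrative worked example (not part of the upstream header): with
 * VersionFlags::MASK == 0x0FFF, the low 12 bits carry the version number and
 * the remaining high bits carry option flags. For a hypothetical packed
 * |version| value of 0x10F5:
 *
 *   VersionNumber(version)       == JSVersion(0x00F5)
 *   VersionExtractFlags(version) == JSVersion(0x1000)
 *   VersionHasFlags(version)     == true
 *
 * and VersionCopyFlags(&dst, version) ORs that 0x1000 flag bit onto
 * VersionNumber(dst) while leaving dst's own version number unchanged.
 */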
1862 inline void
1863 FreeOp::free_(void *p)
1864 {
1865 if (shouldFreeLater()) {
1866 runtime()->gcHelperThread.freeLater(p);
1867 return;
1868 }
1869 js_free(p);
1870 }
1872 class AutoLockGC
1873 {
1874 public:
1875 explicit AutoLockGC(JSRuntime *rt = nullptr
1876 MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
1877 : runtime(rt)
1878 {
1879 MOZ_GUARD_OBJECT_NOTIFIER_INIT;
1880 // Avoid MSVC warning C4390 for non-threadsafe builds.
1881 if (rt)
1882 rt->lockGC();
1883 }
1885 ~AutoLockGC()
1886 {
1887 if (runtime)
1888 runtime->unlockGC();
1889 }
1891 bool locked() const {
1892 return !!runtime;
1893 }
1895 void lock(JSRuntime *rt) {
1896 JS_ASSERT(rt);
1897 JS_ASSERT(!runtime);
1898 runtime = rt;
1899 rt->lockGC();
1900 }
1902 private:
1903 JSRuntime *runtime;
1904 MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
1905 };
1907 class AutoUnlockGC
1908 {
1909 private:
1910 JSRuntime *rt;
1911 MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
1913 public:
1914 explicit AutoUnlockGC(JSRuntime *rt
1915 MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
1916 : rt(rt)
1917 {
1918 MOZ_GUARD_OBJECT_NOTIFIER_INIT;
1919 rt->unlockGC();
1920 }
1921 ~AutoUnlockGC() { rt->lockGC(); }
1922 };
1924 class MOZ_STACK_CLASS AutoKeepAtoms
1925 {
1926 PerThreadData *pt;
1927 MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
1929 public:
1930 explicit AutoKeepAtoms(PerThreadData *pt
1931 MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
1932 : pt(pt)
1933 {
1934 MOZ_GUARD_OBJECT_NOTIFIER_INIT;
1935 if (JSRuntime *rt = pt->runtimeIfOnOwnerThread()) {
1936 rt->keepAtoms_++;
1937 } else {
1938 // This should be a thread with an exclusive context, which will
1939 // always inhibit collection of atoms.
1940 JS_ASSERT(pt->exclusiveThreadsPresent());
1941 }
1942 }
1943 ~AutoKeepAtoms() {
1944 if (JSRuntime *rt = pt->runtimeIfOnOwnerThread()) {
1945 JS_ASSERT(rt->keepAtoms_);
1946 rt->keepAtoms_--;
1947 }
1948 }
1949 };
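/*
 * Illustrative sketch (not part of the upstream header): code that wants raw
 * atom pointers to stay valid across a region that might otherwise collect
 * the atoms compartment keeps them alive with a stack-scoped AutoKeepAtoms.
 * The way the PerThreadData is reached below (via the runtime's mainThread)
 * is only one possibility, sketched for main-thread code:
 *
 *   {
 *       AutoKeepAtoms keepAtoms(&cx->runtime()->mainThread);
 *       // ... look up or create atoms and use them without extra rooting ...
 *   }   // keepAtoms_ drops back to zero; atoms may be collected again
 */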
1951 inline void
1952 PerThreadData::setJitStackLimit(uintptr_t limit)
1953 {
1954 JS_ASSERT(runtime_->currentThreadOwnsInterruptLock());
1955 jitStackLimit = limit;
1956 }
1958 inline JSRuntime *
1959 PerThreadData::runtimeFromMainThread()
1960 {
1961 JS_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
1962 return runtime_;
1963 }
1965 inline JSRuntime *
1966 PerThreadData::runtimeIfOnOwnerThread()
1967 {
1968 return CurrentThreadCanAccessRuntime(runtime_) ? runtime_ : nullptr;
1969 }
1971 inline bool
1972 PerThreadData::exclusiveThreadsPresent()
1973 {
1974 return runtime_->exclusiveThreadsPresent();
1975 }
1977 inline void
1978 PerThreadData::addActiveCompilation()
1979 {
1980 activeCompilations++;
1981 runtime_->addActiveCompilation();
1982 }
1984 inline void
1985 PerThreadData::removeActiveCompilation()
1986 {
1987 JS_ASSERT(activeCompilations);
1988 activeCompilations--;
1989 runtime_->removeActiveCompilation();
1990 }
1992 /************************************************************************/
1994 static MOZ_ALWAYS_INLINE void
1995 MakeRangeGCSafe(Value *vec, size_t len)
1996 {
1997 mozilla::PodZero(vec, len);
1998 }
2000 static MOZ_ALWAYS_INLINE void
2001 MakeRangeGCSafe(Value *beg, Value *end)
2002 {
2003 mozilla::PodZero(beg, end - beg);
2004 }
2006 static MOZ_ALWAYS_INLINE void
2007 MakeRangeGCSafe(jsid *beg, jsid *end)
2008 {
2009 for (jsid *id = beg; id != end; ++id)
2010 *id = INT_TO_JSID(0);
2011 }
2013 static MOZ_ALWAYS_INLINE void
2014 MakeRangeGCSafe(jsid *vec, size_t len)
2015 {
2016 MakeRangeGCSafe(vec, vec + len);
2017 }
2019 static MOZ_ALWAYS_INLINE void
2020 MakeRangeGCSafe(Shape **beg, Shape **end)
2021 {
2022 mozilla::PodZero(beg, end - beg);
2023 }
2025 static MOZ_ALWAYS_INLINE void
2026 MakeRangeGCSafe(Shape **vec, size_t len)
2027 {
2028 mozilla::PodZero(vec, len);
2029 }
2031 static MOZ_ALWAYS_INLINE void
2032 SetValueRangeToUndefined(Value *beg, Value *end)
2033 {
2034 for (Value *v = beg; v != end; ++v)
2035 v->setUndefined();
2036 }
2038 static MOZ_ALWAYS_INLINE void
2039 SetValueRangeToUndefined(Value *vec, size_t len)
2040 {
2041 SetValueRangeToUndefined(vec, vec + len);
2042 }
2044 static MOZ_ALWAYS_INLINE void
2045 SetValueRangeToNull(Value *beg, Value *end)
2046 {
2047 for (Value *v = beg; v != end; ++v)
2048 v->setNull();
2049 }
2051 static MOZ_ALWAYS_INLINE void
2052 SetValueRangeToNull(Value *vec, size_t len)
2053 {
2054 SetValueRangeToNull(vec, vec + len);
2055 }
2057 /*
2058 * Allocation policy that uses JSRuntime::malloc_ and friends, so that
2059 * memory pressure is properly accounted for. This is suitable for
2060 * long-lived objects owned by the JSRuntime.
2061 *
2062 * Since it doesn't hold a JSContext (those may not live long enough), it
2063 * can't report out-of-memory conditions itself; the caller must check for
2064 * OOM and take the appropriate action.
2065 *
2066 * FIXME bug 647103 - replace these *AllocPolicy names.
2067 */
2068 class RuntimeAllocPolicy
2069 {
2070 JSRuntime *const runtime;
2072 public:
2073 RuntimeAllocPolicy(JSRuntime *rt) : runtime(rt) {}
2074 void *malloc_(size_t bytes) { return runtime->malloc_(bytes); }
2075 void *calloc_(size_t bytes) { return runtime->calloc_(bytes); }
2076 void *realloc_(void *p, size_t bytes) { return runtime->realloc_(p, bytes); }
2077 void free_(void *p) { js_free(p); }
2078 void reportAllocOverflow() const {}
2079 };
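/*
 * Illustrative sketch (not part of the upstream header): runtime-owned
 * containers pick this policy as their allocation-policy template parameter
 * so their memory is charged against the runtime's malloc counter. The
 * typedef and variable names are hypothetical.
 *
 *   typedef Vector<uint32_t, 0, RuntimeAllocPolicy> RuntimeOwnedVector;
 *
 *   RuntimeOwnedVector vec((RuntimeAllocPolicy(rt)));
 *   if (!vec.append(42))
 *       return false;   // OOM: the policy does not report it, the caller must
 */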
2081 extern const JSSecurityCallbacks NullSecurityCallbacks;
2083 } /* namespace js */
2085 #ifdef _MSC_VER
2086 #pragma warning(pop)
2087 #endif
2089 #endif /* vm_Runtime_h */