Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double-key logic based on private browsing mode or the
privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts...
One remaining reservation: how to convince FindCookie callers to test the
condition and pass a nullptr when double-key logic is disabled.
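A minimal sketch of the intended gating, under the assumption of a small
helper; the helper name, argument names, and the call-site snippet are
illustrative only and not part of the actual patch:

    // Hypothetical helper: decide whether cookie lookups should be double-keyed.
    static bool
    ShouldIsolateThirdPartyCookies(bool aIsPrivateBrowsing, bool aIsolatePref)
    {
        // Enabled in private browsing mode or when the
        // privacy.thirdparty.isolate preference is set.
        return aIsPrivateBrowsing || aIsolatePref;
    }

    // A caller such as GetCookieStringCommon would then pass the first-party
    // key only when isolation is enabled, and nullptr otherwise, so FindCookie
    // can fall back to single-key lookup:
    //   const char *firstPartyKey =
    //       ShouldIsolateThirdPartyCookies(isPrivate, isolatePref)
    //           ? baseDomain.get() : nullptr;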
michael@0 | 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
michael@0 | 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: |
michael@0 | 3 | * This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #ifndef vm_Runtime_h |
michael@0 | 8 | #define vm_Runtime_h |
michael@0 | 9 | |
michael@0 | 10 | #include "mozilla/Atomics.h" |
michael@0 | 11 | #include "mozilla/Attributes.h" |
michael@0 | 12 | #include "mozilla/LinkedList.h" |
michael@0 | 13 | #include "mozilla/MemoryReporting.h" |
michael@0 | 14 | #include "mozilla/PodOperations.h" |
michael@0 | 15 | #include "mozilla/Scoped.h" |
michael@0 | 16 | #include "mozilla/ThreadLocal.h" |
michael@0 | 17 | |
michael@0 | 18 | #include <setjmp.h> |
michael@0 | 19 | |
michael@0 | 20 | #include "jsatom.h" |
michael@0 | 21 | #include "jsclist.h" |
michael@0 | 22 | #include "jsgc.h" |
michael@0 | 23 | #ifdef DEBUG |
michael@0 | 24 | # include "jsproxy.h" |
michael@0 | 25 | #endif |
michael@0 | 26 | #include "jsscript.h" |
michael@0 | 27 | |
michael@0 | 28 | #include "ds/FixedSizeHash.h" |
michael@0 | 29 | #include "frontend/ParseMaps.h" |
michael@0 | 30 | #ifdef JSGC_GENERATIONAL |
michael@0 | 31 | # include "gc/Nursery.h" |
michael@0 | 32 | #endif |
michael@0 | 33 | #include "gc/Statistics.h" |
michael@0 | 34 | #ifdef JSGC_GENERATIONAL |
michael@0 | 35 | # include "gc/StoreBuffer.h" |
michael@0 | 36 | #endif |
michael@0 | 37 | #include "gc/Tracer.h" |
michael@0 | 38 | #ifdef XP_MACOSX |
michael@0 | 39 | # include "jit/AsmJSSignalHandlers.h" |
michael@0 | 40 | #endif |
michael@0 | 41 | #include "js/HashTable.h" |
michael@0 | 42 | #include "js/Vector.h" |
michael@0 | 43 | #include "vm/CommonPropertyNames.h" |
michael@0 | 44 | #include "vm/DateTime.h" |
michael@0 | 45 | #include "vm/MallocProvider.h" |
michael@0 | 46 | #include "vm/SPSProfiler.h" |
michael@0 | 47 | #include "vm/Stack.h" |
michael@0 | 48 | #include "vm/ThreadPool.h" |
michael@0 | 49 | |
michael@0 | 50 | #ifdef _MSC_VER |
michael@0 | 51 | #pragma warning(push) |
michael@0 | 52 | #pragma warning(disable:4100) /* Silence unreferenced formal parameter warnings */ |
michael@0 | 53 | #endif |
michael@0 | 54 | |
michael@0 | 55 | namespace js { |
michael@0 | 56 | |
michael@0 | 57 | class PerThreadData; |
michael@0 | 58 | class ThreadSafeContext; |
michael@0 | 59 | class AutoKeepAtoms; |
michael@0 | 60 | #ifdef JS_TRACE_LOGGING |
michael@0 | 61 | class TraceLogger; |
michael@0 | 62 | #endif |
michael@0 | 63 | |
michael@0 | 64 | /* Thread Local Storage slot for storing the runtime for a thread. */ |
michael@0 | 65 | extern mozilla::ThreadLocal<PerThreadData*> TlsPerThreadData; |
michael@0 | 66 | |
michael@0 | 67 | } // namespace js |
michael@0 | 68 | |
michael@0 | 69 | struct DtoaState; |
michael@0 | 70 | |
michael@0 | 71 | extern void |
michael@0 | 72 | js_ReportOutOfMemory(js::ThreadSafeContext *cx); |
michael@0 | 73 | |
michael@0 | 74 | extern void |
michael@0 | 75 | js_ReportAllocationOverflow(js::ThreadSafeContext *cx); |
michael@0 | 76 | |
michael@0 | 77 | extern void |
michael@0 | 78 | js_ReportOverRecursed(js::ThreadSafeContext *cx); |
michael@0 | 79 | |
michael@0 | 80 | namespace JSC { class ExecutableAllocator; } |
michael@0 | 81 | |
michael@0 | 82 | namespace WTF { class BumpPointerAllocator; } |
michael@0 | 83 | |
michael@0 | 84 | namespace js { |
michael@0 | 85 | |
michael@0 | 86 | typedef Rooted<JSLinearString*> RootedLinearString; |
michael@0 | 87 | |
michael@0 | 88 | class Activation; |
michael@0 | 89 | class ActivationIterator; |
michael@0 | 90 | class AsmJSActivation; |
michael@0 | 91 | class MathCache; |
michael@0 | 92 | |
michael@0 | 93 | namespace jit { |
michael@0 | 94 | class JitRuntime; |
michael@0 | 95 | class JitActivation; |
michael@0 | 96 | struct PcScriptCache; |
michael@0 | 97 | class Simulator; |
michael@0 | 98 | class SimulatorRuntime; |
michael@0 | 99 | class AutoFlushICache; |
michael@0 | 100 | } |
michael@0 | 101 | |
michael@0 | 102 | /* |
michael@0 | 103 | * GetSrcNote cache to avoid O(n^2) growth in finding a source note for a |
michael@0 | 104 | * given pc in a script. We use the script->code pointer to tag the cache, |
michael@0 | 105 | * instead of the script address itself, so that source notes are always found |
michael@0 | 106 | * by offset from the bytecode with which they were generated. |
michael@0 | 107 | */ |
michael@0 | 108 | struct GSNCache { |
michael@0 | 109 | typedef HashMap<jsbytecode *, |
michael@0 | 110 | jssrcnote *, |
michael@0 | 111 | PointerHasher<jsbytecode *, 0>, |
michael@0 | 112 | SystemAllocPolicy> Map; |
michael@0 | 113 | |
michael@0 | 114 | jsbytecode *code; |
michael@0 | 115 | Map map; |
michael@0 | 116 | |
michael@0 | 117 | GSNCache() : code(nullptr) { } |
michael@0 | 118 | |
michael@0 | 119 | void purge(); |
michael@0 | 120 | }; |
michael@0 | 121 | |
michael@0 | 122 | /* |
michael@0 | 123 | * ScopeCoordinateName cache to avoid O(n^2) growth in finding the name |
michael@0 | 124 | * associated with a given aliasedvar operation. |
michael@0 | 125 | */ |
michael@0 | 126 | struct ScopeCoordinateNameCache { |
michael@0 | 127 | typedef HashMap<uint32_t, |
michael@0 | 128 | jsid, |
michael@0 | 129 | DefaultHasher<uint32_t>, |
michael@0 | 130 | SystemAllocPolicy> Map; |
michael@0 | 131 | |
michael@0 | 132 | Shape *shape; |
michael@0 | 133 | Map map; |
michael@0 | 134 | |
michael@0 | 135 | ScopeCoordinateNameCache() : shape(nullptr) {} |
michael@0 | 136 | void purge(); |
michael@0 | 137 | }; |
michael@0 | 138 | |
michael@0 | 139 | typedef Vector<ScriptAndCounts, 0, SystemAllocPolicy> ScriptAndCountsVector; |
michael@0 | 140 | |
michael@0 | 141 | struct ConservativeGCData |
michael@0 | 142 | { |
michael@0 | 143 | /* |
michael@0 | 144 | * The GC scans conservatively between ThreadData::nativeStackBase and |
michael@0 | 145 | * nativeStackTop unless the latter is nullptr. |
michael@0 | 146 | */ |
michael@0 | 147 | uintptr_t *nativeStackTop; |
michael@0 | 148 | |
michael@0 | 149 | union { |
michael@0 | 150 | jmp_buf jmpbuf; |
michael@0 | 151 | uintptr_t words[JS_HOWMANY(sizeof(jmp_buf), sizeof(uintptr_t))]; |
michael@0 | 152 | } registerSnapshot; |
michael@0 | 153 | |
michael@0 | 154 | ConservativeGCData() { |
michael@0 | 155 | mozilla::PodZero(this); |
michael@0 | 156 | } |
michael@0 | 157 | |
michael@0 | 158 | ~ConservativeGCData() { |
michael@0 | 159 | #ifdef JS_THREADSAFE |
michael@0 | 160 | /* |
michael@0 | 161 | * The conservative GC scanner should be disabled when the thread leaves |
michael@0 | 162 | * the last request. |
michael@0 | 163 | */ |
michael@0 | 164 | JS_ASSERT(!hasStackToScan()); |
michael@0 | 165 | #endif |
michael@0 | 166 | } |
michael@0 | 167 | |
michael@0 | 168 | MOZ_NEVER_INLINE void recordStackTop(); |
michael@0 | 169 | |
michael@0 | 170 | #ifdef JS_THREADSAFE |
michael@0 | 171 | void updateForRequestEnd() { |
michael@0 | 172 | nativeStackTop = nullptr; |
michael@0 | 173 | } |
michael@0 | 174 | #endif |
michael@0 | 175 | |
michael@0 | 176 | bool hasStackToScan() const { |
michael@0 | 177 | return !!nativeStackTop; |
michael@0 | 178 | } |
michael@0 | 179 | }; |
michael@0 | 180 | |
michael@0 | 181 | struct EvalCacheEntry |
michael@0 | 182 | { |
michael@0 | 183 | JSScript *script; |
michael@0 | 184 | JSScript *callerScript; |
michael@0 | 185 | jsbytecode *pc; |
michael@0 | 186 | }; |
michael@0 | 187 | |
michael@0 | 188 | struct EvalCacheLookup |
michael@0 | 189 | { |
michael@0 | 190 | EvalCacheLookup(JSContext *cx) : str(cx), callerScript(cx) {} |
michael@0 | 191 | RootedLinearString str; |
michael@0 | 192 | RootedScript callerScript; |
michael@0 | 193 | JSVersion version; |
michael@0 | 194 | jsbytecode *pc; |
michael@0 | 195 | }; |
michael@0 | 196 | |
michael@0 | 197 | struct EvalCacheHashPolicy |
michael@0 | 198 | { |
michael@0 | 199 | typedef EvalCacheLookup Lookup; |
michael@0 | 200 | |
michael@0 | 201 | static HashNumber hash(const Lookup &l); |
michael@0 | 202 | static bool match(const EvalCacheEntry &entry, const EvalCacheLookup &l); |
michael@0 | 203 | }; |
michael@0 | 204 | |
michael@0 | 205 | typedef HashSet<EvalCacheEntry, EvalCacheHashPolicy, SystemAllocPolicy> EvalCache; |
michael@0 | 206 | |
michael@0 | 207 | struct LazyScriptHashPolicy |
michael@0 | 208 | { |
michael@0 | 209 | struct Lookup { |
michael@0 | 210 | JSContext *cx; |
michael@0 | 211 | LazyScript *lazy; |
michael@0 | 212 | |
michael@0 | 213 | Lookup(JSContext *cx, LazyScript *lazy) |
michael@0 | 214 | : cx(cx), lazy(lazy) |
michael@0 | 215 | {} |
michael@0 | 216 | }; |
michael@0 | 217 | |
michael@0 | 218 | static const size_t NumHashes = 3; |
michael@0 | 219 | |
michael@0 | 220 | static void hash(const Lookup &lookup, HashNumber hashes[NumHashes]); |
michael@0 | 221 | static bool match(JSScript *script, const Lookup &lookup); |
michael@0 | 222 | |
michael@0 | 223 | // Alternate methods for use when removing scripts from the hash without an |
michael@0 | 224 | // explicit LazyScript lookup. |
michael@0 | 225 | static void hash(JSScript *script, HashNumber hashes[NumHashes]); |
michael@0 | 226 | static bool match(JSScript *script, JSScript *lookup) { return script == lookup; } |
michael@0 | 227 | |
michael@0 | 228 | static void clear(JSScript **pscript) { *pscript = nullptr; } |
michael@0 | 229 | static bool isCleared(JSScript *script) { return !script; } |
michael@0 | 230 | }; |
michael@0 | 231 | |
michael@0 | 232 | typedef FixedSizeHashSet<JSScript *, LazyScriptHashPolicy, 769> LazyScriptCache; |
michael@0 | 233 | |
michael@0 | 234 | class PropertyIteratorObject; |
michael@0 | 235 | |
michael@0 | 236 | class NativeIterCache |
michael@0 | 237 | { |
michael@0 | 238 | static const size_t SIZE = size_t(1) << 8; |
michael@0 | 239 | |
michael@0 | 240 | /* Cached native iterators. */ |
michael@0 | 241 | PropertyIteratorObject *data[SIZE]; |
michael@0 | 242 | |
michael@0 | 243 | static size_t getIndex(uint32_t key) { |
michael@0 | 244 | return size_t(key) % SIZE; |
michael@0 | 245 | } |
michael@0 | 246 | |
michael@0 | 247 | public: |
michael@0 | 248 | /* Native iterator most recently started. */ |
michael@0 | 249 | PropertyIteratorObject *last; |
michael@0 | 250 | |
michael@0 | 251 | NativeIterCache() |
michael@0 | 252 | : last(nullptr) |
michael@0 | 253 | { |
michael@0 | 254 | mozilla::PodArrayZero(data); |
michael@0 | 255 | } |
michael@0 | 256 | |
michael@0 | 257 | void purge() { |
michael@0 | 258 | last = nullptr; |
michael@0 | 259 | mozilla::PodArrayZero(data); |
michael@0 | 260 | } |
michael@0 | 261 | |
michael@0 | 262 | PropertyIteratorObject *get(uint32_t key) const { |
michael@0 | 263 | return data[getIndex(key)]; |
michael@0 | 264 | } |
michael@0 | 265 | |
michael@0 | 266 | void set(uint32_t key, PropertyIteratorObject *iterobj) { |
michael@0 | 267 | data[getIndex(key)] = iterobj; |
michael@0 | 268 | } |
michael@0 | 269 | }; |
michael@0 | 270 | |
michael@0 | 271 | /* |
michael@0 | 272 | * Cache for speeding up repetitive creation of objects in the VM. |
michael@0 | 273 | * When an object is created which matches the criteria in the 'key' section |
michael@0 | 274 | * below, an entry is filled with the resulting object. |
michael@0 | 275 | */ |
michael@0 | 276 | class NewObjectCache |
michael@0 | 277 | { |
michael@0 | 278 | /* Statically asserted to be equal to sizeof(JSObject_Slots16) */ |
michael@0 | 279 | static const unsigned MAX_OBJ_SIZE = 4 * sizeof(void*) + 16 * sizeof(Value); |
michael@0 | 280 | |
michael@0 | 281 | static void staticAsserts() { |
michael@0 | 282 | JS_STATIC_ASSERT(NewObjectCache::MAX_OBJ_SIZE == sizeof(JSObject_Slots16)); |
michael@0 | 283 | JS_STATIC_ASSERT(gc::FINALIZE_OBJECT_LAST == gc::FINALIZE_OBJECT16_BACKGROUND); |
michael@0 | 284 | } |
michael@0 | 285 | |
michael@0 | 286 | struct Entry |
michael@0 | 287 | { |
michael@0 | 288 | /* Class of the constructed object. */ |
michael@0 | 289 | const Class *clasp; |
michael@0 | 290 | |
michael@0 | 291 | /* |
michael@0 | 292 | * Key with one of three possible values: |
michael@0 | 293 | * |
michael@0 | 294 | * - Global for the object. The object must have a standard class for |
michael@0 | 295 | * which the global's prototype can be determined, and the object's |
michael@0 | 296 | * parent will be the global. |
michael@0 | 297 | * |
michael@0 | 298 | * - Prototype for the object (cannot be global). The object's parent |
michael@0 | 299 | * will be the prototype's parent. |
michael@0 | 300 | * |
michael@0 | 301 | * - Type for the object. The object's parent will be the type's |
michael@0 | 302 | * prototype's parent. |
michael@0 | 303 | */ |
michael@0 | 304 | gc::Cell *key; |
michael@0 | 305 | |
michael@0 | 306 | /* Allocation kind for the constructed object. */ |
michael@0 | 307 | gc::AllocKind kind; |
michael@0 | 308 | |
michael@0 | 309 | /* Number of bytes to copy from the template object. */ |
michael@0 | 310 | uint32_t nbytes; |
michael@0 | 311 | |
michael@0 | 312 | /* |
michael@0 | 313 | * Template object to copy from, with the initial values of fields, |
michael@0 | 314 | * fixed slots (undefined) and private data (nullptr). |
michael@0 | 315 | */ |
michael@0 | 316 | char templateObject[MAX_OBJ_SIZE]; |
michael@0 | 317 | }; |
michael@0 | 318 | |
michael@0 | 319 | Entry entries[41]; // TODO: reconsider size |
michael@0 | 320 | |
michael@0 | 321 | public: |
michael@0 | 322 | |
michael@0 | 323 | typedef int EntryIndex; |
michael@0 | 324 | |
michael@0 | 325 | NewObjectCache() { mozilla::PodZero(this); } |
michael@0 | 326 | void purge() { mozilla::PodZero(this); } |
michael@0 | 327 | |
michael@0 | 328 | /* Remove any cached items keyed on moved objects. */ |
michael@0 | 329 | void clearNurseryObjects(JSRuntime *rt); |
michael@0 | 330 | |
michael@0 | 331 | /* |
michael@0 | 332 | * Get the entry index for the given lookup, return whether there was a hit |
michael@0 | 333 | * on an existing entry. |
michael@0 | 334 | */ |
michael@0 | 335 | inline bool lookupProto(const Class *clasp, JSObject *proto, gc::AllocKind kind, EntryIndex *pentry); |
michael@0 | 336 | inline bool lookupGlobal(const Class *clasp, js::GlobalObject *global, gc::AllocKind kind, |
michael@0 | 337 | EntryIndex *pentry); |
michael@0 | 338 | |
michael@0 | 339 | bool lookupType(js::types::TypeObject *type, gc::AllocKind kind, EntryIndex *pentry) { |
michael@0 | 340 | return lookup(type->clasp(), type, kind, pentry); |
michael@0 | 341 | } |
michael@0 | 342 | |
michael@0 | 343 | /* |
michael@0 | 344 | * Return a new object from a cache hit produced by a lookup method, or |
michael@0 | 345 | * nullptr if returning the object could possibly trigger GC (does not |
michael@0 | 346 | * indicate failure). |
michael@0 | 347 | */ |
michael@0 | 348 | template <AllowGC allowGC> |
michael@0 | 349 | inline JSObject *newObjectFromHit(JSContext *cx, EntryIndex entry, js::gc::InitialHeap heap); |
michael@0 | 350 | |
michael@0 | 351 | /* Fill an entry after a cache miss. */ |
michael@0 | 352 | void fillProto(EntryIndex entry, const Class *clasp, js::TaggedProto proto, gc::AllocKind kind, JSObject *obj); |
michael@0 | 353 | |
michael@0 | 354 | inline void fillGlobal(EntryIndex entry, const Class *clasp, js::GlobalObject *global, |
michael@0 | 355 | gc::AllocKind kind, JSObject *obj); |
michael@0 | 356 | |
michael@0 | 357 | void fillType(EntryIndex entry, js::types::TypeObject *type, gc::AllocKind kind, |
michael@0 | 358 | JSObject *obj) |
michael@0 | 359 | { |
michael@0 | 360 | JS_ASSERT(obj->type() == type); |
michael@0 | 361 | return fill(entry, type->clasp(), type, kind, obj); |
michael@0 | 362 | } |
michael@0 | 363 | |
michael@0 | 364 | /* Invalidate any entries which might produce an object with shape/proto. */ |
michael@0 | 365 | void invalidateEntriesForShape(JSContext *cx, HandleShape shape, HandleObject proto); |
michael@0 | 366 | |
michael@0 | 367 | private: |
michael@0 | 368 | bool lookup(const Class *clasp, gc::Cell *key, gc::AllocKind kind, EntryIndex *pentry) { |
michael@0 | 369 | uintptr_t hash = (uintptr_t(clasp) ^ uintptr_t(key)) + kind; |
michael@0 | 370 | *pentry = hash % mozilla::ArrayLength(entries); |
michael@0 | 371 | |
michael@0 | 372 | Entry *entry = &entries[*pentry]; |
michael@0 | 373 | |
michael@0 | 374 | /* N.B. Lookups with the same clasp/key but different kinds map to different entries. */ |
michael@0 | 375 | return entry->clasp == clasp && entry->key == key; |
michael@0 | 376 | } |
michael@0 | 377 | |
michael@0 | 378 | void fill(EntryIndex entry_, const Class *clasp, gc::Cell *key, gc::AllocKind kind, JSObject *obj) { |
michael@0 | 379 | JS_ASSERT(unsigned(entry_) < mozilla::ArrayLength(entries)); |
michael@0 | 380 | Entry *entry = &entries[entry_]; |
michael@0 | 381 | |
michael@0 | 382 | JS_ASSERT(!obj->hasDynamicSlots() && !obj->hasDynamicElements()); |
michael@0 | 383 | |
michael@0 | 384 | entry->clasp = clasp; |
michael@0 | 385 | entry->key = key; |
michael@0 | 386 | entry->kind = kind; |
michael@0 | 387 | |
michael@0 | 388 | entry->nbytes = gc::Arena::thingSize(kind); |
michael@0 | 389 | js_memcpy(&entry->templateObject, obj, entry->nbytes); |
michael@0 | 390 | } |
michael@0 | 391 | |
michael@0 | 392 | static void copyCachedToObject(JSObject *dst, JSObject *src, gc::AllocKind kind) { |
michael@0 | 393 | js_memcpy(dst, src, gc::Arena::thingSize(kind)); |
michael@0 | 394 | #ifdef JSGC_GENERATIONAL |
michael@0 | 395 | Shape::writeBarrierPost(dst->shape_, &dst->shape_); |
michael@0 | 396 | types::TypeObject::writeBarrierPost(dst->type_, &dst->type_); |
michael@0 | 397 | #endif |
michael@0 | 398 | } |
michael@0 | 399 | }; |
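// Illustrative caller pattern implied by the lookup*/fill*/newObjectFromHit
// comments above; this is a sketch, not actual engine code, and
// AllocateObjectSomehow stands in for whatever slow path the caller uses:
//
//   NewObjectCache::EntryIndex entry;
//   if (cache.lookupProto(clasp, proto, kind, &entry)) {
//       // Hit: copy the cached template object. A nullptr return only means
//       // the copy could have triggered GC, not that allocation failed.
//       if (JSObject *obj = cache.newObjectFromHit<CanGC>(cx, entry, heap))
//           return obj;
//   }
//   JSObject *obj = AllocateObjectSomehow(cx, clasp, proto, kind);
//   if (obj)
//       cache.fillProto(entry, clasp, js::TaggedProto(proto), kind, obj);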
michael@0 | 400 | |
michael@0 | 401 | /* |
michael@0 | 402 |  * A FreeOp can do one thing: free memory. For convenience, it also has |
michael@0 | 403 |  * delete_ methods that call destructors. |
michael@0 | 404 | * |
michael@0 | 405 | * FreeOp is passed to finalizers and other sweep-phase hooks so that we do not |
michael@0 | 406 | * need to pass a JSContext to those hooks. |
michael@0 | 407 | */ |
michael@0 | 408 | class FreeOp : public JSFreeOp { |
michael@0 | 409 | bool shouldFreeLater_; |
michael@0 | 410 | |
michael@0 | 411 | public: |
michael@0 | 412 | static FreeOp *get(JSFreeOp *fop) { |
michael@0 | 413 | return static_cast<FreeOp *>(fop); |
michael@0 | 414 | } |
michael@0 | 415 | |
michael@0 | 416 | FreeOp(JSRuntime *rt, bool shouldFreeLater) |
michael@0 | 417 | : JSFreeOp(rt), |
michael@0 | 418 | shouldFreeLater_(shouldFreeLater) |
michael@0 | 419 | { |
michael@0 | 420 | } |
michael@0 | 421 | |
michael@0 | 422 | bool shouldFreeLater() const { |
michael@0 | 423 | return shouldFreeLater_; |
michael@0 | 424 | } |
michael@0 | 425 | |
michael@0 | 426 | inline void free_(void *p); |
michael@0 | 427 | |
michael@0 | 428 | template <class T> |
michael@0 | 429 | inline void delete_(T *p) { |
michael@0 | 430 | if (p) { |
michael@0 | 431 | p->~T(); |
michael@0 | 432 | free_(p); |
michael@0 | 433 | } |
michael@0 | 434 | } |
michael@0 | 435 | |
michael@0 | 436 | static void staticAsserts() { |
michael@0 | 437 | /* |
michael@0 | 438 | * Check that JSFreeOp is the first base class for FreeOp and we can |
michael@0 | 439 | * reinterpret a pointer to JSFreeOp as a pointer to FreeOp without |
michael@0 | 440 | * any offset adjustments. JSClass::finalize <-> Class::finalize depends |
michael@0 | 441 | * on this. |
michael@0 | 442 | */ |
michael@0 | 443 | JS_STATIC_ASSERT(offsetof(FreeOp, shouldFreeLater_) == sizeof(JSFreeOp)); |
michael@0 | 444 | } |
michael@0 | 445 | }; |
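// Illustrative sweep-phase hook implied by the comment above; a sketch with
// hypothetical MyPrivate/MyClassFinalize names. FreeOp::get recovers the
// concrete FreeOp from the JSFreeOp pointer handed to finalizers:
//
//   static void
//   MyClassFinalize(JSFreeOp *fop, JSObject *obj)
//   {
//       js::FreeOp *op = js::FreeOp::get(fop);
//       // delete_ runs ~MyPrivate() and then releases the memory via free_.
//       op->delete_(static_cast<MyPrivate *>(JS_GetPrivate(obj)));
//   }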
michael@0 | 446 | |
michael@0 | 447 | } /* namespace js */ |
michael@0 | 448 | |
michael@0 | 449 | namespace JS { |
michael@0 | 450 | struct RuntimeSizes; |
michael@0 | 451 | } |
michael@0 | 452 | |
michael@0 | 453 | /* Various built-in or commonly-used names pinned on first context. */ |
michael@0 | 454 | struct JSAtomState |
michael@0 | 455 | { |
michael@0 | 456 | #define PROPERTYNAME_FIELD(idpart, id, text) js::FixedHeapPtr<js::PropertyName> id; |
michael@0 | 457 | FOR_EACH_COMMON_PROPERTYNAME(PROPERTYNAME_FIELD) |
michael@0 | 458 | #undef PROPERTYNAME_FIELD |
michael@0 | 459 | #define PROPERTYNAME_FIELD(name, code, init, clasp) js::FixedHeapPtr<js::PropertyName> name; |
michael@0 | 460 | JS_FOR_EACH_PROTOTYPE(PROPERTYNAME_FIELD) |
michael@0 | 461 | #undef PROPERTYNAME_FIELD |
michael@0 | 462 | }; |
michael@0 | 463 | |
michael@0 | 464 | namespace js { |
michael@0 | 465 | |
michael@0 | 466 | #define NAME_OFFSET(name) offsetof(JSAtomState, name) |
michael@0 | 467 | |
michael@0 | 468 | inline HandlePropertyName |
michael@0 | 469 | AtomStateOffsetToName(const JSAtomState &atomState, size_t offset) |
michael@0 | 470 | { |
michael@0 | 471 | return *(js::FixedHeapPtr<js::PropertyName>*)((char*)&atomState + offset); |
michael@0 | 472 | } |
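// Usage sketch for the macro/accessor above (assumes an atomState instance
// reachable by the caller and uses the common name "length" purely as an
// illustration):
//
//   js::HandlePropertyName name =
//       js::AtomStateOffsetToName(atomState, NAME_OFFSET(length));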
michael@0 | 473 | |
michael@0 | 474 | // There are several coarse locks in the enum below. These may be either |
michael@0 | 475 | // per-runtime or per-process. When acquiring more than one of these locks, |
michael@0 | 476 | // the acquisition must be done in the order below to avoid deadlocks. |
michael@0 | 477 | enum RuntimeLock { |
michael@0 | 478 | ExclusiveAccessLock, |
michael@0 | 479 | WorkerThreadStateLock, |
michael@0 | 480 | InterruptLock, |
michael@0 | 481 | GCLock |
michael@0 | 482 | }; |
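// Example of the ordering rule stated above (a sketch, not a real call site):
// a thread that already holds ExclusiveAccessLock may still take GCLock,
// since GCLock appears later in the enum, but taking them in the opposite
// order risks deadlock; presumably this is what AssertCurrentThreadCanLock
// is meant to catch in DEBUG builds.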
michael@0 | 483 | |
michael@0 | 484 | #ifdef DEBUG |
michael@0 | 485 | void AssertCurrentThreadCanLock(RuntimeLock which); |
michael@0 | 486 | #else |
michael@0 | 487 | inline void AssertCurrentThreadCanLock(RuntimeLock which) {} |
michael@0 | 488 | #endif |
michael@0 | 489 | |
michael@0 | 490 | /* |
michael@0 | 491 | * Encapsulates portions of the runtime/context that are tied to a |
michael@0 | 492 | * single active thread. Instances of this structure can occur for |
michael@0 | 493 | * the main thread as |JSRuntime::mainThread|, for select operations |
michael@0 | 494 | * performed off thread, such as parsing, and for Parallel JS worker |
michael@0 | 495 | * threads. |
michael@0 | 496 | */ |
michael@0 | 497 | class PerThreadData : public PerThreadDataFriendFields |
michael@0 | 498 | { |
michael@0 | 499 | /* |
michael@0 | 500 | * Backpointer to the full shared JSRuntime* with which this |
michael@0 | 501 | * thread is associated. This is private because accessing the |
michael@0 | 502 | * fields of this runtime can provoke race conditions, so the |
michael@0 | 503 | * intention is that access will be mediated through safe |
michael@0 | 504 | * functions like |runtimeFromMainThread| and |associatedWith()| below. |
michael@0 | 505 | */ |
michael@0 | 506 | JSRuntime *runtime_; |
michael@0 | 507 | |
michael@0 | 508 | public: |
michael@0 | 509 | /* |
michael@0 | 510 |  * We save all conservatively scanned roots in this vector so that |
michael@0 | 511 | * conservative scanning can be "replayed" deterministically. In DEBUG mode, |
michael@0 | 512 | * this allows us to run a non-incremental GC after every incremental GC to |
michael@0 | 513 | * ensure that no objects were missed. |
michael@0 | 514 | */ |
michael@0 | 515 | #ifdef DEBUG |
michael@0 | 516 | struct SavedGCRoot { |
michael@0 | 517 | void *thing; |
michael@0 | 518 | JSGCTraceKind kind; |
michael@0 | 519 | |
michael@0 | 520 | SavedGCRoot(void *thing, JSGCTraceKind kind) : thing(thing), kind(kind) {} |
michael@0 | 521 | }; |
michael@0 | 522 | js::Vector<SavedGCRoot, 0, js::SystemAllocPolicy> gcSavedRoots; |
michael@0 | 523 | #endif |
michael@0 | 524 | |
michael@0 | 525 | /* |
michael@0 | 526 | * If Ion code is on the stack, and has called into C++, this will be |
michael@0 | 527 | * aligned to an Ion exit frame. |
michael@0 | 528 | */ |
michael@0 | 529 | uint8_t *ionTop; |
michael@0 | 530 | |
michael@0 | 531 | /* |
michael@0 | 532 | * The current JSContext when entering JIT code. This field may only be used |
michael@0 | 533 | * from JIT code and C++ directly called by JIT code (otherwise it may refer |
michael@0 | 534 | * to the wrong JSContext). |
michael@0 | 535 | */ |
michael@0 | 536 | JSContext *jitJSContext; |
michael@0 | 537 | |
michael@0 | 538 | /* |
michael@0 | 539 | * The stack limit checked by JIT code. This stack limit may be temporarily |
michael@0 | 540 | * set to null to force JIT code to exit (e.g., for the operation callback). |
michael@0 | 541 | */ |
michael@0 | 542 | uintptr_t jitStackLimit; |
michael@0 | 543 | |
michael@0 | 544 | inline void setJitStackLimit(uintptr_t limit); |
michael@0 | 545 | |
michael@0 | 546 | #ifdef JS_TRACE_LOGGING |
michael@0 | 547 | TraceLogger *traceLogger; |
michael@0 | 548 | #endif |
michael@0 | 549 | |
michael@0 | 550 | /* |
michael@0 | 551 | * asm.js maintains a stack of AsmJSModule activations (see AsmJS.h). This |
michael@0 | 552 | * stack is used by JSRuntime::requestInterrupt to stop long-running asm.js |
michael@0 | 553 | * without requiring dynamic polling operations in the generated |
michael@0 | 554 |  * code. Since requestInterrupt may run on a thread other than the |
michael@0 | 555 |  * JSRuntime's owner thread, all reads/writes must be synchronized (by |
michael@0 | 556 | * rt->interruptLock). |
michael@0 | 557 | */ |
michael@0 | 558 | private: |
michael@0 | 559 | friend class js::Activation; |
michael@0 | 560 | friend class js::ActivationIterator; |
michael@0 | 561 | friend class js::jit::JitActivation; |
michael@0 | 562 | friend class js::AsmJSActivation; |
michael@0 | 563 | #ifdef DEBUG |
michael@0 | 564 | friend void js::AssertCurrentThreadCanLock(RuntimeLock which); |
michael@0 | 565 | #endif |
michael@0 | 566 | |
michael@0 | 567 | /* |
michael@0 | 568 | * Points to the most recent activation running on the thread. |
michael@0 | 569 | * See Activation comment in vm/Stack.h. |
michael@0 | 570 | */ |
michael@0 | 571 | js::Activation *activation_; |
michael@0 | 572 | |
michael@0 | 573 | /* See AsmJSActivation comment. Protected by rt->interruptLock. */ |
michael@0 | 574 | js::AsmJSActivation *asmJSActivationStack_; |
michael@0 | 575 | |
michael@0 | 576 | /* Pointer to the current AutoFlushICache. */ |
michael@0 | 577 | js::jit::AutoFlushICache *autoFlushICache_; |
michael@0 | 578 | |
michael@0 | 579 | #ifdef JS_ARM_SIMULATOR |
michael@0 | 580 | js::jit::Simulator *simulator_; |
michael@0 | 581 | uintptr_t simulatorStackLimit_; |
michael@0 | 582 | #endif |
michael@0 | 583 | |
michael@0 | 584 | public: |
michael@0 | 585 | js::Activation *const *addressOfActivation() const { |
michael@0 | 586 | return &activation_; |
michael@0 | 587 | } |
michael@0 | 588 | static unsigned offsetOfAsmJSActivationStackReadOnly() { |
michael@0 | 589 | return offsetof(PerThreadData, asmJSActivationStack_); |
michael@0 | 590 | } |
michael@0 | 591 | static unsigned offsetOfActivation() { |
michael@0 | 592 | return offsetof(PerThreadData, activation_); |
michael@0 | 593 | } |
michael@0 | 594 | |
michael@0 | 595 | js::AsmJSActivation *asmJSActivationStackFromAnyThread() const { |
michael@0 | 596 | return asmJSActivationStack_; |
michael@0 | 597 | } |
michael@0 | 598 | js::AsmJSActivation *asmJSActivationStackFromOwnerThread() const { |
michael@0 | 599 | return asmJSActivationStack_; |
michael@0 | 600 | } |
michael@0 | 601 | |
michael@0 | 602 | js::Activation *activation() const { |
michael@0 | 603 | return activation_; |
michael@0 | 604 | } |
michael@0 | 605 | |
michael@0 | 606 | /* State used by jsdtoa.cpp. */ |
michael@0 | 607 | DtoaState *dtoaState; |
michael@0 | 608 | |
michael@0 | 609 | /* |
michael@0 | 610 | * When this flag is non-zero, any attempt to GC will be skipped. It is used |
michael@0 | 611 | * to suppress GC when reporting an OOM (see js_ReportOutOfMemory) and in |
michael@0 | 612 | * debugging facilities that cannot tolerate a GC and would rather OOM |
michael@0 | 613 | * immediately, such as utilities exposed to GDB. Setting this flag is |
michael@0 | 614 | * extremely dangerous and should only be used when in an OOM situation or |
michael@0 | 615 | * in non-exposed debugging facilities. |
michael@0 | 616 | */ |
michael@0 | 617 | int32_t suppressGC; |
michael@0 | 618 | |
michael@0 | 619 | // Number of active bytecode compilations on this thread. |
michael@0 | 620 | unsigned activeCompilations; |
michael@0 | 621 | |
michael@0 | 622 | PerThreadData(JSRuntime *runtime); |
michael@0 | 623 | ~PerThreadData(); |
michael@0 | 624 | |
michael@0 | 625 | bool init(); |
michael@0 | 626 | |
michael@0 | 627 | bool associatedWith(const JSRuntime *rt) { return runtime_ == rt; } |
michael@0 | 628 | inline JSRuntime *runtimeFromMainThread(); |
michael@0 | 629 | inline JSRuntime *runtimeIfOnOwnerThread(); |
michael@0 | 630 | |
michael@0 | 631 | inline bool exclusiveThreadsPresent(); |
michael@0 | 632 | inline void addActiveCompilation(); |
michael@0 | 633 | inline void removeActiveCompilation(); |
michael@0 | 634 | |
michael@0 | 635 | // For threads which may be associated with different runtimes, depending |
michael@0 | 636 | // on the work they are doing. |
michael@0 | 637 | class MOZ_STACK_CLASS AutoEnterRuntime |
michael@0 | 638 | { |
michael@0 | 639 | PerThreadData *pt; |
michael@0 | 640 | |
michael@0 | 641 | public: |
michael@0 | 642 | AutoEnterRuntime(PerThreadData *pt, JSRuntime *rt) |
michael@0 | 643 | : pt(pt) |
michael@0 | 644 | { |
michael@0 | 645 | JS_ASSERT(!pt->runtime_); |
michael@0 | 646 | pt->runtime_ = rt; |
michael@0 | 647 | } |
michael@0 | 648 | |
michael@0 | 649 | ~AutoEnterRuntime() { |
michael@0 | 650 | pt->runtime_ = nullptr; |
michael@0 | 651 | } |
michael@0 | 652 | }; |
michael@0 | 653 | |
michael@0 | 654 | js::jit::AutoFlushICache *autoFlushICache() const; |
michael@0 | 655 | void setAutoFlushICache(js::jit::AutoFlushICache *afc); |
michael@0 | 656 | |
michael@0 | 657 | #ifdef JS_ARM_SIMULATOR |
michael@0 | 658 | js::jit::Simulator *simulator() const; |
michael@0 | 659 | void setSimulator(js::jit::Simulator *sim); |
michael@0 | 660 | js::jit::SimulatorRuntime *simulatorRuntime() const; |
michael@0 | 661 | uintptr_t *addressOfSimulatorStackLimit(); |
michael@0 | 662 | #endif |
michael@0 | 663 | }; |
michael@0 | 664 | |
michael@0 | 665 | namespace gc { |
michael@0 | 666 | class MarkingValidator; |
michael@0 | 667 | } // namespace gc |
michael@0 | 668 | |
michael@0 | 669 | typedef Vector<JS::Zone *, 4, SystemAllocPolicy> ZoneVector; |
michael@0 | 670 | |
michael@0 | 671 | class AutoLockForExclusiveAccess; |
michael@0 | 672 | |
michael@0 | 673 | void RecomputeStackLimit(JSRuntime *rt, StackKind kind); |
michael@0 | 674 | |
michael@0 | 675 | } // namespace js |
michael@0 | 676 | |
michael@0 | 677 | struct JSRuntime : public JS::shadow::Runtime, |
michael@0 | 678 | public js::MallocProvider<JSRuntime> |
michael@0 | 679 | { |
michael@0 | 680 | /* |
michael@0 | 681 | * Per-thread data for the main thread that is associated with |
michael@0 | 682 | * this JSRuntime, as opposed to any worker threads used in |
michael@0 | 683 | * parallel sections. See definition of |PerThreadData| struct |
michael@0 | 684 | * above for more details. |
michael@0 | 685 | * |
michael@0 | 686 | * NB: This field is statically asserted to be at offset |
michael@0 | 687 | * sizeof(js::shadow::Runtime). See |
michael@0 | 688 | * PerThreadDataFriendFields::getMainThread. |
michael@0 | 689 | */ |
michael@0 | 690 | js::PerThreadData mainThread; |
michael@0 | 691 | |
michael@0 | 692 | /* |
michael@0 | 693 | * If non-null, another runtime guaranteed to outlive this one and whose |
michael@0 | 694 | * permanent data may be used by this one where possible. |
michael@0 | 695 | */ |
michael@0 | 696 | JSRuntime *parentRuntime; |
michael@0 | 697 | |
michael@0 | 698 | /* |
michael@0 | 699 | * If true, we've been asked to call the interrupt callback as soon as |
michael@0 | 700 | * possible. |
michael@0 | 701 | */ |
michael@0 | 702 | mozilla::Atomic<bool, mozilla::Relaxed> interrupt; |
michael@0 | 703 | |
michael@0 | 704 | #if defined(JS_THREADSAFE) && defined(JS_ION) |
michael@0 | 705 | /* |
michael@0 | 706 | * If non-zero, ForkJoin should service an interrupt. This is a separate |
michael@0 | 707 | * flag from |interrupt| because we cannot use the mprotect trick with PJS |
michael@0 | 708 | * code and ignore the TriggerCallbackAnyThreadDontStopIon trigger. |
michael@0 | 709 | */ |
michael@0 | 710 | mozilla::Atomic<bool, mozilla::Relaxed> interruptPar; |
michael@0 | 711 | #endif |
michael@0 | 712 | |
michael@0 | 713 | /* Set when handling a signal for a thread associated with this runtime. */ |
michael@0 | 714 | bool handlingSignal; |
michael@0 | 715 | |
michael@0 | 716 | JSInterruptCallback interruptCallback; |
michael@0 | 717 | |
michael@0 | 718 | #ifdef DEBUG |
michael@0 | 719 | void assertCanLock(js::RuntimeLock which); |
michael@0 | 720 | #else |
michael@0 | 721 | void assertCanLock(js::RuntimeLock which) {} |
michael@0 | 722 | #endif |
michael@0 | 723 | |
michael@0 | 724 | private: |
michael@0 | 725 | /* |
michael@0 | 726 | * Lock taken when triggering an interrupt from another thread. |
michael@0 | 727 | * Protects all data that is touched in this process. |
michael@0 | 728 | */ |
michael@0 | 729 | #ifdef JS_THREADSAFE |
michael@0 | 730 | PRLock *interruptLock; |
michael@0 | 731 | PRThread *interruptLockOwner; |
michael@0 | 732 | #else |
michael@0 | 733 | bool interruptLockTaken; |
michael@0 | 734 | #endif // JS_THREADSAFE |
michael@0 | 735 | public: |
michael@0 | 736 | |
michael@0 | 737 | class AutoLockForInterrupt { |
michael@0 | 738 | JSRuntime *rt; |
michael@0 | 739 | public: |
michael@0 | 740 | AutoLockForInterrupt(JSRuntime *rt MOZ_GUARD_OBJECT_NOTIFIER_PARAM) : rt(rt) { |
michael@0 | 741 | MOZ_GUARD_OBJECT_NOTIFIER_INIT; |
michael@0 | 742 | rt->assertCanLock(js::InterruptLock); |
michael@0 | 743 | #ifdef JS_THREADSAFE |
michael@0 | 744 | PR_Lock(rt->interruptLock); |
michael@0 | 745 | rt->interruptLockOwner = PR_GetCurrentThread(); |
michael@0 | 746 | #else |
michael@0 | 747 | rt->interruptLockTaken = true; |
michael@0 | 748 | #endif // JS_THREADSAFE |
michael@0 | 749 | } |
michael@0 | 750 | ~AutoLockForInterrupt() { |
michael@0 | 751 | JS_ASSERT(rt->currentThreadOwnsInterruptLock()); |
michael@0 | 752 | #ifdef JS_THREADSAFE |
michael@0 | 753 | rt->interruptLockOwner = nullptr; |
michael@0 | 754 | PR_Unlock(rt->interruptLock); |
michael@0 | 755 | #else |
michael@0 | 756 | rt->interruptLockTaken = false; |
michael@0 | 757 | #endif // JS_THREADSAFE |
michael@0 | 758 | } |
michael@0 | 759 | |
michael@0 | 760 | MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER |
michael@0 | 761 | }; |
michael@0 | 762 | |
michael@0 | 763 | bool currentThreadOwnsInterruptLock() { |
michael@0 | 764 | #if defined(JS_THREADSAFE) |
michael@0 | 765 | return interruptLockOwner == PR_GetCurrentThread(); |
michael@0 | 766 | #else |
michael@0 | 767 | return interruptLockTaken; |
michael@0 | 768 | #endif |
michael@0 | 769 | } |
michael@0 | 770 | |
michael@0 | 771 | #ifdef JS_THREADSAFE |
michael@0 | 772 | |
michael@0 | 773 | private: |
michael@0 | 774 | /* |
michael@0 | 775 | * Lock taken when using per-runtime or per-zone data that could otherwise |
michael@0 | 776 | * be accessed simultaneously by both the main thread and another thread |
michael@0 | 777 | * with an ExclusiveContext. |
michael@0 | 778 | * |
michael@0 | 779 | * Locking this only occurs if there is actually a thread other than the |
michael@0 | 780 | * main thread with an ExclusiveContext which could access such data. |
michael@0 | 781 | */ |
michael@0 | 782 | PRLock *exclusiveAccessLock; |
michael@0 | 783 | mozilla::DebugOnly<PRThread *> exclusiveAccessOwner; |
michael@0 | 784 | mozilla::DebugOnly<bool> mainThreadHasExclusiveAccess; |
michael@0 | 785 | |
michael@0 | 786 | /* Number of non-main threads with an ExclusiveContext. */ |
michael@0 | 787 | size_t numExclusiveThreads; |
michael@0 | 788 | |
michael@0 | 789 | friend class js::AutoLockForExclusiveAccess; |
michael@0 | 790 | |
michael@0 | 791 | public: |
michael@0 | 792 | void setUsedByExclusiveThread(JS::Zone *zone); |
michael@0 | 793 | void clearUsedByExclusiveThread(JS::Zone *zone); |
michael@0 | 794 | |
michael@0 | 795 | #endif // JS_THREADSAFE |
michael@0 | 796 | |
michael@0 | 797 | #ifdef DEBUG |
michael@0 | 798 | bool currentThreadHasExclusiveAccess() { |
michael@0 | 799 | #ifdef JS_THREADSAFE |
michael@0 | 800 | return (!numExclusiveThreads && mainThreadHasExclusiveAccess) || |
michael@0 | 801 | exclusiveAccessOwner == PR_GetCurrentThread(); |
michael@0 | 802 | #else |
michael@0 | 803 | return true; |
michael@0 | 804 | #endif |
michael@0 | 805 | } |
michael@0 | 806 | #endif // DEBUG |
michael@0 | 807 | |
michael@0 | 808 | bool exclusiveThreadsPresent() const { |
michael@0 | 809 | #ifdef JS_THREADSAFE |
michael@0 | 810 | return numExclusiveThreads > 0; |
michael@0 | 811 | #else |
michael@0 | 812 | return false; |
michael@0 | 813 | #endif |
michael@0 | 814 | } |
michael@0 | 815 | |
michael@0 | 816 | /* Embedders can use this zone however they wish. */ |
michael@0 | 817 | JS::Zone *systemZone; |
michael@0 | 818 | |
michael@0 | 819 | /* List of compartments and zones (protected by the GC lock). */ |
michael@0 | 820 | js::ZoneVector zones; |
michael@0 | 821 | |
michael@0 | 822 | /* How many compartments there are across all zones. */ |
michael@0 | 823 | size_t numCompartments; |
michael@0 | 824 | |
michael@0 | 825 | /* Locale-specific callbacks for string conversion. */ |
michael@0 | 826 | JSLocaleCallbacks *localeCallbacks; |
michael@0 | 827 | |
michael@0 | 828 | /* Default locale for Internationalization API */ |
michael@0 | 829 | char *defaultLocale; |
michael@0 | 830 | |
michael@0 | 831 | /* Default JSVersion. */ |
michael@0 | 832 | JSVersion defaultVersion_; |
michael@0 | 833 | |
michael@0 | 834 | #ifdef JS_THREADSAFE |
michael@0 | 835 | private: |
michael@0 | 836 | /* See comment for JS_AbortIfWrongThread in jsapi.h. */ |
michael@0 | 837 | void *ownerThread_; |
michael@0 | 838 | friend bool js::CurrentThreadCanAccessRuntime(JSRuntime *rt); |
michael@0 | 839 | public: |
michael@0 | 840 | #endif |
michael@0 | 841 | |
michael@0 | 842 | /* Temporary arena pool used while compiling and decompiling. */ |
michael@0 | 843 | static const size_t TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE = 4 * 1024; |
michael@0 | 844 | js::LifoAlloc tempLifoAlloc; |
michael@0 | 845 | |
michael@0 | 846 | /* |
michael@0 | 847 | * Free LIFO blocks are transferred to this allocator before being freed on |
michael@0 | 848 | * the background GC thread. |
michael@0 | 849 | */ |
michael@0 | 850 | js::LifoAlloc freeLifoAlloc; |
michael@0 | 851 | |
michael@0 | 852 | private: |
michael@0 | 853 | /* |
michael@0 | 854 | * Both of these allocators are used for regular expression code which is shared at the |
michael@0 | 855 | * thread-data level. |
michael@0 | 856 | */ |
michael@0 | 857 | JSC::ExecutableAllocator *execAlloc_; |
michael@0 | 858 | WTF::BumpPointerAllocator *bumpAlloc_; |
michael@0 | 859 | js::jit::JitRuntime *jitRuntime_; |
michael@0 | 860 | |
michael@0 | 861 | /* |
michael@0 | 862 | * Self-hosting state cloned on demand into other compartments. Shared with the parent |
michael@0 | 863 | * runtime if there is one. |
michael@0 | 864 | */ |
michael@0 | 865 | JSObject *selfHostingGlobal_; |
michael@0 | 866 | |
michael@0 | 867 | /* Space for interpreter frames. */ |
michael@0 | 868 | js::InterpreterStack interpreterStack_; |
michael@0 | 869 | |
michael@0 | 870 | JSC::ExecutableAllocator *createExecutableAllocator(JSContext *cx); |
michael@0 | 871 | WTF::BumpPointerAllocator *createBumpPointerAllocator(JSContext *cx); |
michael@0 | 872 | js::jit::JitRuntime *createJitRuntime(JSContext *cx); |
michael@0 | 873 | |
michael@0 | 874 | public: |
michael@0 | 875 | JSC::ExecutableAllocator *getExecAlloc(JSContext *cx) { |
michael@0 | 876 | return execAlloc_ ? execAlloc_ : createExecutableAllocator(cx); |
michael@0 | 877 | } |
michael@0 | 878 | JSC::ExecutableAllocator &execAlloc() { |
michael@0 | 879 | JS_ASSERT(execAlloc_); |
michael@0 | 880 | return *execAlloc_; |
michael@0 | 881 | } |
michael@0 | 882 | JSC::ExecutableAllocator *maybeExecAlloc() { |
michael@0 | 883 | return execAlloc_; |
michael@0 | 884 | } |
michael@0 | 885 | WTF::BumpPointerAllocator *getBumpPointerAllocator(JSContext *cx) { |
michael@0 | 886 | return bumpAlloc_ ? bumpAlloc_ : createBumpPointerAllocator(cx); |
michael@0 | 887 | } |
michael@0 | 888 | js::jit::JitRuntime *getJitRuntime(JSContext *cx) { |
michael@0 | 889 | return jitRuntime_ ? jitRuntime_ : createJitRuntime(cx); |
michael@0 | 890 | } |
michael@0 | 891 | js::jit::JitRuntime *jitRuntime() const { |
michael@0 | 892 | return jitRuntime_; |
michael@0 | 893 | } |
michael@0 | 894 | bool hasJitRuntime() const { |
michael@0 | 895 | return !!jitRuntime_; |
michael@0 | 896 | } |
michael@0 | 897 | js::InterpreterStack &interpreterStack() { |
michael@0 | 898 | return interpreterStack_; |
michael@0 | 899 | } |
michael@0 | 900 | |
michael@0 | 901 | //------------------------------------------------------------------------- |
michael@0 | 902 | // Self-hosting support |
michael@0 | 903 | //------------------------------------------------------------------------- |
michael@0 | 904 | |
michael@0 | 905 | bool initSelfHosting(JSContext *cx); |
michael@0 | 906 | void finishSelfHosting(); |
michael@0 | 907 | void markSelfHostingGlobal(JSTracer *trc); |
michael@0 | 908 | bool isSelfHostingGlobal(JSObject *global) { |
michael@0 | 909 | return global == selfHostingGlobal_; |
michael@0 | 910 | } |
michael@0 | 911 | bool isSelfHostingCompartment(JSCompartment *comp); |
michael@0 | 912 | bool cloneSelfHostedFunctionScript(JSContext *cx, js::Handle<js::PropertyName*> name, |
michael@0 | 913 | js::Handle<JSFunction*> targetFun); |
michael@0 | 914 | bool cloneSelfHostedValue(JSContext *cx, js::Handle<js::PropertyName*> name, |
michael@0 | 915 | js::MutableHandleValue vp); |
michael@0 | 916 | |
michael@0 | 917 | //------------------------------------------------------------------------- |
michael@0 | 918 | // Locale information |
michael@0 | 919 | //------------------------------------------------------------------------- |
michael@0 | 920 | |
michael@0 | 921 | /* |
michael@0 | 922 | * Set the default locale for the ECMAScript Internationalization API |
michael@0 | 923 | * (Intl.Collator, Intl.NumberFormat, Intl.DateTimeFormat). |
michael@0 | 924 | * Note that the Internationalization API encourages clients to |
michael@0 | 925 | * specify their own locales. |
michael@0 | 926 | * The locale string remains owned by the caller. |
michael@0 | 927 | */ |
michael@0 | 928 | bool setDefaultLocale(const char *locale); |
michael@0 | 929 | |
michael@0 | 930 | /* Reset the default locale to OS defaults. */ |
michael@0 | 931 | void resetDefaultLocale(); |
michael@0 | 932 | |
michael@0 | 933 | /* Gets current default locale. String remains owned by context. */ |
michael@0 | 934 | const char *getDefaultLocale(); |
michael@0 | 935 | |
michael@0 | 936 | JSVersion defaultVersion() { return defaultVersion_; } |
michael@0 | 937 | void setDefaultVersion(JSVersion v) { defaultVersion_ = v; } |
michael@0 | 938 | |
michael@0 | 939 | /* Base address of the native stack for the current thread. */ |
michael@0 | 940 | uintptr_t nativeStackBase; |
michael@0 | 941 | |
michael@0 | 942 | /* The native stack size limit that runtime should not exceed. */ |
michael@0 | 943 | size_t nativeStackQuota[js::StackKindCount]; |
michael@0 | 944 | |
michael@0 | 945 | /* Context create/destroy callback. */ |
michael@0 | 946 | JSContextCallback cxCallback; |
michael@0 | 947 | void *cxCallbackData; |
michael@0 | 948 | |
michael@0 | 949 | /* Compartment destroy callback. */ |
michael@0 | 950 | JSDestroyCompartmentCallback destroyCompartmentCallback; |
michael@0 | 951 | |
michael@0 | 952 | /* Zone destroy callback. */ |
michael@0 | 953 | JSZoneCallback destroyZoneCallback; |
michael@0 | 954 | |
michael@0 | 955 | /* Zone sweep callback. */ |
michael@0 | 956 | JSZoneCallback sweepZoneCallback; |
michael@0 | 957 | |
michael@0 | 958 | /* Call this to get the name of a compartment. */ |
michael@0 | 959 | JSCompartmentNameCallback compartmentNameCallback; |
michael@0 | 960 | |
michael@0 | 961 | js::ActivityCallback activityCallback; |
michael@0 | 962 | void *activityCallbackArg; |
michael@0 | 963 | void triggerActivityCallback(bool active); |
michael@0 | 964 | |
michael@0 | 965 | #ifdef JS_THREADSAFE |
michael@0 | 966 | /* The request depth for this thread. */ |
michael@0 | 967 | unsigned requestDepth; |
michael@0 | 968 | |
michael@0 | 969 | # ifdef DEBUG |
michael@0 | 970 | unsigned checkRequestDepth; |
michael@0 | 971 | # endif |
michael@0 | 972 | #endif |
michael@0 | 973 | |
michael@0 | 974 | #ifdef DEBUG |
michael@0 | 975 | /* |
michael@0 | 976 | * To help embedders enforce their invariants, we allow them to specify in |
michael@0 | 977 | * advance which JSContext should be passed to JSAPI calls. If this is set |
michael@0 | 978 | * to a non-null value, the assertSameCompartment machinery does double- |
michael@0 | 979 | * duty (in debug builds) to verify that it matches the cx being used. |
michael@0 | 980 | */ |
michael@0 | 981 | JSContext *activeContext; |
michael@0 | 982 | #endif |
michael@0 | 983 | |
michael@0 | 984 | /* Garbage collector state, used by jsgc.c. */ |
michael@0 | 985 | |
michael@0 | 986 | /* Garbage collector state has been successfully initialized. */ |
michael@0 | 987 | bool gcInitialized; |
michael@0 | 988 | |
michael@0 | 989 | /* |
michael@0 | 990 | * Set of all GC chunks with at least one allocated thing. The |
michael@0 | 991 | * conservative GC uses it to quickly check if a possible GC thing points |
michael@0 | 992 | * into an allocated chunk. |
michael@0 | 993 | */ |
michael@0 | 994 | js::GCChunkSet gcChunkSet; |
michael@0 | 995 | |
michael@0 | 996 | /* |
michael@0 | 997 | * Doubly-linked lists of chunks from user and system compartments. The GC |
michael@0 | 998 | * allocates its arenas from the corresponding list and when all arenas |
michael@0 | 999 | * in the list head are taken, then the chunk is removed from the list. |
michael@0 | 1000 | * During the GC when all arenas in a chunk become free, that chunk is |
michael@0 | 1001 | * removed from the list and scheduled for release. |
michael@0 | 1002 | */ |
michael@0 | 1003 | js::gc::Chunk *gcSystemAvailableChunkListHead; |
michael@0 | 1004 | js::gc::Chunk *gcUserAvailableChunkListHead; |
michael@0 | 1005 | js::gc::ChunkPool gcChunkPool; |
michael@0 | 1006 | |
michael@0 | 1007 | js::RootedValueMap gcRootsHash; |
michael@0 | 1008 | |
michael@0 | 1009 | /* This is updated by both the main and GC helper threads. */ |
michael@0 | 1010 | mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gcBytes; |
michael@0 | 1011 | |
michael@0 | 1012 | size_t gcMaxBytes; |
michael@0 | 1013 | size_t gcMaxMallocBytes; |
michael@0 | 1014 | |
michael@0 | 1015 | /* |
michael@0 | 1016 | * Number of the committed arenas in all GC chunks including empty chunks. |
michael@0 | 1017 | */ |
michael@0 | 1018 | mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> gcNumArenasFreeCommitted; |
michael@0 | 1019 | js::GCMarker gcMarker; |
michael@0 | 1020 | void *gcVerifyPreData; |
michael@0 | 1021 | void *gcVerifyPostData; |
michael@0 | 1022 | bool gcChunkAllocationSinceLastGC; |
michael@0 | 1023 | int64_t gcNextFullGCTime; |
michael@0 | 1024 | int64_t gcLastGCTime; |
michael@0 | 1025 | int64_t gcJitReleaseTime; |
michael@0 | 1026 | private: |
michael@0 | 1027 | JSGCMode gcMode_; |
michael@0 | 1028 | |
michael@0 | 1029 | public: |
michael@0 | 1030 | JSGCMode gcMode() const { return gcMode_; } |
michael@0 | 1031 | void setGCMode(JSGCMode mode) { |
michael@0 | 1032 | gcMode_ = mode; |
michael@0 | 1033 | gcMarker.setGCMode(mode); |
michael@0 | 1034 | } |
michael@0 | 1035 | |
michael@0 | 1036 | size_t gcAllocationThreshold; |
michael@0 | 1037 | bool gcHighFrequencyGC; |
michael@0 | 1038 | uint64_t gcHighFrequencyTimeThreshold; |
michael@0 | 1039 | uint64_t gcHighFrequencyLowLimitBytes; |
michael@0 | 1040 | uint64_t gcHighFrequencyHighLimitBytes; |
michael@0 | 1041 | double gcHighFrequencyHeapGrowthMax; |
michael@0 | 1042 | double gcHighFrequencyHeapGrowthMin; |
michael@0 | 1043 | double gcLowFrequencyHeapGrowth; |
michael@0 | 1044 | bool gcDynamicHeapGrowth; |
michael@0 | 1045 | bool gcDynamicMarkSlice; |
michael@0 | 1046 | uint64_t gcDecommitThreshold; |
michael@0 | 1047 | |
michael@0 | 1048 | /* During shutdown, the GC needs to clean up every possible object. */ |
michael@0 | 1049 | bool gcShouldCleanUpEverything; |
michael@0 | 1050 | |
michael@0 | 1051 | /* |
michael@0 | 1052 | * The gray bits can become invalid if UnmarkGray overflows the stack. A |
michael@0 | 1053 | * full GC will reset this bit, since it fills in all the gray bits. |
michael@0 | 1054 | */ |
michael@0 | 1055 | bool gcGrayBitsValid; |
michael@0 | 1056 | |
michael@0 | 1057 | /* |
michael@0 | 1058 | * These flags must be kept separate so that a thread requesting a |
michael@0 | 1059 | * compartment GC doesn't cancel another thread's concurrent request for a |
michael@0 | 1060 | * full GC. |
michael@0 | 1061 | */ |
michael@0 | 1062 | volatile uintptr_t gcIsNeeded; |
michael@0 | 1063 | |
michael@0 | 1064 | js::gcstats::Statistics gcStats; |
michael@0 | 1065 | |
michael@0 | 1066 | /* Incremented on every GC slice. */ |
michael@0 | 1067 | uint64_t gcNumber; |
michael@0 | 1068 | |
michael@0 | 1069 | /* The gcNumber at the time of the most recent GC's first slice. */ |
michael@0 | 1070 | uint64_t gcStartNumber; |
michael@0 | 1071 | |
michael@0 | 1072 | /* Whether the currently running GC can finish in multiple slices. */ |
michael@0 | 1073 | bool gcIsIncremental; |
michael@0 | 1074 | |
michael@0 | 1075 | /* Whether all compartments are being collected in first GC slice. */ |
michael@0 | 1076 | bool gcIsFull; |
michael@0 | 1077 | |
michael@0 | 1078 | /* The reason that an interrupt-triggered GC should be called. */ |
michael@0 | 1079 | JS::gcreason::Reason gcTriggerReason; |
michael@0 | 1080 | |
michael@0 | 1081 | /* |
michael@0 | 1082 | * If this is true, all marked objects must belong to a compartment being |
michael@0 | 1083 | * GCed. This is used to look for compartment bugs. |
michael@0 | 1084 | */ |
michael@0 | 1085 | bool gcStrictCompartmentChecking; |
michael@0 | 1086 | |
michael@0 | 1087 | #ifdef DEBUG |
michael@0 | 1088 | /* |
michael@0 | 1089 | * If this is 0, all cross-compartment proxies must be registered in the |
michael@0 | 1090 | * wrapper map. This checking must be disabled temporarily while creating |
michael@0 | 1091 | * new wrappers. When non-zero, this records the recursion depth of wrapper |
michael@0 | 1092 | * creation. |
michael@0 | 1093 | */ |
michael@0 | 1094 | uintptr_t gcDisableStrictProxyCheckingCount; |
michael@0 | 1095 | #else |
michael@0 | 1096 | uintptr_t unused1; |
michael@0 | 1097 | #endif |
michael@0 | 1098 | |
michael@0 | 1099 | /* |
michael@0 | 1100 | * The current incremental GC phase. This is also used internally in |
michael@0 | 1101 | * non-incremental GC. |
michael@0 | 1102 | */ |
michael@0 | 1103 | js::gc::State gcIncrementalState; |
michael@0 | 1104 | |
michael@0 | 1105 | /* Indicates that the last incremental slice exhausted the mark stack. */ |
michael@0 | 1106 | bool gcLastMarkSlice; |
michael@0 | 1107 | |
michael@0 | 1108 | /* Whether any sweeping will take place in the separate GC helper thread. */ |
michael@0 | 1109 | bool gcSweepOnBackgroundThread; |
michael@0 | 1110 | |
michael@0 | 1111 | /* Whether any black->gray edges were found during marking. */ |
michael@0 | 1112 | bool gcFoundBlackGrayEdges; |
michael@0 | 1113 | |
michael@0 | 1114 | /* List head of zones to be swept in the background. */ |
michael@0 | 1115 | JS::Zone *gcSweepingZones; |
michael@0 | 1116 | |
michael@0 | 1117 | /* Index of current zone group (for stats). */ |
michael@0 | 1118 | unsigned gcZoneGroupIndex; |
michael@0 | 1119 | |
michael@0 | 1120 | /* |
michael@0 | 1121 | * Incremental sweep state. |
michael@0 | 1122 | */ |
michael@0 | 1123 | JS::Zone *gcZoneGroups; |
michael@0 | 1124 | JS::Zone *gcCurrentZoneGroup; |
michael@0 | 1125 | int gcSweepPhase; |
michael@0 | 1126 | JS::Zone *gcSweepZone; |
michael@0 | 1127 | int gcSweepKindIndex; |
michael@0 | 1128 | bool gcAbortSweepAfterCurrentGroup; |
michael@0 | 1129 | |
michael@0 | 1130 | /* |
michael@0 | 1131 | * List head of arenas allocated during the sweep phase. |
michael@0 | 1132 | */ |
michael@0 | 1133 | js::gc::ArenaHeader *gcArenasAllocatedDuringSweep; |
michael@0 | 1134 | |
michael@0 | 1135 | #ifdef DEBUG |
michael@0 | 1136 | js::gc::MarkingValidator *gcMarkingValidator; |
michael@0 | 1137 | #endif |
michael@0 | 1138 | |
michael@0 | 1139 | /* |
michael@0 | 1140 | * Indicates that a GC slice has taken place in the middle of an animation |
michael@0 | 1141 | * frame, rather than at the beginning. In this case, the next slice will be |
michael@0 | 1142 | * delayed so that we don't get back-to-back slices. |
michael@0 | 1143 | */ |
michael@0 | 1144 | volatile uintptr_t gcInterFrameGC; |
michael@0 | 1145 | |
michael@0 | 1146 | /* Default budget for incremental GC slice. See SliceBudget in jsgc.h. */ |
michael@0 | 1147 | int64_t gcSliceBudget; |
michael@0 | 1148 | |
michael@0 | 1149 | /* |
michael@0 | 1150 | * We disable incremental GC if we encounter a js::Class with a trace hook |
michael@0 | 1151 | * that does not implement write barriers. |
michael@0 | 1152 | */ |
michael@0 | 1153 | bool gcIncrementalEnabled; |
michael@0 | 1154 | |
michael@0 | 1155 | /* |
michael@0 | 1156 | * GGC can be enabled from the command line while testing. |
michael@0 | 1157 | */ |
michael@0 | 1158 | unsigned gcGenerationalDisabled; |
michael@0 | 1159 | |
michael@0 | 1160 | /* |
michael@0 | 1161 | * This is true if we are in the middle of a brain transplant (e.g., |
michael@0 | 1162 | * JS_TransplantObject) or some other operation that can manipulate |
michael@0 | 1163 | * dead zones. |
michael@0 | 1164 | */ |
michael@0 | 1165 | bool gcManipulatingDeadZones; |
michael@0 | 1166 | |
michael@0 | 1167 | /* |
michael@0 | 1168 | * This field is incremented each time we mark an object inside a |
michael@0 | 1169 | * zone with no incoming cross-compartment pointers. Typically if |
michael@0 | 1170 | * this happens it signals that an incremental GC is marking too much |
michael@0 | 1171 | * stuff. At various times we check this counter and, if it has changed, we |
michael@0 | 1172 | * run an immediate, non-incremental GC to clean up the dead |
michael@0 | 1173 | * zones. This should happen very rarely. |
michael@0 | 1174 | */ |
michael@0 | 1175 | unsigned gcObjectsMarkedInDeadZones; |
michael@0 | 1176 | |
michael@0 | 1177 | bool gcPoke; |
michael@0 | 1178 | |
michael@0 | 1179 | volatile js::HeapState heapState; |
michael@0 | 1180 | |
michael@0 | 1181 | bool isHeapBusy() { return heapState != js::Idle; } |
michael@0 | 1182 | bool isHeapMajorCollecting() { return heapState == js::MajorCollecting; } |
michael@0 | 1183 | bool isHeapMinorCollecting() { return heapState == js::MinorCollecting; } |
michael@0 | 1184 | bool isHeapCollecting() { return isHeapMajorCollecting() || isHeapMinorCollecting(); } |
michael@0 | 1185 | |
michael@0 | 1186 | #ifdef JSGC_GENERATIONAL |
michael@0 | 1187 | js::Nursery gcNursery; |
michael@0 | 1188 | js::gc::StoreBuffer gcStoreBuffer; |
michael@0 | 1189 | #endif |
michael@0 | 1190 | |
michael@0 | 1191 | /* |
michael@0 | 1192 | * These options control the zealousness of the GC. The fundamental values |
michael@0 | 1193 | * are gcNextScheduled and gcDebugCompartmentGC. At every allocation, |
michael@0 | 1194 | * gcNextScheduled is decremented. When it reaches zero, we do either a |
michael@0 | 1195 | * full or a compartmental GC, based on gcDebugCompartmentGC. |
michael@0 | 1196 | * |
michael@0 | 1197 | * At this point, if gcZeal_ is one of the types that trigger periodic |
michael@0 | 1198 | * collection, then gcNextScheduled is reset to the value of |
michael@0 | 1199 | * gcZealFrequency. Otherwise, no additional GCs take place. |
michael@0 | 1200 | * |
michael@0 | 1201 | * You can control these values in several ways: |
michael@0 | 1202 | * - Pass the -Z flag to the shell (see the usage info for details) |
michael@0 | 1203 | * - Call gczeal() or schedulegc() from inside shell-executed JS code |
michael@0 | 1204 | * (see the help for details) |
michael@0 | 1205 | * |
michael@0 | 1206 | * If gcZeal_ == 1 then we perform GCs in select places (during MaybeGC and |
michael@0 | 1207 | * whenever a GC poke happens). This option is mainly useful to embedders. |
michael@0 | 1208 | * |
michael@0 | 1209 | * We use gcZeal_ == 4 to enable write barrier verification. See the comment |
michael@0 | 1210 | * in jsgc.cpp for more information about this. |
michael@0 | 1211 | * |
michael@0 | 1212 | * gcZeal_ values from 8 to 10 periodically run different types of |
michael@0 | 1213 | * incremental GC. |
michael@0 | 1214 | */ |
michael@0 | 1215 | #ifdef JS_GC_ZEAL |
michael@0 | 1216 | int gcZeal_; |
michael@0 | 1217 | int gcZealFrequency; |
michael@0 | 1218 | int gcNextScheduled; |
michael@0 | 1219 | bool gcDeterministicOnly; |
michael@0 | 1220 | int gcIncrementalLimit; |
michael@0 | 1221 | |
michael@0 | 1222 | js::Vector<JSObject *, 0, js::SystemAllocPolicy> gcSelectedForMarking; |
michael@0 | 1223 | |
michael@0 | 1224 | int gcZeal() { return gcZeal_; } |
michael@0 | 1225 | |
michael@0 | 1226 | bool upcomingZealousGC() { |
michael@0 | 1227 | return gcNextScheduled == 1; |
michael@0 | 1228 | } |
michael@0 | 1229 | |
michael@0 | 1230 | bool needZealousGC() { |
michael@0 | 1231 | if (gcNextScheduled > 0 && --gcNextScheduled == 0) { |
michael@0 | 1232 | if (gcZeal() == js::gc::ZealAllocValue || |
michael@0 | 1233 | gcZeal() == js::gc::ZealGenerationalGCValue || |
michael@0 | 1234 | (gcZeal() >= js::gc::ZealIncrementalRootsThenFinish && |
michael@0 | 1235 | gcZeal() <= js::gc::ZealIncrementalMultipleSlices)) |
michael@0 | 1236 | { |
michael@0 | 1237 | gcNextScheduled = gcZealFrequency; |
michael@0 | 1238 | } |
michael@0 | 1239 | return true; |
michael@0 | 1240 | } |
michael@0 | 1241 | return false; |
michael@0 | 1242 | } |
michael@0 | 1243 | #else |
michael@0 | 1244 | int gcZeal() { return 0; } |
michael@0 | 1245 | bool upcomingZealousGC() { return false; } |
michael@0 | 1246 | bool needZealousGC() { return false; } |
michael@0 | 1247 | #endif |
michael@0 | 1248 | |
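    // Illustrative sketch, not part of this header: under JS_GC_ZEAL an
    // allocation path can consult needZealousGC() to force a debug-only
    // collection. Assuming a debug-GC helper along the lines of RunDebugGC
    // (the real zeal machinery lives in jsgc.cpp), a call site would look
    // roughly like:
    //
    //     #ifdef JS_GC_ZEAL
    //     if (cx->runtime()->needZealousGC())
    //         RunDebugGC(cx);
    //     #endif
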
michael@0 | 1249 | bool gcValidate; |
michael@0 | 1250 | bool gcFullCompartmentChecks; |
michael@0 | 1251 | |
michael@0 | 1252 | JSGCCallback gcCallback; |
michael@0 | 1253 | JS::GCSliceCallback gcSliceCallback; |
michael@0 | 1254 | JSFinalizeCallback gcFinalizeCallback; |
michael@0 | 1255 | |
michael@0 | 1256 | void *gcCallbackData; |
michael@0 | 1257 | |
michael@0 | 1258 | private: |
michael@0 | 1259 | /* |
michael@0 | 1260 | * Malloc counter to measure memory pressure for GC scheduling. It runs |
michael@0 | 1261 | * from gcMaxMallocBytes down to zero. |
michael@0 | 1262 | */ |
michael@0 | 1263 | mozilla::Atomic<ptrdiff_t, mozilla::ReleaseAcquire> gcMallocBytes; |
michael@0 | 1264 | |
michael@0 | 1265 | /* |
michael@0 | 1266 | * Whether a GC has been triggered as a result of gcMallocBytes falling |
michael@0 | 1267 | * below zero. |
michael@0 | 1268 | */ |
michael@0 | 1269 | mozilla::Atomic<bool, mozilla::ReleaseAcquire> gcMallocGCTriggered; |
michael@0 | 1270 | |
michael@0 | 1271 | #ifdef JS_ARM_SIMULATOR |
michael@0 | 1272 | js::jit::SimulatorRuntime *simulatorRuntime_; |
michael@0 | 1273 | #endif |
michael@0 | 1274 | |
michael@0 | 1275 | public: |
michael@0 | 1276 | void setNeedsBarrier(bool needs) { |
michael@0 | 1277 | needsBarrier_ = needs; |
michael@0 | 1278 | } |
michael@0 | 1279 | |
michael@0 | 1280 | struct ExtraTracer { |
michael@0 | 1281 | JSTraceDataOp op; |
michael@0 | 1282 | void *data; |
michael@0 | 1283 | |
michael@0 | 1284 | ExtraTracer() |
michael@0 | 1285 | : op(nullptr), data(nullptr) |
michael@0 | 1286 | {} |
michael@0 | 1287 | ExtraTracer(JSTraceDataOp op, void *data) |
michael@0 | 1288 | : op(op), data(data) |
michael@0 | 1289 | {} |
michael@0 | 1290 | }; |
michael@0 | 1291 | |
michael@0 | 1292 | #ifdef JS_ARM_SIMULATOR |
michael@0 | 1293 | js::jit::SimulatorRuntime *simulatorRuntime() const; |
michael@0 | 1294 | void setSimulatorRuntime(js::jit::SimulatorRuntime *srt); |
michael@0 | 1295 | #endif |
michael@0 | 1296 | |
michael@0 | 1297 | /* |
michael@0 | 1298 |      * Trace operations used to trace embedding-specific GC roots: a vector of |
michael@0 | 1299 |      * black-root tracers and a single gray-root tracer. The black/gray |
michael@0 | 1300 |      * distinction is only relevant to the cycle collector. |
michael@0 | 1302 | */ |
michael@0 | 1303 | typedef js::Vector<ExtraTracer, 4, js::SystemAllocPolicy> ExtraTracerVector; |
michael@0 | 1304 | ExtraTracerVector gcBlackRootTracers; |
michael@0 | 1305 | ExtraTracer gcGrayRootTracer; |
michael@0 | 1306 | |
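    // Illustrative embedder-side sketch (an assumption, not defined in this
    // file): the public jsapi.h entry point JS_AddExtraGCRootsTracer is the
    // usual way a black-root tracer ends up in gcBlackRootTracers. The
    // MyEmbedderData type and the tracing body are hypothetical:
    //
    //     static void
    //     TraceMyRoots(JSTracer *trc, void *data)
    //     {
    //         MyEmbedderData *d = static_cast<MyEmbedderData *>(data);
    //         // mark the embedder-owned GC things reachable from |d| here
    //     }
    //
    //     JS_AddExtraGCRootsTracer(rt, TraceMyRoots, myEmbedderData);
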
michael@0 | 1307 | /* |
michael@0 | 1308 | * The GC can only safely decommit memory when the page size of the |
michael@0 | 1309 | * running process matches the compiled arena size. |
michael@0 | 1310 | */ |
michael@0 | 1311 | size_t gcSystemPageSize; |
michael@0 | 1312 | |
michael@0 | 1313 | /* The OS allocation granularity may not match the page size. */ |
michael@0 | 1314 | size_t gcSystemAllocGranularity; |
michael@0 | 1315 | |
michael@0 | 1316 | /* Strong references to scripts, held for the PCCount profiling API. */ |
michael@0 | 1317 | js::ScriptAndCountsVector *scriptAndCountsVector; |
michael@0 | 1318 | |
michael@0 | 1319 | /* Well-known numbers held for use by this runtime's contexts. */ |
michael@0 | 1320 | const js::Value NaNValue; |
michael@0 | 1321 | const js::Value negativeInfinityValue; |
michael@0 | 1322 | const js::Value positiveInfinityValue; |
michael@0 | 1323 | |
michael@0 | 1324 | js::PropertyName *emptyString; |
michael@0 | 1325 | |
michael@0 | 1326 | /* List of active contexts sharing this runtime. */ |
michael@0 | 1327 | mozilla::LinkedList<JSContext> contextList; |
michael@0 | 1328 | |
michael@0 | 1329 | bool hasContexts() const { |
michael@0 | 1330 | return !contextList.isEmpty(); |
michael@0 | 1331 | } |
michael@0 | 1332 | |
michael@0 | 1333 | mozilla::ScopedDeletePtr<js::SourceHook> sourceHook; |
michael@0 | 1334 | |
michael@0 | 1335 | /* Per-runtime debug hooks -- see js/OldDebugAPI.h. */ |
michael@0 | 1336 | JSDebugHooks debugHooks; |
michael@0 | 1337 | |
michael@0 | 1338 | /* If true, new compartments are initially in debug mode. */ |
michael@0 | 1339 | bool debugMode; |
michael@0 | 1340 | |
michael@0 | 1341 | /* SPS profiling metadata */ |
michael@0 | 1342 | js::SPSProfiler spsProfiler; |
michael@0 | 1343 | |
michael@0 | 1344 | /* If true, new scripts must be created with PC counter information. */ |
michael@0 | 1345 | bool profilingScripts; |
michael@0 | 1346 | |
michael@0 | 1347 | /* Always preserve JIT code during GCs, for testing. */ |
michael@0 | 1348 | bool alwaysPreserveCode; |
michael@0 | 1349 | |
michael@0 | 1350 | /* Had an out-of-memory error which did not populate an exception. */ |
michael@0 | 1351 | bool hadOutOfMemory; |
michael@0 | 1352 | |
michael@0 | 1353 | /* A context has been created on this runtime. */ |
michael@0 | 1354 | bool haveCreatedContext; |
michael@0 | 1355 | |
michael@0 | 1356 | /* Linked list of all Debugger objects in the runtime. */ |
michael@0 | 1357 | mozilla::LinkedList<js::Debugger> debuggerList; |
michael@0 | 1358 | |
michael@0 | 1359 | /* |
michael@0 | 1360 | * Head of circular list of all enabled Debuggers that have |
michael@0 | 1361 | * onNewGlobalObject handler methods established. |
michael@0 | 1362 | */ |
michael@0 | 1363 | JSCList onNewGlobalObjectWatchers; |
michael@0 | 1364 | |
michael@0 | 1365 | /* Client opaque pointers */ |
michael@0 | 1366 | void *data; |
michael@0 | 1367 | |
michael@0 | 1368 | private: |
michael@0 | 1369 | /* Synchronize GC heap access between main thread and GCHelperThread. */ |
michael@0 | 1370 | PRLock *gcLock; |
michael@0 | 1371 | mozilla::DebugOnly<PRThread *> gcLockOwner; |
michael@0 | 1372 | |
michael@0 | 1373 | friend class js::GCHelperThread; |
michael@0 | 1374 | public: |
michael@0 | 1375 | |
michael@0 | 1376 | void lockGC() { |
michael@0 | 1377 | #ifdef JS_THREADSAFE |
michael@0 | 1378 | assertCanLock(js::GCLock); |
michael@0 | 1379 | PR_Lock(gcLock); |
michael@0 | 1380 | JS_ASSERT(!gcLockOwner); |
michael@0 | 1381 | #ifdef DEBUG |
michael@0 | 1382 | gcLockOwner = PR_GetCurrentThread(); |
michael@0 | 1383 | #endif |
michael@0 | 1384 | #endif |
michael@0 | 1385 | } |
michael@0 | 1386 | |
michael@0 | 1387 | void unlockGC() { |
michael@0 | 1388 | #ifdef JS_THREADSAFE |
michael@0 | 1389 | JS_ASSERT(gcLockOwner == PR_GetCurrentThread()); |
michael@0 | 1390 | gcLockOwner = nullptr; |
michael@0 | 1391 | PR_Unlock(gcLock); |
michael@0 | 1392 | #endif |
michael@0 | 1393 | } |
michael@0 | 1394 | |
michael@0 | 1395 | js::GCHelperThread gcHelperThread; |
michael@0 | 1396 | |
michael@0 | 1397 | #if defined(XP_MACOSX) && defined(JS_ION) |
michael@0 | 1398 | js::AsmJSMachExceptionHandler asmJSMachExceptionHandler; |
michael@0 | 1399 | #endif |
michael@0 | 1400 | |
michael@0 | 1401 | // Whether asm.js signal handlers have been installed and can be used for |
michael@0 | 1402 | // performing interrupt checks in loops. |
michael@0 | 1403 | private: |
michael@0 | 1404 | bool signalHandlersInstalled_; |
michael@0 | 1405 | public: |
michael@0 | 1406 | bool signalHandlersInstalled() const { |
michael@0 | 1407 | return signalHandlersInstalled_; |
michael@0 | 1408 | } |
michael@0 | 1409 | |
michael@0 | 1410 | private: |
michael@0 | 1411 | js::FreeOp defaultFreeOp_; |
michael@0 | 1412 | |
michael@0 | 1413 | public: |
michael@0 | 1414 | js::FreeOp *defaultFreeOp() { |
michael@0 | 1415 | return &defaultFreeOp_; |
michael@0 | 1416 | } |
michael@0 | 1417 | |
michael@0 | 1418 | uint32_t debuggerMutations; |
michael@0 | 1419 | |
michael@0 | 1420 | const JSSecurityCallbacks *securityCallbacks; |
michael@0 | 1421 | const js::DOMCallbacks *DOMcallbacks; |
michael@0 | 1422 | JSDestroyPrincipalsOp destroyPrincipals; |
michael@0 | 1423 | |
michael@0 | 1424 | /* Structured data callbacks are runtime-wide. */ |
michael@0 | 1425 | const JSStructuredCloneCallbacks *structuredCloneCallbacks; |
michael@0 | 1426 | |
michael@0 | 1427 | /* Call this to accumulate telemetry data. */ |
michael@0 | 1428 | JSAccumulateTelemetryDataCallback telemetryCallback; |
michael@0 | 1429 | |
michael@0 | 1430 | /* AsmJSCache callbacks are runtime-wide. */ |
michael@0 | 1431 | JS::AsmJSCacheOps asmJSCacheOps; |
michael@0 | 1432 | |
michael@0 | 1433 | /* |
michael@0 | 1434 | * The propertyRemovals counter is incremented for every JSObject::clear, |
michael@0 | 1435 | * and for each JSObject::remove method call that frees a slot in the given |
michael@0 | 1436 | * object. See js_NativeGet and js_NativeSet in jsobj.cpp. |
michael@0 | 1437 | */ |
michael@0 | 1438 | uint32_t propertyRemovals; |
michael@0 | 1439 | |
michael@0 | 1440 | #if !EXPOSE_INTL_API |
michael@0 | 1441 | /* Number localization, used by jsnum.cpp. */ |
michael@0 | 1442 | const char *thousandsSeparator; |
michael@0 | 1443 | const char *decimalSeparator; |
michael@0 | 1444 | const char *numGrouping; |
michael@0 | 1445 | #endif |
michael@0 | 1446 | |
michael@0 | 1447 | private: |
michael@0 | 1448 | js::MathCache *mathCache_; |
michael@0 | 1449 | js::MathCache *createMathCache(JSContext *cx); |
michael@0 | 1450 | public: |
michael@0 | 1451 | js::MathCache *getMathCache(JSContext *cx) { |
michael@0 | 1452 | return mathCache_ ? mathCache_ : createMathCache(cx); |
michael@0 | 1453 | } |
michael@0 | 1454 | js::MathCache *maybeGetMathCache() { |
michael@0 | 1455 | return mathCache_; |
michael@0 | 1456 | } |
michael@0 | 1457 | |
michael@0 | 1458 | js::GSNCache gsnCache; |
michael@0 | 1459 | js::ScopeCoordinateNameCache scopeCoordinateNameCache; |
michael@0 | 1460 | js::NewObjectCache newObjectCache; |
michael@0 | 1461 | js::NativeIterCache nativeIterCache; |
michael@0 | 1462 | js::SourceDataCache sourceDataCache; |
michael@0 | 1463 | js::EvalCache evalCache; |
michael@0 | 1464 | js::LazyScriptCache lazyScriptCache; |
michael@0 | 1465 | |
michael@0 | 1466 | js::DateTimeInfo dateTimeInfo; |
michael@0 | 1467 | |
michael@0 | 1468 | js::ConservativeGCData conservativeGC; |
michael@0 | 1469 | |
michael@0 | 1470 | // Pool of maps used during parse/emit. This may be modified by threads |
michael@0 | 1471 | // with an ExclusiveContext and requires a lock. Active compilations |
michael@0 | 1472 | // prevent the pool from being purged during GCs. |
michael@0 | 1473 | private: |
michael@0 | 1474 | js::frontend::ParseMapPool parseMapPool_; |
michael@0 | 1475 | unsigned activeCompilations_; |
michael@0 | 1476 | public: |
michael@0 | 1477 | js::frontend::ParseMapPool &parseMapPool() { |
michael@0 | 1478 | JS_ASSERT(currentThreadHasExclusiveAccess()); |
michael@0 | 1479 | return parseMapPool_; |
michael@0 | 1480 | } |
michael@0 | 1481 | bool hasActiveCompilations() { |
michael@0 | 1482 | return activeCompilations_ != 0; |
michael@0 | 1483 | } |
michael@0 | 1484 | void addActiveCompilation() { |
michael@0 | 1485 | JS_ASSERT(currentThreadHasExclusiveAccess()); |
michael@0 | 1486 | activeCompilations_++; |
michael@0 | 1487 | } |
michael@0 | 1488 | void removeActiveCompilation() { |
michael@0 | 1489 | JS_ASSERT(currentThreadHasExclusiveAccess()); |
michael@0 | 1490 | activeCompilations_--; |
michael@0 | 1491 | } |
michael@0 | 1492 | |
michael@0 | 1493 | // Count of AutoKeepAtoms instances on the main thread's stack. When any |
michael@0 | 1494 | // instances exist, atoms in the runtime will not be collected. Threads |
michael@0 | 1495 | // with an ExclusiveContext do not increment this value, but the presence |
michael@0 | 1496 | // of any such threads also inhibits collection of atoms. We don't scan the |
michael@0 | 1497 | // stacks of exclusive threads, so we need to avoid collecting their |
michael@0 | 1498 | // objects in another way. The only GC thing pointers they have are to |
michael@0 | 1499 | // their exclusive compartment (which is not collected) or to the atoms |
michael@0 | 1500 | // compartment. Therefore, we avoid collecting the atoms compartment when |
michael@0 | 1501 | // exclusive threads are running. |
michael@0 | 1502 | private: |
michael@0 | 1503 | unsigned keepAtoms_; |
michael@0 | 1504 | friend class js::AutoKeepAtoms; |
michael@0 | 1505 | public: |
michael@0 | 1506 | bool keepAtoms() { |
michael@0 | 1507 | JS_ASSERT(CurrentThreadCanAccessRuntime(this)); |
michael@0 | 1508 | return keepAtoms_ != 0 || exclusiveThreadsPresent(); |
michael@0 | 1509 | } |
michael@0 | 1510 | |
michael@0 | 1511 | private: |
michael@0 | 1512 | const JSPrincipals *trustedPrincipals_; |
michael@0 | 1513 | public: |
michael@0 | 1514 | void setTrustedPrincipals(const JSPrincipals *p) { trustedPrincipals_ = p; } |
michael@0 | 1515 | const JSPrincipals *trustedPrincipals() const { return trustedPrincipals_; } |
michael@0 | 1516 | |
michael@0 | 1517 | private: |
michael@0 | 1518 | bool beingDestroyed_; |
michael@0 | 1519 | public: |
michael@0 | 1520 | bool isBeingDestroyed() const { |
michael@0 | 1521 | return beingDestroyed_; |
michael@0 | 1522 | } |
michael@0 | 1523 | |
michael@0 | 1524 | private: |
michael@0 | 1525 | // Set of all atoms other than those in permanentAtoms and staticStrings. |
michael@0 | 1526 | // This may be modified by threads with an ExclusiveContext and requires |
michael@0 | 1527 | // a lock. |
michael@0 | 1528 | js::AtomSet *atoms_; |
michael@0 | 1529 | |
michael@0 | 1530 | // Compartment and associated zone containing all atoms in the runtime, |
michael@0 | 1531 |     // as well as runtime-wide IonCode stubs. The contents of this compartment |
michael@0 | 1532 | // may be modified by threads with an ExclusiveContext and requires a lock. |
michael@0 | 1533 | JSCompartment *atomsCompartment_; |
michael@0 | 1534 | |
michael@0 | 1535 | public: |
michael@0 | 1536 | bool initializeAtoms(JSContext *cx); |
michael@0 | 1537 | void finishAtoms(); |
michael@0 | 1538 | |
michael@0 | 1539 | void sweepAtoms(); |
michael@0 | 1540 | |
michael@0 | 1541 | js::AtomSet &atoms() { |
michael@0 | 1542 | JS_ASSERT(currentThreadHasExclusiveAccess()); |
michael@0 | 1543 | return *atoms_; |
michael@0 | 1544 | } |
michael@0 | 1545 | JSCompartment *atomsCompartment() { |
michael@0 | 1546 | JS_ASSERT(currentThreadHasExclusiveAccess()); |
michael@0 | 1547 | return atomsCompartment_; |
michael@0 | 1548 | } |
michael@0 | 1549 | |
michael@0 | 1550 | bool isAtomsCompartment(JSCompartment *comp) { |
michael@0 | 1551 | return comp == atomsCompartment_; |
michael@0 | 1552 | } |
michael@0 | 1553 | |
michael@0 | 1554 | // The atoms compartment is the only one in its zone. |
michael@0 | 1555 | inline bool isAtomsZone(JS::Zone *zone); |
michael@0 | 1556 | |
michael@0 | 1557 | bool activeGCInAtomsZone(); |
michael@0 | 1558 | |
michael@0 | 1559 | // Permanent atoms are fixed during initialization of the runtime and are |
michael@0 | 1560 | // not modified or collected until the runtime is destroyed. These may be |
michael@0 | 1561 | // shared with another, longer living runtime through |parentRuntime| and |
michael@0 | 1562 |     // shared with another, longer-lived runtime through |parentRuntime| and |
michael@0 | 1563 | |
michael@0 | 1564 | // Permanent atoms pre-allocated for general use. |
michael@0 | 1565 | js::StaticStrings *staticStrings; |
michael@0 | 1566 | |
michael@0 | 1567 | // Cached pointers to various permanent property names. |
michael@0 | 1568 | JSAtomState *commonNames; |
michael@0 | 1569 | |
michael@0 | 1570 | // All permanent atoms in the runtime, other than those in staticStrings. |
michael@0 | 1571 | js::AtomSet *permanentAtoms; |
michael@0 | 1572 | |
michael@0 | 1573 | bool transformToPermanentAtoms(); |
michael@0 | 1574 | |
michael@0 | 1575 | const JSWrapObjectCallbacks *wrapObjectCallbacks; |
michael@0 | 1576 | js::PreserveWrapperCallback preserveWrapperCallback; |
michael@0 | 1577 | |
michael@0 | 1578 | // Table of bytecode and other data that may be shared across scripts |
michael@0 | 1579 | // within the runtime. This may be modified by threads with an |
michael@0 | 1580 | // ExclusiveContext and requires a lock. |
michael@0 | 1581 | private: |
michael@0 | 1582 | js::ScriptDataTable scriptDataTable_; |
michael@0 | 1583 | public: |
michael@0 | 1584 | js::ScriptDataTable &scriptDataTable() { |
michael@0 | 1585 | JS_ASSERT(currentThreadHasExclusiveAccess()); |
michael@0 | 1586 | return scriptDataTable_; |
michael@0 | 1587 | } |
michael@0 | 1588 | |
michael@0 | 1589 | #ifdef DEBUG |
michael@0 | 1590 | size_t noGCOrAllocationCheck; |
michael@0 | 1591 | #endif |
michael@0 | 1592 | |
michael@0 | 1593 | bool jitSupportsFloatingPoint; |
michael@0 | 1594 | |
michael@0 | 1595 |     // Used to reset the stack limit after a signaled interrupt (i.e. jitStackLimit = -1) |
michael@0 | 1596 | // has been noticed by Ion/Baseline. |
michael@0 | 1597 | void resetJitStackLimit(); |
michael@0 | 1598 | |
michael@0 | 1599 | // Cache for jit::GetPcScript(). |
michael@0 | 1600 | js::jit::PcScriptCache *ionPcScriptCache; |
michael@0 | 1601 | |
michael@0 | 1602 | js::ThreadPool threadPool; |
michael@0 | 1603 | |
michael@0 | 1604 | js::DefaultJSContextCallback defaultJSContextCallback; |
michael@0 | 1605 | |
michael@0 | 1606 | js::CTypesActivityCallback ctypesActivityCallback; |
michael@0 | 1607 | |
michael@0 | 1608 | // Non-zero if this is a ForkJoin warmup execution. See |
michael@0 | 1609 | // js::ForkJoin() for more information. |
michael@0 | 1610 | uint32_t forkJoinWarmup; |
michael@0 | 1611 | |
michael@0 | 1612 | private: |
michael@0 | 1613 | // In certain cases, we want to optimize certain opcodes to typed instructions, |
michael@0 | 1614 | // to avoid carrying an extra register to feed into an unbox. Unfortunately, |
michael@0 | 1615 | // that's not always possible. For example, a GetPropertyCacheT could return a |
michael@0 | 1616 | // typed double, but if it takes its out-of-line path, it could return an |
michael@0 | 1617 | // object, and trigger invalidation. The invalidation bailout will consider the |
michael@0 | 1618 | // return value to be a double, and create a garbage Value. |
michael@0 | 1619 | // |
michael@0 | 1620 |     // To allow the GetPropertyCacheT optimization, we let GetPropertyCache |
michael@0 | 1621 |     // override the return value at the top of the stack - the |
michael@0 | 1622 | // value that will be temporarily corrupt. This special override value is set |
michael@0 | 1623 | // only in callVM() targets that are about to return *and* have invalidated |
michael@0 | 1624 | // their callee. |
michael@0 | 1625 | js::Value ionReturnOverride_; |
michael@0 | 1626 | |
michael@0 | 1627 | #ifdef JS_THREADSAFE |
michael@0 | 1628 | static mozilla::Atomic<size_t> liveRuntimesCount; |
michael@0 | 1629 | #else |
michael@0 | 1630 | static size_t liveRuntimesCount; |
michael@0 | 1631 | #endif |
michael@0 | 1632 | |
michael@0 | 1633 | public: |
michael@0 | 1634 | static bool hasLiveRuntimes() { |
michael@0 | 1635 | return liveRuntimesCount > 0; |
michael@0 | 1636 | } |
michael@0 | 1637 | |
michael@0 | 1638 | bool hasIonReturnOverride() const { |
michael@0 | 1639 | return !ionReturnOverride_.isMagic(); |
michael@0 | 1640 | } |
michael@0 | 1641 | js::Value takeIonReturnOverride() { |
michael@0 | 1642 | js::Value v = ionReturnOverride_; |
michael@0 | 1643 | ionReturnOverride_ = js::MagicValue(JS_ARG_POISON); |
michael@0 | 1644 | return v; |
michael@0 | 1645 | } |
michael@0 | 1646 | void setIonReturnOverride(const js::Value &v) { |
michael@0 | 1647 | JS_ASSERT(!hasIonReturnOverride()); |
michael@0 | 1648 | ionReturnOverride_ = v; |
michael@0 | 1649 | } |
michael@0 | 1650 | |
michael@0 | 1651 | JSRuntime(JSRuntime *parentRuntime, JSUseHelperThreads useHelperThreads); |
michael@0 | 1652 | ~JSRuntime(); |
michael@0 | 1653 | |
michael@0 | 1654 | bool init(uint32_t maxbytes); |
michael@0 | 1655 | |
michael@0 | 1656 | JSRuntime *thisFromCtor() { return this; } |
michael@0 | 1657 | |
michael@0 | 1658 | void setGCMaxMallocBytes(size_t value); |
michael@0 | 1659 | |
michael@0 | 1660 | void resetGCMallocBytes() { |
michael@0 | 1661 | gcMallocBytes = ptrdiff_t(gcMaxMallocBytes); |
michael@0 | 1662 | gcMallocGCTriggered = false; |
michael@0 | 1663 | } |
michael@0 | 1664 | |
michael@0 | 1665 | /* |
michael@0 | 1666 |      * Call this after allocating memory held by GC things, to update memory |
michael@0 | 1667 |      * pressure counters and trigger a GC if necessary. |
michael@0 | 1668 |      * |
michael@0 | 1669 |      * The function must be called outside the GC lock, and in case of an OOM |
michael@0 | 1670 |      * error the caller must ensure that no deadlock is possible during OOM |
michael@0 | 1671 |      * reporting. |
michael@0 | 1672 | */ |
michael@0 | 1673 | void updateMallocCounter(size_t nbytes); |
michael@0 | 1674 | void updateMallocCounter(JS::Zone *zone, size_t nbytes); |
michael@0 | 1675 | |
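    // Illustrative sketch (assumption): memory owned by a GC thing but
    // allocated outside the runtime's malloc_ wrappers is accounted for right
    // after allocation, so that GC scheduling sees the pressure:
    //
    //     void *buf = js_malloc(nbytes);
    //     if (buf)
    //         rt->updateMallocCounter(zone, nbytes);
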
michael@0 | 1676 | void reportAllocationOverflow() { js_ReportAllocationOverflow(nullptr); } |
michael@0 | 1677 | |
michael@0 | 1678 | bool isTooMuchMalloc() const { |
michael@0 | 1679 | return gcMallocBytes <= 0; |
michael@0 | 1680 | } |
michael@0 | 1681 | |
michael@0 | 1682 | /* |
michael@0 | 1683 | * The function must be called outside the GC lock. |
michael@0 | 1684 | */ |
michael@0 | 1685 | JS_FRIEND_API(void) onTooMuchMalloc(); |
michael@0 | 1686 | |
michael@0 | 1687 | /* |
michael@0 | 1688 | * This should be called after system malloc/realloc returns nullptr to try |
michael@0 | 1689 |      * to recover some memory or to report an error. Failures in malloc and |
michael@0 | 1690 |      * calloc are signaled by p == nullptr and p == reinterpret_cast<void *>(1). |
michael@0 | 1691 | * Other values of p mean a realloc failure. |
michael@0 | 1692 | * |
michael@0 | 1693 | * The function must be called outside the GC lock. |
michael@0 | 1694 | */ |
michael@0 | 1695 | JS_FRIEND_API(void *) onOutOfMemory(void *p, size_t nbytes); |
michael@0 | 1696 | JS_FRIEND_API(void *) onOutOfMemory(void *p, size_t nbytes, JSContext *cx); |
michael@0 | 1697 | |
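    // Illustrative sketch (assumption): following the convention documented
    // above, a failed system realloc can be retried after a last-ditch GC by
    // passing the old pointer through:
    //
    //     void *p2 = js_realloc(p, nbytes);
    //     if (!p2)
    //         p2 = rt->onOutOfMemory(p, nbytes, cx);  // non-null, non-1 p marks a realloc failure
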
michael@0 | 1698 | // Ways in which the interrupt callback on the runtime can be triggered, |
michael@0 | 1699 | // varying based on which thread is triggering the callback. |
michael@0 | 1700 | enum InterruptMode { |
michael@0 | 1701 | RequestInterruptMainThread, |
michael@0 | 1702 | RequestInterruptAnyThread, |
michael@0 | 1703 | RequestInterruptAnyThreadDontStopIon, |
michael@0 | 1704 | RequestInterruptAnyThreadForkJoin |
michael@0 | 1705 | }; |
michael@0 | 1706 | |
michael@0 | 1707 | void requestInterrupt(InterruptMode mode); |
michael@0 | 1708 | |
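    // Illustrative sketch (assumption): an embedder watchdog running on
    // another thread asks executing JS to stop at its next interrupt check:
    //
    //     rt->requestInterrupt(JSRuntime::RequestInterruptAnyThread);
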
michael@0 | 1709 | void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf, JS::RuntimeSizes *runtime); |
michael@0 | 1710 | |
michael@0 | 1711 | private: |
michael@0 | 1712 | JS::RuntimeOptions options_; |
michael@0 | 1713 | |
michael@0 | 1714 | JSUseHelperThreads useHelperThreads_; |
michael@0 | 1715 | |
michael@0 | 1716 | // Settings for how helper threads can be used. |
michael@0 | 1717 | bool parallelIonCompilationEnabled_; |
michael@0 | 1718 | bool parallelParsingEnabled_; |
michael@0 | 1719 | |
michael@0 | 1720 | // True iff this is a DOM Worker runtime. |
michael@0 | 1721 | bool isWorkerRuntime_; |
michael@0 | 1722 | |
michael@0 | 1723 | public: |
michael@0 | 1724 | |
michael@0 | 1725 | // This controls whether the JSRuntime is allowed to create any helper |
michael@0 | 1726 | // threads at all. This means both specific threads (background GC thread) |
michael@0 | 1727 | // and the general JS worker thread pool. |
michael@0 | 1728 | bool useHelperThreads() const { |
michael@0 | 1729 | #ifdef JS_THREADSAFE |
michael@0 | 1730 | return useHelperThreads_ == JS_USE_HELPER_THREADS; |
michael@0 | 1731 | #else |
michael@0 | 1732 | return false; |
michael@0 | 1733 | #endif |
michael@0 | 1734 | } |
michael@0 | 1735 | |
michael@0 | 1736 | // Note: these values may be toggled dynamically (in response to about:config |
michael@0 | 1737 | // prefs changing). |
michael@0 | 1738 | void setParallelIonCompilationEnabled(bool value) { |
michael@0 | 1739 | parallelIonCompilationEnabled_ = value; |
michael@0 | 1740 | } |
michael@0 | 1741 | bool canUseParallelIonCompilation() const { |
michael@0 | 1742 | return useHelperThreads() && |
michael@0 | 1743 | parallelIonCompilationEnabled_; |
michael@0 | 1744 | } |
michael@0 | 1745 | void setParallelParsingEnabled(bool value) { |
michael@0 | 1746 | parallelParsingEnabled_ = value; |
michael@0 | 1747 | } |
michael@0 | 1748 | bool canUseParallelParsing() const { |
michael@0 | 1749 | return useHelperThreads() && |
michael@0 | 1750 | parallelParsingEnabled_; |
michael@0 | 1751 | } |
michael@0 | 1752 | |
michael@0 | 1753 | void setIsWorkerRuntime() { |
michael@0 | 1754 | isWorkerRuntime_ = true; |
michael@0 | 1755 | } |
michael@0 | 1756 | bool isWorkerRuntime() const { |
michael@0 | 1757 | return isWorkerRuntime_; |
michael@0 | 1758 | } |
michael@0 | 1759 | |
michael@0 | 1760 | const JS::RuntimeOptions &options() const { |
michael@0 | 1761 | return options_; |
michael@0 | 1762 | } |
michael@0 | 1763 | JS::RuntimeOptions &options() { |
michael@0 | 1764 | return options_; |
michael@0 | 1765 | } |
michael@0 | 1766 | |
michael@0 | 1767 | #ifdef DEBUG |
michael@0 | 1768 | public: |
michael@0 | 1769 | js::AutoEnterPolicy *enteredPolicy; |
michael@0 | 1770 | #endif |
michael@0 | 1771 | |
michael@0 | 1772 | /* See comment for JS::SetLargeAllocationFailureCallback in jsapi.h. */ |
michael@0 | 1773 | JS::LargeAllocationFailureCallback largeAllocationFailureCallback; |
michael@0 | 1774 | /* See comment for JS::SetOutOfMemoryCallback in jsapi.h. */ |
michael@0 | 1775 | JS::OutOfMemoryCallback oomCallback; |
michael@0 | 1776 | |
michael@0 | 1777 | /* |
michael@0 | 1778 | * These variations of malloc/calloc/realloc will call the |
michael@0 | 1779 | * large-allocation-failure callback on OOM and retry the allocation. |
michael@0 | 1780 | */ |
michael@0 | 1781 | |
michael@0 | 1782 | static const unsigned LARGE_ALLOCATION = 25 * 1024 * 1024; |
michael@0 | 1783 | |
michael@0 | 1784 | void *callocCanGC(size_t bytes) { |
michael@0 | 1785 | void *p = calloc_(bytes); |
michael@0 | 1786 | if (MOZ_LIKELY(!!p)) |
michael@0 | 1787 | return p; |
michael@0 | 1788 | if (!largeAllocationFailureCallback || bytes < LARGE_ALLOCATION) |
michael@0 | 1789 | return nullptr; |
michael@0 | 1790 | largeAllocationFailureCallback(); |
michael@0 | 1791 | return onOutOfMemory(reinterpret_cast<void *>(1), bytes); |
michael@0 | 1792 | } |
michael@0 | 1793 | |
michael@0 | 1794 | void *reallocCanGC(void *p, size_t bytes) { |
michael@0 | 1795 | void *p2 = realloc_(p, bytes); |
michael@0 | 1796 | if (MOZ_LIKELY(!!p2)) |
michael@0 | 1797 | return p2; |
michael@0 | 1798 | if (!largeAllocationFailureCallback || bytes < LARGE_ALLOCATION) |
michael@0 | 1799 | return nullptr; |
michael@0 | 1800 | largeAllocationFailureCallback(); |
michael@0 | 1801 | return onOutOfMemory(p, bytes); |
michael@0 | 1802 | } |
michael@0 | 1803 | }; |
michael@0 | 1804 | |
michael@0 | 1805 | namespace js { |
michael@0 | 1806 | |
michael@0 | 1807 | // When entering JIT code, the calling JSContext* is stored into the thread's |
michael@0 | 1808 | // PerThreadData. This function retrieves the JSContext with the pre-condition |
michael@0 | 1809 | // that the caller is JIT code or C++ called directly from JIT code. This |
michael@0 | 1810 | // function should not be called from arbitrary locations since the JSContext |
michael@0 | 1811 | // may be the wrong one. |
michael@0 | 1812 | static inline JSContext * |
michael@0 | 1813 | GetJSContextFromJitCode() |
michael@0 | 1814 | { |
michael@0 | 1815 | JSContext *cx = TlsPerThreadData.get()->jitJSContext; |
michael@0 | 1816 | JS_ASSERT(cx); |
michael@0 | 1817 | return cx; |
michael@0 | 1818 | } |
michael@0 | 1819 | |
michael@0 | 1820 | /* |
michael@0 | 1821 | * Flags accompany script version data so that a) dynamically created scripts |
michael@0 | 1822 | * can inherit their caller's compile-time properties and b) scripts can be |
michael@0 | 1823 | * appropriately compared in the eval cache across global option changes. An |
michael@0 | 1824 | * example of the latter is enabling the top-level-anonymous-function-is-error |
michael@0 | 1825 | * option: subsequent evals of the same, previously-valid script text may have |
michael@0 | 1826 | * become invalid. |
michael@0 | 1827 | */ |
michael@0 | 1828 | namespace VersionFlags { |
michael@0 | 1829 | static const unsigned MASK = 0x0FFF; /* see JSVersion in jspubtd.h */ |
michael@0 | 1830 | } /* namespace VersionFlags */ |
michael@0 | 1831 | |
michael@0 | 1832 | static inline JSVersion |
michael@0 | 1833 | VersionNumber(JSVersion version) |
michael@0 | 1834 | { |
michael@0 | 1835 | return JSVersion(uint32_t(version) & VersionFlags::MASK); |
michael@0 | 1836 | } |
michael@0 | 1837 | |
michael@0 | 1838 | static inline JSVersion |
michael@0 | 1839 | VersionExtractFlags(JSVersion version) |
michael@0 | 1840 | { |
michael@0 | 1841 | return JSVersion(uint32_t(version) & ~VersionFlags::MASK); |
michael@0 | 1842 | } |
michael@0 | 1843 | |
michael@0 | 1844 | static inline void |
michael@0 | 1845 | VersionCopyFlags(JSVersion *version, JSVersion from) |
michael@0 | 1846 | { |
michael@0 | 1847 | *version = JSVersion(VersionNumber(*version) | VersionExtractFlags(from)); |
michael@0 | 1848 | } |
michael@0 | 1849 | |
michael@0 | 1850 | static inline bool |
michael@0 | 1851 | VersionHasFlags(JSVersion version) |
michael@0 | 1852 | { |
michael@0 | 1853 | return !!VersionExtractFlags(version); |
michael@0 | 1854 | } |
michael@0 | 1855 | |
michael@0 | 1856 | static inline bool |
michael@0 | 1857 | VersionIsKnown(JSVersion version) |
michael@0 | 1858 | { |
michael@0 | 1859 | return VersionNumber(version) != JSVERSION_UNKNOWN; |
michael@0 | 1860 | } |
michael@0 | 1861 | |
michael@0 | 1862 | inline void |
michael@0 | 1863 | FreeOp::free_(void *p) |
michael@0 | 1864 | { |
michael@0 | 1865 | if (shouldFreeLater()) { |
michael@0 | 1866 | runtime()->gcHelperThread.freeLater(p); |
michael@0 | 1867 | return; |
michael@0 | 1868 | } |
michael@0 | 1869 | js_free(p); |
michael@0 | 1870 | } |
michael@0 | 1871 | |
michael@0 | 1872 | class AutoLockGC |
michael@0 | 1873 | { |
michael@0 | 1874 | public: |
michael@0 | 1875 | explicit AutoLockGC(JSRuntime *rt = nullptr |
michael@0 | 1876 | MOZ_GUARD_OBJECT_NOTIFIER_PARAM) |
michael@0 | 1877 | : runtime(rt) |
michael@0 | 1878 | { |
michael@0 | 1879 | MOZ_GUARD_OBJECT_NOTIFIER_INIT; |
michael@0 | 1880 | // Avoid MSVC warning C4390 for non-threadsafe builds. |
michael@0 | 1881 | if (rt) |
michael@0 | 1882 | rt->lockGC(); |
michael@0 | 1883 | } |
michael@0 | 1884 | |
michael@0 | 1885 | ~AutoLockGC() |
michael@0 | 1886 | { |
michael@0 | 1887 | if (runtime) |
michael@0 | 1888 | runtime->unlockGC(); |
michael@0 | 1889 | } |
michael@0 | 1890 | |
michael@0 | 1891 | bool locked() const { |
michael@0 | 1892 | return !!runtime; |
michael@0 | 1893 | } |
michael@0 | 1894 | |
michael@0 | 1895 | void lock(JSRuntime *rt) { |
michael@0 | 1896 | JS_ASSERT(rt); |
michael@0 | 1897 | JS_ASSERT(!runtime); |
michael@0 | 1898 | runtime = rt; |
michael@0 | 1899 | rt->lockGC(); |
michael@0 | 1900 | } |
michael@0 | 1901 | |
michael@0 | 1902 | private: |
michael@0 | 1903 | JSRuntime *runtime; |
michael@0 | 1904 | MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER |
michael@0 | 1905 | }; |
michael@0 | 1906 | |
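// Illustrative sketch (assumption): code that must touch state shared with the
// GC helper thread holds the GC lock for a scope:
//
//     {
//         js::AutoLockGC lock(rt);
//         // ... inspect or mutate GC state shared with GCHelperThread ...
//     }
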
michael@0 | 1907 | class AutoUnlockGC |
michael@0 | 1908 | { |
michael@0 | 1909 | private: |
michael@0 | 1910 | JSRuntime *rt; |
michael@0 | 1911 | MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER |
michael@0 | 1912 | |
michael@0 | 1913 | public: |
michael@0 | 1914 | explicit AutoUnlockGC(JSRuntime *rt |
michael@0 | 1915 | MOZ_GUARD_OBJECT_NOTIFIER_PARAM) |
michael@0 | 1916 | : rt(rt) |
michael@0 | 1917 | { |
michael@0 | 1918 | MOZ_GUARD_OBJECT_NOTIFIER_INIT; |
michael@0 | 1919 | rt->unlockGC(); |
michael@0 | 1920 | } |
michael@0 | 1921 | ~AutoUnlockGC() { rt->lockGC(); } |
michael@0 | 1922 | }; |
michael@0 | 1923 | |
michael@0 | 1924 | class MOZ_STACK_CLASS AutoKeepAtoms |
michael@0 | 1925 | { |
michael@0 | 1926 | PerThreadData *pt; |
michael@0 | 1927 | MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER |
michael@0 | 1928 | |
michael@0 | 1929 | public: |
michael@0 | 1930 | explicit AutoKeepAtoms(PerThreadData *pt |
michael@0 | 1931 | MOZ_GUARD_OBJECT_NOTIFIER_PARAM) |
michael@0 | 1932 | : pt(pt) |
michael@0 | 1933 | { |
michael@0 | 1934 | MOZ_GUARD_OBJECT_NOTIFIER_INIT; |
michael@0 | 1935 | if (JSRuntime *rt = pt->runtimeIfOnOwnerThread()) { |
michael@0 | 1936 | rt->keepAtoms_++; |
michael@0 | 1937 | } else { |
michael@0 | 1938 | // This should be a thread with an exclusive context, which will |
michael@0 | 1939 | // always inhibit collection of atoms. |
michael@0 | 1940 | JS_ASSERT(pt->exclusiveThreadsPresent()); |
michael@0 | 1941 | } |
michael@0 | 1942 | } |
michael@0 | 1943 | ~AutoKeepAtoms() { |
michael@0 | 1944 | if (JSRuntime *rt = pt->runtimeIfOnOwnerThread()) { |
michael@0 | 1945 | JS_ASSERT(rt->keepAtoms_); |
michael@0 | 1946 | rt->keepAtoms_--; |
michael@0 | 1947 | } |
michael@0 | 1948 | } |
michael@0 | 1949 | }; |
michael@0 | 1950 | |
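// Illustrative sketch (assumption): code that hands around raw atom pointers
// across a possible GC keeps them alive by holding an AutoKeepAtoms for the
// duration; cx->perThreadData and js::Atomize are assumed from elsewhere:
//
//     {
//         js::AutoKeepAtoms keepAtoms(cx->perThreadData);
//         JSAtom *atom = js::Atomize(cx, bytes, length);
//         // ... |atom| cannot be collected while keepAtoms is live ...
//     }
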
michael@0 | 1951 | inline void |
michael@0 | 1952 | PerThreadData::setJitStackLimit(uintptr_t limit) |
michael@0 | 1953 | { |
michael@0 | 1954 | JS_ASSERT(runtime_->currentThreadOwnsInterruptLock()); |
michael@0 | 1955 | jitStackLimit = limit; |
michael@0 | 1956 | } |
michael@0 | 1957 | |
michael@0 | 1958 | inline JSRuntime * |
michael@0 | 1959 | PerThreadData::runtimeFromMainThread() |
michael@0 | 1960 | { |
michael@0 | 1961 | JS_ASSERT(CurrentThreadCanAccessRuntime(runtime_)); |
michael@0 | 1962 | return runtime_; |
michael@0 | 1963 | } |
michael@0 | 1964 | |
michael@0 | 1965 | inline JSRuntime * |
michael@0 | 1966 | PerThreadData::runtimeIfOnOwnerThread() |
michael@0 | 1967 | { |
michael@0 | 1968 | return CurrentThreadCanAccessRuntime(runtime_) ? runtime_ : nullptr; |
michael@0 | 1969 | } |
michael@0 | 1970 | |
michael@0 | 1971 | inline bool |
michael@0 | 1972 | PerThreadData::exclusiveThreadsPresent() |
michael@0 | 1973 | { |
michael@0 | 1974 | return runtime_->exclusiveThreadsPresent(); |
michael@0 | 1975 | } |
michael@0 | 1976 | |
michael@0 | 1977 | inline void |
michael@0 | 1978 | PerThreadData::addActiveCompilation() |
michael@0 | 1979 | { |
michael@0 | 1980 | activeCompilations++; |
michael@0 | 1981 | runtime_->addActiveCompilation(); |
michael@0 | 1982 | } |
michael@0 | 1983 | |
michael@0 | 1984 | inline void |
michael@0 | 1985 | PerThreadData::removeActiveCompilation() |
michael@0 | 1986 | { |
michael@0 | 1987 | JS_ASSERT(activeCompilations); |
michael@0 | 1988 | activeCompilations--; |
michael@0 | 1989 | runtime_->removeActiveCompilation(); |
michael@0 | 1990 | } |
michael@0 | 1991 | |
michael@0 | 1992 | /************************************************************************/ |
michael@0 | 1993 | |
michael@0 | 1994 | static MOZ_ALWAYS_INLINE void |
michael@0 | 1995 | MakeRangeGCSafe(Value *vec, size_t len) |
michael@0 | 1996 | { |
michael@0 | 1997 | mozilla::PodZero(vec, len); |
michael@0 | 1998 | } |
michael@0 | 1999 | |
michael@0 | 2000 | static MOZ_ALWAYS_INLINE void |
michael@0 | 2001 | MakeRangeGCSafe(Value *beg, Value *end) |
michael@0 | 2002 | { |
michael@0 | 2003 | mozilla::PodZero(beg, end - beg); |
michael@0 | 2004 | } |
michael@0 | 2005 | |
michael@0 | 2006 | static MOZ_ALWAYS_INLINE void |
michael@0 | 2007 | MakeRangeGCSafe(jsid *beg, jsid *end) |
michael@0 | 2008 | { |
michael@0 | 2009 | for (jsid *id = beg; id != end; ++id) |
michael@0 | 2010 | *id = INT_TO_JSID(0); |
michael@0 | 2011 | } |
michael@0 | 2012 | |
michael@0 | 2013 | static MOZ_ALWAYS_INLINE void |
michael@0 | 2014 | MakeRangeGCSafe(jsid *vec, size_t len) |
michael@0 | 2015 | { |
michael@0 | 2016 | MakeRangeGCSafe(vec, vec + len); |
michael@0 | 2017 | } |
michael@0 | 2018 | |
michael@0 | 2019 | static MOZ_ALWAYS_INLINE void |
michael@0 | 2020 | MakeRangeGCSafe(Shape **beg, Shape **end) |
michael@0 | 2021 | { |
michael@0 | 2022 | mozilla::PodZero(beg, end - beg); |
michael@0 | 2023 | } |
michael@0 | 2024 | |
michael@0 | 2025 | static MOZ_ALWAYS_INLINE void |
michael@0 | 2026 | MakeRangeGCSafe(Shape **vec, size_t len) |
michael@0 | 2027 | { |
michael@0 | 2028 | mozilla::PodZero(vec, len); |
michael@0 | 2029 | } |
michael@0 | 2030 | |
michael@0 | 2031 | static MOZ_ALWAYS_INLINE void |
michael@0 | 2032 | SetValueRangeToUndefined(Value *beg, Value *end) |
michael@0 | 2033 | { |
michael@0 | 2034 | for (Value *v = beg; v != end; ++v) |
michael@0 | 2035 | v->setUndefined(); |
michael@0 | 2036 | } |
michael@0 | 2037 | |
michael@0 | 2038 | static MOZ_ALWAYS_INLINE void |
michael@0 | 2039 | SetValueRangeToUndefined(Value *vec, size_t len) |
michael@0 | 2040 | { |
michael@0 | 2041 | SetValueRangeToUndefined(vec, vec + len); |
michael@0 | 2042 | } |
michael@0 | 2043 | |
michael@0 | 2044 | static MOZ_ALWAYS_INLINE void |
michael@0 | 2045 | SetValueRangeToNull(Value *beg, Value *end) |
michael@0 | 2046 | { |
michael@0 | 2047 | for (Value *v = beg; v != end; ++v) |
michael@0 | 2048 | v->setNull(); |
michael@0 | 2049 | } |
michael@0 | 2050 | |
michael@0 | 2051 | static MOZ_ALWAYS_INLINE void |
michael@0 | 2052 | SetValueRangeToNull(Value *vec, size_t len) |
michael@0 | 2053 | { |
michael@0 | 2054 | SetValueRangeToNull(vec, vec + len); |
michael@0 | 2055 | } |
michael@0 | 2056 | |
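// Illustrative sketch (assumption): freshly allocated Value storage is made
// safe to scan by zeroing it (or initialized to undefined directly):
//
//     Value *slots = js_pod_malloc<Value>(count);
//     if (slots)
//         MakeRangeGCSafe(slots, count);   // or SetValueRangeToUndefined(slots, count)
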
michael@0 | 2057 | /* |
michael@0 | 2058 | * Allocation policy that uses JSRuntime::malloc_ and friends, so that |
michael@0 | 2059 | * memory pressure is properly accounted for. This is suitable for |
michael@0 | 2060 | * long-lived objects owned by the JSRuntime. |
michael@0 | 2061 | * |
michael@0 | 2062 | * Since it doesn't hold a JSContext (those may not live long enough), it |
michael@0 | 2063 | * can't report out-of-memory conditions itself; the caller must check for |
michael@0 | 2064 | * OOM and take the appropriate action. |
michael@0 | 2065 | * |
michael@0 | 2066 | * FIXME bug 647103 - replace these *AllocPolicy names. |
michael@0 | 2067 | */ |
michael@0 | 2068 | class RuntimeAllocPolicy |
michael@0 | 2069 | { |
michael@0 | 2070 | JSRuntime *const runtime; |
michael@0 | 2071 | |
michael@0 | 2072 | public: |
michael@0 | 2073 | RuntimeAllocPolicy(JSRuntime *rt) : runtime(rt) {} |
michael@0 | 2074 | void *malloc_(size_t bytes) { return runtime->malloc_(bytes); } |
michael@0 | 2075 | void *calloc_(size_t bytes) { return runtime->calloc_(bytes); } |
michael@0 | 2076 | void *realloc_(void *p, size_t bytes) { return runtime->realloc_(p, bytes); } |
michael@0 | 2077 | void free_(void *p) { js_free(p); } |
michael@0 | 2078 | void reportAllocOverflow() const {} |
michael@0 | 2079 | }; |
michael@0 | 2080 | |
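// Illustrative sketch (assumption): a long-lived, runtime-owned container
// charges its memory to the runtime by using this policy:
//
//     RuntimeAllocPolicy policy(rt);
//     Vector<uint32_t, 0, RuntimeAllocPolicy> counts(policy);
//     if (!counts.append(1))
//         return false;   // OOM; the caller reports
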
michael@0 | 2081 | extern const JSSecurityCallbacks NullSecurityCallbacks; |
michael@0 | 2082 | |
michael@0 | 2083 | } /* namespace js */ |
michael@0 | 2084 | |
michael@0 | 2085 | #ifdef _MSC_VER |
michael@0 | 2086 | #pragma warning(pop) |
michael@0 | 2087 | #endif |
michael@0 | 2088 | |
michael@0 | 2089 | #endif /* vm_Runtime_h */ |