Wed, 31 Dec 2014 06:09:35 +0100
Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1
(revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f) for hacking purposes.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
michael@0 | 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: |
michael@0 | 3 | * This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #include "jit/Ion.h" |
michael@0 | 8 | |
michael@0 | 9 | #include "mozilla/MemoryReporting.h" |
michael@0 | 10 | #include "mozilla/ThreadLocal.h" |
michael@0 | 11 | |
michael@0 | 12 | #include "jscompartment.h" |
michael@0 | 13 | #include "jsprf.h" |
michael@0 | 14 | #include "jsworkers.h" |
michael@0 | 15 | |
michael@0 | 16 | #include "gc/Marking.h" |
michael@0 | 17 | #include "jit/AliasAnalysis.h" |
michael@0 | 18 | #include "jit/AsmJSModule.h" |
michael@0 | 19 | #include "jit/BacktrackingAllocator.h" |
michael@0 | 20 | #include "jit/BaselineDebugModeOSR.h" |
michael@0 | 21 | #include "jit/BaselineFrame.h" |
michael@0 | 22 | #include "jit/BaselineInspector.h" |
michael@0 | 23 | #include "jit/BaselineJIT.h" |
michael@0 | 24 | #include "jit/CodeGenerator.h" |
michael@0 | 25 | #include "jit/EdgeCaseAnalysis.h" |
michael@0 | 26 | #include "jit/EffectiveAddressAnalysis.h" |
michael@0 | 27 | #include "jit/IonAnalysis.h" |
michael@0 | 28 | #include "jit/IonBuilder.h" |
michael@0 | 29 | #include "jit/IonOptimizationLevels.h" |
michael@0 | 30 | #include "jit/IonSpewer.h" |
michael@0 | 31 | #include "jit/JitCommon.h" |
michael@0 | 32 | #include "jit/JitCompartment.h" |
michael@0 | 33 | #include "jit/LICM.h" |
michael@0 | 34 | #include "jit/LinearScan.h" |
michael@0 | 35 | #include "jit/LIR.h" |
michael@0 | 36 | #include "jit/Lowering.h" |
michael@0 | 37 | #include "jit/ParallelSafetyAnalysis.h" |
michael@0 | 38 | #include "jit/PerfSpewer.h" |
michael@0 | 39 | #include "jit/RangeAnalysis.h" |
michael@0 | 40 | #include "jit/StupidAllocator.h" |
michael@0 | 41 | #include "jit/UnreachableCodeElimination.h" |
michael@0 | 42 | #include "jit/ValueNumbering.h" |
michael@0 | 43 | #include "vm/ForkJoin.h" |
michael@0 | 44 | #include "vm/TraceLogging.h" |
michael@0 | 45 | |
michael@0 | 46 | #include "jscompartmentinlines.h" |
michael@0 | 47 | #include "jsgcinlines.h" |
michael@0 | 48 | #include "jsinferinlines.h" |
michael@0 | 49 | #include "jsobjinlines.h" |
michael@0 | 50 | |
michael@0 | 51 | #include "jit/ExecutionMode-inl.h" |
michael@0 | 52 | |
michael@0 | 53 | using namespace js; |
michael@0 | 54 | using namespace js::jit; |
michael@0 | 55 | |
michael@0 | 56 | using mozilla::ThreadLocal; |
michael@0 | 57 | |
// Assert that JitCode is gc::Cell aligned.
JS_STATIC_ASSERT(sizeof(JitCode) % gc::CellSize == 0);

// Thread-local slot holding the innermost active IonContext for this thread
// (contexts form a stack via IonContext::prev_).
static ThreadLocal<IonContext*> TlsIonContext;
michael@0 | 62 | |
michael@0 | 63 | static IonContext * |
michael@0 | 64 | CurrentIonContext() |
michael@0 | 65 | { |
michael@0 | 66 | if (!TlsIonContext.initialized()) |
michael@0 | 67 | return nullptr; |
michael@0 | 68 | return TlsIonContext.get(); |
michael@0 | 69 | } |
michael@0 | 70 | |
// Install |ctx| as the innermost IonContext for the current thread.
void
jit::SetIonContext(IonContext *ctx)
{
    TlsIonContext.set(ctx);
}
michael@0 | 76 | |
michael@0 | 77 | IonContext * |
michael@0 | 78 | jit::GetIonContext() |
michael@0 | 79 | { |
michael@0 | 80 | MOZ_ASSERT(CurrentIonContext()); |
michael@0 | 81 | return CurrentIonContext(); |
michael@0 | 82 | } |
michael@0 | 83 | |
// Return the innermost IonContext for this thread, or nullptr if none is
// active. Unlike GetIonContext(), this never asserts.
IonContext *
jit::MaybeGetIonContext()
{
    return CurrentIonContext();
}
michael@0 | 89 | |
// Main-thread compilation context: derives the compile-time runtime and
// compartment views from |cx| and pushes itself onto the thread's
// IonContext stack (popped again by the destructor).
IonContext::IonContext(JSContext *cx, TempAllocator *temp)
  : cx(cx),
    temp(temp),
    runtime(CompileRuntime::get(cx->runtime())),
    compartment(CompileCompartment::get(cx->compartment())),
    prev_(CurrentIonContext()),
    assemblerCount_(0)
{
    SetIonContext(this);
}
michael@0 | 100 | |
// Off-main-thread compilation context: there is no JSContext, and no
// compartment is associated; only the runtime view is available.
IonContext::IonContext(ExclusiveContext *cx, TempAllocator *temp)
  : cx(nullptr),
    temp(temp),
    runtime(CompileRuntime::get(cx->runtime_)),
    compartment(nullptr),
    prev_(CurrentIonContext()),
    assemblerCount_(0)
{
    SetIonContext(this);
}
michael@0 | 111 | |
// Compilation context built directly from compile-time runtime and
// compartment views (no JSContext available, e.g. on a helper thread).
IonContext::IonContext(CompileRuntime *rt, CompileCompartment *comp, TempAllocator *temp)
  : cx(nullptr),
    temp(temp),
    runtime(rt),
    compartment(comp),
    prev_(CurrentIonContext()),
    assemblerCount_(0)
{
    SetIonContext(this);
}
michael@0 | 122 | |
// Minimal context with only a runtime view: no JSContext, compartment, or
// temp allocator. Used where compilation state is not needed.
IonContext::IonContext(CompileRuntime *rt)
  : cx(nullptr),
    temp(nullptr),
    runtime(rt),
    compartment(nullptr),
    prev_(CurrentIonContext()),
    assemblerCount_(0)
{
    SetIonContext(this);
}
michael@0 | 133 | |
// Pop this context, restoring the previously active IonContext (if any).
IonContext::~IonContext()
{
    SetIonContext(prev_);
}
michael@0 | 138 | |
michael@0 | 139 | bool |
michael@0 | 140 | jit::InitializeIon() |
michael@0 | 141 | { |
michael@0 | 142 | if (!TlsIonContext.initialized() && !TlsIonContext.init()) |
michael@0 | 143 | return false; |
michael@0 | 144 | CheckLogging(); |
michael@0 | 145 | CheckPerf(); |
michael@0 | 146 | return true; |
michael@0 | 147 | } |
michael@0 | 148 | |
// All members start out null/false; allocators, trampolines and stubs are
// created lazily in initialize() and the ensure*() methods.
JitRuntime::JitRuntime()
  : execAlloc_(nullptr),
    ionAlloc_(nullptr),
    exceptionTail_(nullptr),
    bailoutTail_(nullptr),
    enterJIT_(nullptr),
    bailoutHandler_(nullptr),
    argumentsRectifier_(nullptr),
    argumentsRectifierReturnAddr_(nullptr),
    parallelArgumentsRectifier_(nullptr),
    invalidator_(nullptr),
    debugTrapHandler_(nullptr),
    forkJoinGetSliceStub_(nullptr),
    baselineDebugModeOSRHandler_(nullptr),
    functionWrappers_(nullptr),
    osrTempData_(nullptr),
    ionCodeProtected_(false)
{
}
michael@0 | 168 | |
// Release lazily-created resources: the VM wrapper map, the OSR scratch
// buffer, and the Ion executable allocator.
JitRuntime::~JitRuntime()
{
    js_delete(functionWrappers_);
    freeOsrTempData();

    // Note: The interrupt lock is not taken here, as JitRuntime is only
    // destroyed along with its containing JSRuntime.
    js_delete(ionAlloc_);
}
michael@0 | 178 | |
// Create the runtime-wide trampolines and stubs shared by all JIT code:
// exception/bailout tails, bailout tables, arguments rectifiers, EnterJIT
// sequences, pre-barriers and VM function wrappers. Returns false on OOM
// (i.e. if any stub fails to generate). Must run with exclusive access and
// the interrupt lock held (asserted below).
bool
JitRuntime::initialize(JSContext *cx)
{
    JS_ASSERT(cx->runtime()->currentThreadHasExclusiveAccess());
    JS_ASSERT(cx->runtime()->currentThreadOwnsInterruptLock());

    // Runtime-wide stubs are shared across compartments, so they are
    // allocated in the atoms compartment.
    AutoCompartment ac(cx, cx->atomsCompartment());

    IonContext ictx(cx, nullptr);

    execAlloc_ = cx->runtime()->getExecAlloc(cx);
    if (!execAlloc_)
        return false;

    if (!cx->compartment()->ensureJitCompartmentExists(cx))
        return false;

    functionWrappers_ = cx->new_<VMWrapperMap>(cx);
    if (!functionWrappers_ || !functionWrappers_->init())
        return false;

    IonSpew(IonSpew_Codegen, "# Emitting exception tail stub");
    exceptionTail_ = generateExceptionTailStub(cx);
    if (!exceptionTail_)
        return false;

    IonSpew(IonSpew_Codegen, "# Emitting bailout tail stub");
    bailoutTail_ = generateBailoutTailStub(cx);
    if (!bailoutTail_)
        return false;

    if (cx->runtime()->jitSupportsFloatingPoint) {
        IonSpew(IonSpew_Codegen, "# Emitting bailout tables");

        // Initialize some Ion-only stubs that require floating-point support.
        if (!bailoutTables_.reserve(FrameSizeClass::ClassLimit().classId()))
            return false;

        // One bailout table per frame size class; the reserve() above makes
        // the infallibleAppend() safe.
        for (uint32_t id = 0;; id++) {
            FrameSizeClass class_ = FrameSizeClass::FromClass(id);
            if (class_ == FrameSizeClass::ClassLimit())
                break;
            bailoutTables_.infallibleAppend((JitCode *)nullptr);
            bailoutTables_[id] = generateBailoutTable(cx, id);
            if (!bailoutTables_[id])
                return false;
        }

        IonSpew(IonSpew_Codegen, "# Emitting bailout handler");
        bailoutHandler_ = generateBailoutHandler(cx);
        if (!bailoutHandler_)
            return false;

        IonSpew(IonSpew_Codegen, "# Emitting invalidator");
        invalidator_ = generateInvalidator(cx);
        if (!invalidator_)
            return false;
    }

    IonSpew(IonSpew_Codegen, "# Emitting sequential arguments rectifier");
    argumentsRectifier_ = generateArgumentsRectifier(cx, SequentialExecution, &argumentsRectifierReturnAddr_);
    if (!argumentsRectifier_)
        return false;

#ifdef JS_THREADSAFE
    IonSpew(IonSpew_Codegen, "# Emitting parallel arguments rectifier");
    parallelArgumentsRectifier_ = generateArgumentsRectifier(cx, ParallelExecution, nullptr);
    if (!parallelArgumentsRectifier_)
        return false;
#endif

    IonSpew(IonSpew_Codegen, "# Emitting EnterJIT sequence");
    enterJIT_ = generateEnterJIT(cx, EnterJitOptimized);
    if (!enterJIT_)
        return false;

    IonSpew(IonSpew_Codegen, "# Emitting EnterBaselineJIT sequence");
    enterBaselineJIT_ = generateEnterJIT(cx, EnterJitBaseline);
    if (!enterBaselineJIT_)
        return false;

    IonSpew(IonSpew_Codegen, "# Emitting Pre Barrier for Value");
    valuePreBarrier_ = generatePreBarrier(cx, MIRType_Value);
    if (!valuePreBarrier_)
        return false;

    IonSpew(IonSpew_Codegen, "# Emitting Pre Barrier for Shape");
    shapePreBarrier_ = generatePreBarrier(cx, MIRType_Shape);
    if (!shapePreBarrier_)
        return false;

    IonSpew(IonSpew_Codegen, "# Emitting VM function wrappers");
    // Walk the global linked list of VMFunction descriptors and generate a
    // wrapper trampoline for each one.
    for (VMFunction *fun = VMFunction::functions; fun; fun = fun->next) {
        if (!generateVMWrapper(cx, *fun))
            return false;
    }

    return true;
}
michael@0 | 278 | |
michael@0 | 279 | JitCode * |
michael@0 | 280 | JitRuntime::debugTrapHandler(JSContext *cx) |
michael@0 | 281 | { |
michael@0 | 282 | if (!debugTrapHandler_) { |
michael@0 | 283 | // JitRuntime code stubs are shared across compartments and have to |
michael@0 | 284 | // be allocated in the atoms compartment. |
michael@0 | 285 | AutoLockForExclusiveAccess lock(cx); |
michael@0 | 286 | AutoCompartment ac(cx, cx->runtime()->atomsCompartment()); |
michael@0 | 287 | debugTrapHandler_ = generateDebugTrapHandler(cx); |
michael@0 | 288 | } |
michael@0 | 289 | return debugTrapHandler_; |
michael@0 | 290 | } |
michael@0 | 291 | |
michael@0 | 292 | bool |
michael@0 | 293 | JitRuntime::ensureForkJoinGetSliceStubExists(JSContext *cx) |
michael@0 | 294 | { |
michael@0 | 295 | if (!forkJoinGetSliceStub_) { |
michael@0 | 296 | IonSpew(IonSpew_Codegen, "# Emitting ForkJoinGetSlice stub"); |
michael@0 | 297 | AutoLockForExclusiveAccess lock(cx); |
michael@0 | 298 | AutoCompartment ac(cx, cx->runtime()->atomsCompartment()); |
michael@0 | 299 | forkJoinGetSliceStub_ = generateForkJoinGetSliceStub(cx); |
michael@0 | 300 | } |
michael@0 | 301 | return !!forkJoinGetSliceStub_; |
michael@0 | 302 | } |
michael@0 | 303 | |
michael@0 | 304 | uint8_t * |
michael@0 | 305 | JitRuntime::allocateOsrTempData(size_t size) |
michael@0 | 306 | { |
michael@0 | 307 | osrTempData_ = (uint8_t *)js_realloc(osrTempData_, size); |
michael@0 | 308 | return osrTempData_; |
michael@0 | 309 | } |
michael@0 | 310 | |
// Release the OSR scratch buffer. Safe to call when no buffer is
// allocated (js_free of nullptr is a no-op).
void
JitRuntime::freeOsrTempData()
{
    js_free(osrTempData_);
    osrTempData_ = nullptr;
}
michael@0 | 317 | |
// Create the executable allocator used for Ion code. Must be called with
// the interrupt lock held (asserted). Reports OOM and returns nullptr on
// failure.
JSC::ExecutableAllocator *
JitRuntime::createIonAlloc(JSContext *cx)
{
    JS_ASSERT(cx->runtime()->currentThreadOwnsInterruptLock());

    ionAlloc_ = js_new<JSC::ExecutableAllocator>();
    if (!ionAlloc_)
        js_ReportOutOfMemory(cx);
    return ionAlloc_;
}
michael@0 | 328 | |
// Revoke access to all Ion code so the next main-thread execution faults
// into the signal handler (see handleAccessViolation). No-op when signal
// handlers are unavailable, the code is already protected, or no Ion
// allocator exists yet.
void
JitRuntime::ensureIonCodeProtected(JSRuntime *rt)
{
    JS_ASSERT(rt->currentThreadOwnsInterruptLock());

    if (!rt->signalHandlersInstalled() || ionCodeProtected_ || !ionAlloc_)
        return;

    // Protect all Ion code in the runtime to trigger an access violation the
    // next time any of it runs on the main thread.
    ionAlloc_->toggleAllCodeAsAccessible(false);
    ionCodeProtected_ = true;
}
michael@0 | 342 | |
// Called from the signal handler when a fault hits protected Ion code.
// Returns true if the fault was ours (and has been resolved by restoring
// access and patching backedges); false if the fault is unrelated.
bool
JitRuntime::handleAccessViolation(JSRuntime *rt, void *faultingAddress)
{
    // Not our fault unless signal handlers are installed and the faulting
    // address lies inside Ion code memory.
    if (!rt->signalHandlersInstalled() || !ionAlloc_ || !ionAlloc_->codeContains((char *) faultingAddress))
        return false;

#ifdef JS_THREADSAFE
    // All places where the interrupt lock is taken must either ensure that Ion
    // code memory won't be accessed within, or call ensureIonCodeAccessible to
    // render the memory safe for accessing. Otherwise taking the lock below
    // will deadlock the process.
    JS_ASSERT(!rt->currentThreadOwnsInterruptLock());
#endif

    // Taking this lock is necessary to prevent the interrupting thread from marking
    // the memory as inaccessible while we are patching backedges. This will cause us
    // to SEGV while still inside the signal handler, and the process will terminate.
    JSRuntime::AutoLockForInterrupt lock(rt);

    // Ion code in the runtime faulted after it was made inaccessible. Reset
    // the code privileges and patch all loop backedges to perform an interrupt
    // check instead.
    ensureIonCodeAccessible(rt);
    return true;
}
michael@0 | 368 | |
// Undo ensureIonCodeProtected(): restore access to Ion code and, if an
// interrupt is pending, patch loop backedges so the interrupt handler is
// invoked on their next execution. Caller holds the interrupt lock.
void
JitRuntime::ensureIonCodeAccessible(JSRuntime *rt)
{
    JS_ASSERT(rt->currentThreadOwnsInterruptLock());

    // This can only be called on the main thread and while handling signals,
    // which happens on a separate thread in OS X.
#ifndef XP_MACOSX
    JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
#endif

    if (ionCodeProtected_) {
        ionAlloc_->toggleAllCodeAsAccessible(true);
        ionCodeProtected_ = false;
    }

    if (rt->interrupt) {
        // The interrupt handler needs to be invoked by this thread, but we may
        // be inside a signal handler and have no idea what is above us on the
        // stack (probably we are executing Ion code at an arbitrary point, but
        // we could be elsewhere, say repatching a jump for an IonCache).
        // Patch all backedges in the runtime so they will invoke the interrupt
        // handler the next time they execute.
        patchIonBackedges(rt, BackedgeInterruptCheck);
    }
}
michael@0 | 395 | |
michael@0 | 396 | void |
michael@0 | 397 | JitRuntime::patchIonBackedges(JSRuntime *rt, BackedgeTarget target) |
michael@0 | 398 | { |
michael@0 | 399 | #ifndef XP_MACOSX |
michael@0 | 400 | JS_ASSERT(CurrentThreadCanAccessRuntime(rt)); |
michael@0 | 401 | #endif |
michael@0 | 402 | |
michael@0 | 403 | // Patch all loop backedges in Ion code so that they either jump to the |
michael@0 | 404 | // normal loop header or to an interrupt handler each time they run. |
michael@0 | 405 | for (InlineListIterator<PatchableBackedge> iter(backedgeList_.begin()); |
michael@0 | 406 | iter != backedgeList_.end(); |
michael@0 | 407 | iter++) |
michael@0 | 408 | { |
michael@0 | 409 | PatchableBackedge *patchableBackedge = *iter; |
michael@0 | 410 | PatchJump(patchableBackedge->backedge, target == BackedgeLoopHeader |
michael@0 | 411 | ? patchableBackedge->loopHeader |
michael@0 | 412 | : patchableBackedge->interruptCheck); |
michael@0 | 413 | } |
michael@0 | 414 | } |
michael@0 | 415 | |
// Make running Ion code observe a requested interrupt. The strategy
// depends on which thread requested it: the main thread patches backedges
// directly; other threads protect the code memory so the main thread
// faults into the signal handler, which does the patching.
void
jit::RequestInterruptForIonCode(JSRuntime *rt, JSRuntime::InterruptMode mode)
{
    JitRuntime *jitRuntime = rt->jitRuntime();
    if (!jitRuntime)
        return;

    JS_ASSERT(rt->currentThreadOwnsInterruptLock());

    // The mechanism for interrupting normal ion code varies depending on how
    // the interrupt is being requested.
    switch (mode) {
      case JSRuntime::RequestInterruptMainThread:
        // When requesting an interrupt from the main thread, Ion loop
        // backedges can be patched directly. Make sure we don't segv while
        // patching the backedges, to avoid deadlocking inside the signal
        // handler.
        JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
        jitRuntime->ensureIonCodeAccessible(rt);
        break;

      case JSRuntime::RequestInterruptAnyThread:
        // When requesting an interrupt from off the main thread, protect
        // Ion code memory so that the main thread will fault and enter a
        // signal handler when trying to execute the code. The signal
        // handler will unprotect the code and patch loop backedges so
        // that the interrupt handler is invoked afterwards.
        jitRuntime->ensureIonCodeProtected(rt);
        break;

      case JSRuntime::RequestInterruptAnyThreadDontStopIon:
      case JSRuntime::RequestInterruptAnyThreadForkJoin:
        // The caller does not require Ion code to be interrupted.
        // Nothing more needs to be done.
        break;

      default:
        MOZ_ASSUME_UNREACHABLE("Bad interrupt mode");
    }
}
michael@0 | 456 | |
// All members start out null; the stub code map is created in
// initialize(), the concat stubs in ensureIonStubsExist(), and the
// parallel-entry script set on first use.
JitCompartment::JitCompartment()
  : stubCodes_(nullptr),
    baselineCallReturnFromIonAddr_(nullptr),
    baselineGetPropReturnFromIonAddr_(nullptr),
    baselineSetPropReturnFromIonAddr_(nullptr),
    baselineCallReturnFromStubAddr_(nullptr),
    baselineGetPropReturnFromStubAddr_(nullptr),
    baselineSetPropReturnFromStubAddr_(nullptr),
    stringConcatStub_(nullptr),
    parallelStringConcatStub_(nullptr),
    activeParallelEntryScripts_(nullptr)
{
}
michael@0 | 470 | |
// Release the lazily-created stub code map and parallel-entry script set
// (js_delete of nullptr is a no-op).
JitCompartment::~JitCompartment()
{
    js_delete(stubCodes_);
    js_delete(activeParallelEntryScripts_);
}
michael@0 | 476 | |
michael@0 | 477 | bool |
michael@0 | 478 | JitCompartment::initialize(JSContext *cx) |
michael@0 | 479 | { |
michael@0 | 480 | stubCodes_ = cx->new_<ICStubCodeMap>(cx); |
michael@0 | 481 | if (!stubCodes_ || !stubCodes_->init()) |
michael@0 | 482 | return false; |
michael@0 | 483 | |
michael@0 | 484 | return true; |
michael@0 | 485 | } |
michael@0 | 486 | |
michael@0 | 487 | bool |
michael@0 | 488 | JitCompartment::ensureIonStubsExist(JSContext *cx) |
michael@0 | 489 | { |
michael@0 | 490 | if (!stringConcatStub_) { |
michael@0 | 491 | stringConcatStub_ = generateStringConcatStub(cx, SequentialExecution); |
michael@0 | 492 | if (!stringConcatStub_) |
michael@0 | 493 | return false; |
michael@0 | 494 | } |
michael@0 | 495 | |
michael@0 | 496 | #ifdef JS_THREADSAFE |
michael@0 | 497 | if (!parallelStringConcatStub_) { |
michael@0 | 498 | parallelStringConcatStub_ = generateStringConcatStub(cx, ParallelExecution); |
michael@0 | 499 | if (!parallelStringConcatStub_) |
michael@0 | 500 | return false; |
michael@0 | 501 | } |
michael@0 | 502 | #endif |
michael@0 | 503 | |
michael@0 | 504 | return true; |
michael@0 | 505 | } |
michael@0 | 506 | |
// Record |script| as an active parallel entry script so its parallel
// IonScript is preserved across GCs (see mark()/sweep()). Returns false
// only on OOM. Invariant: the isParallelEntryScript flag on the parallel
// IonScript implies membership in activeParallelEntryScripts_.
bool
JitCompartment::notifyOfActiveParallelEntryScript(JSContext *cx, HandleScript script)
{
    // Fast path. The isParallelEntryScript bit guarantees that the script is
    // already in the set.
    if (script->parallelIonScript()->isParallelEntryScript()) {
        MOZ_ASSERT(activeParallelEntryScripts_ && activeParallelEntryScripts_->has(script));
        script->parallelIonScript()->resetParallelAge();
        return true;
    }

    // Create the set lazily on first use.
    if (!activeParallelEntryScripts_) {
        activeParallelEntryScripts_ = cx->new_<ScriptSet>(cx);
        if (!activeParallelEntryScripts_ || !activeParallelEntryScripts_->init())
            return false;
    }

    // Set the flag first, then add; if add() fails the flag may be set
    // without membership — NOTE(review): looks intentional (best-effort),
    // but worth confirming against the fast-path assert above.
    script->parallelIonScript()->setIsParallelEntryScript();
    ScriptSet::AddPtr p = activeParallelEntryScripts_->lookupForAdd(script);
    return p || activeParallelEntryScripts_->add(p, script);
}
michael@0 | 528 | |
michael@0 | 529 | void |
michael@0 | 530 | jit::FinishOffThreadBuilder(IonBuilder *builder) |
michael@0 | 531 | { |
michael@0 | 532 | ExecutionMode executionMode = builder->info().executionMode(); |
michael@0 | 533 | |
michael@0 | 534 | // Clear the recompiling flag of the old ionScript, since we continue to |
michael@0 | 535 | // use the old ionScript if recompiling fails. |
michael@0 | 536 | if (executionMode == SequentialExecution && builder->script()->hasIonScript()) |
michael@0 | 537 | builder->script()->ionScript()->clearRecompiling(); |
michael@0 | 538 | |
michael@0 | 539 | // Clean up if compilation did not succeed. |
michael@0 | 540 | if (CompilingOffThread(builder->script(), executionMode)) |
michael@0 | 541 | SetIonScript(builder->script(), executionMode, nullptr); |
michael@0 | 542 | |
michael@0 | 543 | // The builder is allocated into its LifoAlloc, so destroying that will |
michael@0 | 544 | // destroy the builder and all other data accumulated during compilation, |
michael@0 | 545 | // except any final codegen (which includes an assembler and needs to be |
michael@0 | 546 | // explicitly destroyed). |
michael@0 | 547 | js_delete(builder->backgroundCodegen()); |
michael@0 | 548 | js_delete(builder->alloc().lifoAlloc()); |
michael@0 | 549 | } |
michael@0 | 550 | |
// Dispose of every finished off-thread compilation belonging to |comp|.
// No-op in non-threadsafe builds.
static inline void
FinishAllOffThreadCompilations(JSCompartment *comp)
{
#ifdef JS_THREADSAFE
    AutoLockWorkerThreadState lock;
    GlobalWorkerThreadState::IonBuilderVector &finished = WorkerThreadState().ionFinishedList();

    for (size_t i = 0; i < finished.length(); i++) {
        IonBuilder *builder = finished[i];
        if (builder->compartment == CompileCompartment::get(comp)) {
            FinishOffThreadBuilder(builder);
            // remove() takes &i — presumably it adjusts the index so the
            // loop does not skip the element shifted into slot i; TODO
            // confirm against GlobalWorkerThreadState::remove.
            WorkerThreadState().remove(finished, &i);
        }
    }
#endif
}
michael@0 | 567 | |
// Trace all JitCode cells in the atoms compartment's zone as roots (the
// runtime-wide stubs live there). Not used during minor GCs (asserted).
/* static */ void
JitRuntime::Mark(JSTracer *trc)
{
    JS_ASSERT(!trc->runtime()->isHeapMinorCollecting());
    Zone *zone = trc->runtime()->atomsCompartment()->zone();
    for (gc::CellIterUnderGC i(zone, gc::FINALIZE_JITCODE); !i.done(); i.next()) {
        JitCode *code = i.get<JitCode>();
        MarkJitCodeRoot(trc, &code, "wrapper");
    }
}
michael@0 | 578 | |
// GC mark hook for the compartment's JIT state: cancels/finishes pending
// off-thread compilations, frees the OSR scratch buffer, and marks
// parallel entry scripts young enough to be worth preserving.
void
JitCompartment::mark(JSTracer *trc, JSCompartment *compartment)
{
    // Cancel any active or pending off thread compilations. Note that the
    // MIR graph does not hold any nursery pointers, so there's no need to
    // do this for minor GCs.
    JS_ASSERT(!trc->runtime()->isHeapMinorCollecting());
    CancelOffThreadIonCompile(compartment, nullptr);
    FinishAllOffThreadCompilations(compartment);

    // Free temporary OSR buffer.
    trc->runtime()->jitRuntime()->freeOsrTempData();

    // Mark scripts with parallel IonScripts if we should preserve them.
    if (activeParallelEntryScripts_) {
        for (ScriptSet::Enum e(*activeParallelEntryScripts_); !e.empty(); e.popFront()) {
            JSScript *script = e.front();

            // If the script has since been invalidated or was attached by an
            // off-thread worker too late (i.e., the ForkJoin finished with
            // warmup doing all the work), remove it.
            if (!script->hasParallelIonScript() ||
                !script->parallelIonScript()->isParallelEntryScript())
            {
                e.removeFront();
                continue;
            }

            // Check and increment the age. If the script is below the max
            // age, mark it.
            //
            // Subtlety: We depend on the tracing of the parallel IonScript's
            // callTargetEntries to propagate the parallel age to the entire
            // call graph.
            if (ShouldPreserveParallelJITCode(trc->runtime(), script, /* increase = */ true)) {
                MarkScript(trc, const_cast<EncapsulatedPtrScript *>(&e.front()), "par-script");
                MOZ_ASSERT(script == e.front());
            }
        }
    }
}
michael@0 | 620 | |
// GC sweep hook: drop dead stub code, null out cached return addresses
// whose fallback stubs were swept, release unmarked concat stubs, and
// prune dead scripts from the parallel-entry set.
void
JitCompartment::sweep(FreeOp *fop)
{
    stubCodes_->sweep(fop);

    // If the sweep removed the ICCall_Fallback stub, nullptr the baselineCallReturnAddr_ field.
    if (!stubCodes_->lookup(static_cast<uint32_t>(ICStub::Call_Fallback))) {
        baselineCallReturnFromIonAddr_ = nullptr;
        baselineCallReturnFromStubAddr_ = nullptr;
    }
    // Similarly for the ICGetProp_Fallback stub.
    if (!stubCodes_->lookup(static_cast<uint32_t>(ICStub::GetProp_Fallback))) {
        baselineGetPropReturnFromIonAddr_ = nullptr;
        baselineGetPropReturnFromStubAddr_ = nullptr;
    }
    // And the ICSetProp_Fallback stub.
    if (!stubCodes_->lookup(static_cast<uint32_t>(ICStub::SetProp_Fallback))) {
        baselineSetPropReturnFromIonAddr_ = nullptr;
        baselineSetPropReturnFromStubAddr_ = nullptr;
    }

    // Drop the string concatenation stubs if the GC did not mark them.
    if (stringConcatStub_ && !IsJitCodeMarked(stringConcatStub_.unsafeGet()))
        stringConcatStub_ = nullptr;

    if (parallelStringConcatStub_ && !IsJitCodeMarked(parallelStringConcatStub_.unsafeGet()))
        parallelStringConcatStub_ = nullptr;

    // Remove entry scripts that died this GC.
    if (activeParallelEntryScripts_) {
        for (ScriptSet::Enum e(*activeParallelEntryScripts_); !e.empty(); e.popFront()) {
            JSScript *script = e.front();
            if (!IsScriptMarked(&script))
                e.removeFront();
            else
                MOZ_ASSERT(script == e.front());
        }
    }
}
michael@0 | 657 | |
// Look up the pre-generated bailout table for a frame size class.
// |frameClass| must be a real class (not None); tables were populated in
// initialize().
JitCode *
JitRuntime::getBailoutTable(const FrameSizeClass &frameClass) const
{
    JS_ASSERT(frameClass != FrameSizeClass::None());
    return bailoutTables_[frameClass.classId()];
}
michael@0 | 664 | |
// Look up the wrapper trampoline generated for VM function |f|. The
// wrapper must exist (all wrappers are generated in initialize());
// the read-only lookup makes this safe off the main thread.
JitCode *
JitRuntime::getVMWrapper(const VMFunction &f) const
{
    JS_ASSERT(functionWrappers_);
    JS_ASSERT(functionWrappers_->initialized());
    JitRuntime::VMWrapperMap::Ptr p = functionWrappers_->readonlyThreadsafeLookup(&f);
    JS_ASSERT(p);

    return p->value();
}
michael@0 | 675 | |
// Allocate a JitCode GC cell wrapping executable memory handed over by the
// caller. On allocation failure the executable memory is released back to
// |pool| (otherwise the refcounted pool bytes would leak), and nullptr is
// returned.
template <AllowGC allowGC>
JitCode *
JitCode::New(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize,
             JSC::ExecutablePool *pool, JSC::CodeKind kind)
{
    JitCode *codeObj = js::NewJitCode<allowGC>(cx);
    if (!codeObj) {
        pool->release(headerSize + bufferSize, kind);
        return nullptr;
    }

    // NewJitCode returns raw cell memory; construct the object in place.
    new (codeObj) JitCode(code, bufferSize, headerSize, pool, kind);
    return codeObj;
}
michael@0 | 690 | |
// Explicit instantiations: JitCode::New is only used with these two AllowGC
// policies, so emit both specializations from this translation unit.
template
JitCode *
JitCode::New<CanGC>(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize,
                    JSC::ExecutablePool *pool, JSC::CodeKind kind);

template
JitCode *
JitCode::New<NoGC>(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize,
                   JSC::ExecutablePool *pool, JSC::CodeKind kind);
michael@0 | 700 | |
// Copy the finished instruction stream and its side tables out of the
// assembler into this code object's executable buffer.
void
JitCode::copyFrom(MacroAssembler &masm)
{
    // Store the JitCode pointer right before the code buffer, so we can
    // recover the gcthing from relocation tables.
    *(JitCode **)(code_ - sizeof(JitCode *)) = this;
    insnSize_ = masm.instructionsSize();
    masm.executableCopy(code_);

    // The relocation and pre-barrier tables follow the instructions inside
    // code_, each at the offset computed from the sizes recorded here.
    jumpRelocTableBytes_ = masm.jumpRelocationTableBytes();
    masm.copyJumpRelocationTable(code_ + jumpRelocTableOffset());

    dataRelocTableBytes_ = masm.dataRelocationTableBytes();
    masm.copyDataRelocationTable(code_ + dataRelocTableOffset());

    preBarrierTableBytes_ = masm.preBarrierTableBytes();
    masm.copyPreBarrierTable(code_ + preBarrierTableOffset());

    // Resolve code labels now that the final code address is known.
    masm.processCodeLabels(code_);
}
michael@0 | 721 | |
// Trace GC things referenced from the instruction stream, located via the
// jump/data relocation tables copied in copyFrom().
void
JitCode::trace(JSTracer *trc)
{
    // Note that we cannot mark invalidated scripts, since we've basically
    // corrupted the code stream by injecting bailouts.
    if (invalidated())
        return;

    if (jumpRelocTableBytes_) {
        uint8_t *start = code_ + jumpRelocTableOffset();
        CompactBufferReader reader(start, start + jumpRelocTableBytes_);
        MacroAssembler::TraceJumpRelocations(trc, this, reader);
    }
    if (dataRelocTableBytes_) {
        uint8_t *start = code_ + dataRelocTableOffset();
        CompactBufferReader reader(start, start + dataRelocTableBytes_);
        MacroAssembler::TraceDataRelocations(trc, this, reader);
    }
}
michael@0 | 741 | |
// Finalizer: poison and release the executable memory backing this code
// object. Must run while holding the interrupt lock (asserted below).
void
JitCode::finalize(FreeOp *fop)
{
    // Make sure this can't race with an interrupting thread, which may try
    // to read the contents of the pool we are releasing references in.
    JS_ASSERT(fop->runtime()->currentThreadOwnsInterruptLock());

    // Buffer can be freed at any time hereafter. Catch use-after-free bugs.
    // Don't do this if the Ion code is protected, as the signal handler will
    // deadlock trying to reacquire the interrupt lock.
    if (fop->runtime()->jitRuntime() && !fop->runtime()->jitRuntime()->ionCodeProtected())
        memset(code_, JS_SWEPT_CODE_PATTERN, bufferSize_);
    code_ = nullptr;

    // Code buffers are stored inside JSC pools.
    // Pools are refcounted. Releasing the pool may free it.
    if (pool_) {
        // Horrible hack: if we are using perf integration, we don't
        // want to reuse code addresses, so we just leak the memory instead.
        if (!PerfEnabled())
            pool_->release(headerSize_ + bufferSize_, JSC::CodeKind(kind_));
        pool_ = nullptr;
    }
}
michael@0 | 766 | |
michael@0 | 767 | void |
michael@0 | 768 | JitCode::togglePreBarriers(bool enabled) |
michael@0 | 769 | { |
michael@0 | 770 | uint8_t *start = code_ + preBarrierTableOffset(); |
michael@0 | 771 | CompactBufferReader reader(start, start + preBarrierTableBytes_); |
michael@0 | 772 | |
michael@0 | 773 | while (reader.more()) { |
michael@0 | 774 | size_t offset = reader.readUnsigned(); |
michael@0 | 775 | CodeLocationLabel loc(this, offset); |
michael@0 | 776 | if (enabled) |
michael@0 | 777 | Assembler::ToggleToCmp(loc); |
michael@0 | 778 | else |
michael@0 | 779 | Assembler::ToggleToJmp(loc); |
michael@0 | 780 | } |
michael@0 | 781 | } |
michael@0 | 782 | |
michael@0 | 783 | IonScript::IonScript() |
michael@0 | 784 | : method_(nullptr), |
michael@0 | 785 | deoptTable_(nullptr), |
michael@0 | 786 | osrPc_(nullptr), |
michael@0 | 787 | osrEntryOffset_(0), |
michael@0 | 788 | skipArgCheckEntryOffset_(0), |
michael@0 | 789 | invalidateEpilogueOffset_(0), |
michael@0 | 790 | invalidateEpilogueDataOffset_(0), |
michael@0 | 791 | numBailouts_(0), |
michael@0 | 792 | hasUncompiledCallTarget_(false), |
michael@0 | 793 | isParallelEntryScript_(false), |
michael@0 | 794 | hasSPSInstrumentation_(false), |
michael@0 | 795 | recompiling_(false), |
michael@0 | 796 | runtimeData_(0), |
michael@0 | 797 | runtimeSize_(0), |
michael@0 | 798 | cacheIndex_(0), |
michael@0 | 799 | cacheEntries_(0), |
michael@0 | 800 | safepointIndexOffset_(0), |
michael@0 | 801 | safepointIndexEntries_(0), |
michael@0 | 802 | safepointsStart_(0), |
michael@0 | 803 | safepointsSize_(0), |
michael@0 | 804 | frameSlots_(0), |
michael@0 | 805 | frameSize_(0), |
michael@0 | 806 | bailoutTable_(0), |
michael@0 | 807 | bailoutEntries_(0), |
michael@0 | 808 | osiIndexOffset_(0), |
michael@0 | 809 | osiIndexEntries_(0), |
michael@0 | 810 | snapshots_(0), |
michael@0 | 811 | snapshotsListSize_(0), |
michael@0 | 812 | snapshotsRVATableSize_(0), |
michael@0 | 813 | constantTable_(0), |
michael@0 | 814 | constantEntries_(0), |
michael@0 | 815 | callTargetList_(0), |
michael@0 | 816 | callTargetEntries_(0), |
michael@0 | 817 | backedgeList_(0), |
michael@0 | 818 | backedgeEntries_(0), |
michael@0 | 819 | refcount_(0), |
michael@0 | 820 | parallelAge_(0), |
michael@0 | 821 | recompileInfo_(), |
michael@0 | 822 | osrPcMismatchCounter_(0), |
michael@0 | 823 | dependentAsmJSModules(nullptr) |
michael@0 | 824 | { |
michael@0 | 825 | } |
michael@0 | 826 | |
// Allocate an IonScript together with all of its side tables in a single
// malloc'ed buffer: the IonScript header comes first, and each table lives
// at an aligned offset recorded in the corresponding member. Returns
// nullptr (with an OOM report) on failure.
IonScript *
IonScript::New(JSContext *cx, types::RecompileInfo recompileInfo,
               uint32_t frameSlots, uint32_t frameSize,
               size_t snapshotsListSize, size_t snapshotsRVATableSize,
               size_t recoversSize, size_t bailoutEntries,
               size_t constants, size_t safepointIndices,
               size_t osiIndices, size_t cacheEntries,
               size_t runtimeSize, size_t safepointsSize,
               size_t callTargetEntries, size_t backedgeEntries,
               OptimizationLevel optimizationLevel)
{
    static const int DataAlignment = sizeof(void *);

    // Guard the two sizes that can grow without bound with script size
    // against overflowing the layout computation below.
    if (snapshotsListSize >= MAX_BUFFER_SIZE ||
        (bailoutEntries >= MAX_BUFFER_SIZE / sizeof(uint32_t)))
    {
        js_ReportOutOfMemory(cx);
        return nullptr;
    }

    // This should not overflow on x86, because the memory is already allocated
    // *somewhere* and if their total overflowed there would be no memory left
    // at all.
    size_t paddedSnapshotsSize = AlignBytes(snapshotsListSize + snapshotsRVATableSize, DataAlignment);
    size_t paddedRecoversSize = AlignBytes(recoversSize, DataAlignment);
    size_t paddedBailoutSize = AlignBytes(bailoutEntries * sizeof(uint32_t), DataAlignment);
    size_t paddedConstantsSize = AlignBytes(constants * sizeof(Value), DataAlignment);
    size_t paddedSafepointIndicesSize = AlignBytes(safepointIndices * sizeof(SafepointIndex), DataAlignment);
    size_t paddedOsiIndicesSize = AlignBytes(osiIndices * sizeof(OsiIndex), DataAlignment);
    size_t paddedCacheEntriesSize = AlignBytes(cacheEntries * sizeof(uint32_t), DataAlignment);
    size_t paddedRuntimeSize = AlignBytes(runtimeSize, DataAlignment);
    size_t paddedSafepointSize = AlignBytes(safepointsSize, DataAlignment);
    size_t paddedCallTargetSize = AlignBytes(callTargetEntries * sizeof(JSScript *), DataAlignment);
    size_t paddedBackedgeSize = AlignBytes(backedgeEntries * sizeof(PatchableBackedge), DataAlignment);
    size_t bytes = paddedSnapshotsSize +
                   paddedRecoversSize +
                   paddedBailoutSize +
                   paddedConstantsSize +
                   paddedSafepointIndicesSize+
                   paddedOsiIndicesSize +
                   paddedCacheEntriesSize +
                   paddedRuntimeSize +
                   paddedSafepointSize +
                   paddedCallTargetSize +
                   paddedBackedgeSize;
    uint8_t *buffer = (uint8_t *)cx->malloc_(sizeof(IonScript) + bytes);
    if (!buffer)
        return nullptr;

    IonScript *script = reinterpret_cast<IonScript *>(buffer);
    new (script) IonScript();

    // Carve the trailing buffer into tables, recording each table's offset
    // from |script| and its logical entry count/size. The order here is the
    // physical layout order.
    uint32_t offsetCursor = sizeof(IonScript);

    script->runtimeData_ = offsetCursor;
    script->runtimeSize_ = runtimeSize;
    offsetCursor += paddedRuntimeSize;

    script->cacheIndex_ = offsetCursor;
    script->cacheEntries_ = cacheEntries;
    offsetCursor += paddedCacheEntriesSize;

    script->safepointIndexOffset_ = offsetCursor;
    script->safepointIndexEntries_ = safepointIndices;
    offsetCursor += paddedSafepointIndicesSize;

    script->safepointsStart_ = offsetCursor;
    script->safepointsSize_ = safepointsSize;
    offsetCursor += paddedSafepointSize;

    script->bailoutTable_ = offsetCursor;
    script->bailoutEntries_ = bailoutEntries;
    offsetCursor += paddedBailoutSize;

    script->osiIndexOffset_ = offsetCursor;
    script->osiIndexEntries_ = osiIndices;
    offsetCursor += paddedOsiIndicesSize;

    script->snapshots_ = offsetCursor;
    script->snapshotsListSize_ = snapshotsListSize;
    script->snapshotsRVATableSize_ = snapshotsRVATableSize;
    offsetCursor += paddedSnapshotsSize;

    script->recovers_ = offsetCursor;
    script->recoversSize_ = recoversSize;
    offsetCursor += paddedRecoversSize;

    script->constantTable_ = offsetCursor;
    script->constantEntries_ = constants;
    offsetCursor += paddedConstantsSize;

    script->callTargetList_ = offsetCursor;
    script->callTargetEntries_ = callTargetEntries;
    offsetCursor += paddedCallTargetSize;

    script->backedgeList_ = offsetCursor;
    script->backedgeEntries_ = backedgeEntries;
    offsetCursor += paddedBackedgeSize;

    script->frameSlots_ = frameSlots;
    script->frameSize_ = frameSize;

    script->recompileInfo_ = recompileInfo;
    script->optimizationLevel_ = optimizationLevel;

    return script;
}
michael@0 | 934 | |
// Trace all GC things held by this IonScript: the method and deopt-table
// code objects, the constant pool, and the call target scripts.
void
IonScript::trace(JSTracer *trc)
{
    if (method_)
        MarkJitCode(trc, &method_, "method");

    if (deoptTable_)
        MarkJitCode(trc, &deoptTable_, "deoptimizationTable");

    for (size_t i = 0; i < numConstants(); i++)
        gc::MarkValue(trc, &getConstant(i), "constant");

    // No write barrier is needed for the call target list, as it's attached
    // at compilation time and is read only.
    for (size_t i = 0; i < callTargetEntries(); i++) {
        // Propagate the parallelAge to the call targets.
        if (callTargetList()[i]->hasParallelIonScript())
            callTargetList()[i]->parallelIonScript()->parallelAge_ = parallelAge_;

        gc::MarkScriptUnbarriered(trc, &callTargetList()[i], "callTarget");
    }
}
michael@0 | 957 | |
// Incremental-GC pre-barrier: if the zone is currently being marked
// incrementally, trace this script's edges before they are torn down so the
// collector does not lose objects that were reachable at mark start.
/* static */ void
IonScript::writeBarrierPre(Zone *zone, IonScript *ionScript)
{
#ifdef JSGC_INCREMENTAL
    if (zone->needsBarrier())
        ionScript->trace(zone->barrierTracer());
#endif
}
michael@0 | 966 | |
// Copy the serialized snapshot data into the trailing buffer: the snapshot
// list and the RVA table are stored back to back at offset snapshots_, with
// sizes fixed at New() time (asserted against the writer's view).
void
IonScript::copySnapshots(const SnapshotWriter *writer)
{
    MOZ_ASSERT(writer->listSize() == snapshotsListSize_);
    memcpy((uint8_t *)this + snapshots_,
           writer->listBuffer(), snapshotsListSize_);

    MOZ_ASSERT(snapshotsRVATableSize_);
    MOZ_ASSERT(writer->RVATableSize() == snapshotsRVATableSize_);
    memcpy((uint8_t *)this + snapshots_ + snapshotsListSize_,
           writer->RVATableBuffer(), snapshotsRVATableSize_);
}
michael@0 | 979 | |
// Copy the serialized recover instructions into the trailing buffer at
// offset recovers_; the size was fixed at New() time.
void
IonScript::copyRecovers(const RecoverWriter *writer)
{
    MOZ_ASSERT(writer->size() == recoversSize_);
    memcpy((uint8_t *)this + recovers_, writer->buffer(), recoversSize_);
}
michael@0 | 986 | |
// Copy the serialized safepoint data into the trailing buffer at offset
// safepointsStart_; the size was fixed at New() time.
void
IonScript::copySafepoints(const SafepointWriter *writer)
{
    JS_ASSERT(writer->size() == safepointsSize_);
    memcpy((uint8_t *)this + safepointsStart_, writer->buffer(), safepointsSize_);
}
michael@0 | 993 | |
// Copy the bailout table: bailoutEntries_ SnapshotOffset entries (stored as
// uint32_t, matching the sizing computation in New()).
void
IonScript::copyBailoutTable(const SnapshotOffset *table)
{
    memcpy(bailoutTable(), table, bailoutEntries_ * sizeof(uint32_t));
}
michael@0 | 999 | |
michael@0 | 1000 | void |
michael@0 | 1001 | IonScript::copyConstants(const Value *vp) |
michael@0 | 1002 | { |
michael@0 | 1003 | for (size_t i = 0; i < constantEntries_; i++) |
michael@0 | 1004 | constants()[i].init(vp[i]); |
michael@0 | 1005 | } |
michael@0 | 1006 | |
michael@0 | 1007 | void |
michael@0 | 1008 | IonScript::copyCallTargetEntries(JSScript **callTargets) |
michael@0 | 1009 | { |
michael@0 | 1010 | for (size_t i = 0; i < callTargetEntries_; i++) |
michael@0 | 1011 | callTargetList()[i] = callTargets[i]; |
michael@0 | 1012 | } |
michael@0 | 1013 | |
// Materialize each loop-backedge descriptor into this script's backedge
// list, point it at its initial target, and register it with the runtime so
// interrupt requests can repatch it later.
void
IonScript::copyPatchableBackedges(JSContext *cx, JitCode *code,
                                  PatchableBackedgeInfo *backedges)
{
    for (size_t i = 0; i < backedgeEntries_; i++) {
        const PatchableBackedgeInfo &info = backedges[i];
        PatchableBackedge *patchableBackedge = &backedgeList()[i];

        // Resolve the assembler labels to absolute code locations.
        CodeLocationJump backedge(code, info.backedge);
        CodeLocationLabel loopHeader(code, CodeOffsetLabel(info.loopHeader->offset()));
        CodeLocationLabel interruptCheck(code, CodeOffsetLabel(info.interruptCheck->offset()));
        new(patchableBackedge) PatchableBackedge(backedge, loopHeader, interruptCheck);

        // Point the backedge to either of its possible targets, according to
        // whether an interrupt is currently desired, matching the targets
        // established by ensureIonCodeAccessible() above. We don't handle the
        // interrupt immediately as the interrupt lock is held here.
        PatchJump(backedge, cx->runtime()->interrupt ? interruptCheck : loopHeader);

        cx->runtime()->jitRuntime()->addPatchableBackedge(patchableBackedge);
    }
}
michael@0 | 1036 | |
// Copy the safepoint index table, then convert each entry's displacement
// from the assembler's pre-linking offset to the final code offset via
// masm.actualOffset().
void
IonScript::copySafepointIndices(const SafepointIndex *si, MacroAssembler &masm)
{
    SafepointIndex *table = safepointIndices();
    memcpy(table, si, safepointIndexEntries_ * sizeof(SafepointIndex));
    for (size_t i = 0; i < safepointIndexEntries_; i++)
        table[i].adjustDisplacement(masm.actualOffset(table[i].displacement()));
}
michael@0 | 1048 | |
// Copy the OSI index table (used during invalidation; see getOsiIndex),
// then fix up each entry's offset from the assembler's pre-linking offsets
// to the final code offsets.
void
IonScript::copyOsiIndices(const OsiIndex *oi, MacroAssembler &masm)
{
    memcpy(osiIndices(), oi, osiIndexEntries_ * sizeof(OsiIndex));
    for (unsigned i = 0; i < osiIndexEntries_; i++)
        osiIndices()[i].fixUpOffset(masm);
}
michael@0 | 1056 | |
// Raw byte copy into the runtime data area; its size was fixed at New()
// time via the runtimeSize parameter.
void
IonScript::copyRuntimeData(const uint8_t *data)
{
    memcpy(runtimeData(), data, runtimeSize());
}
michael@0 | 1062 | |
// Copy the per-cache index entries, then rebase each inline cache onto the
// final code address.
void
IonScript::copyCacheEntries(const uint32_t *caches, MacroAssembler &masm)
{
    memcpy(cacheIndex(), caches, numCaches() * sizeof(uint32_t));

    // Jumps in the caches reflect the offset of those jumps in the compiled
    // code, not the absolute positions of the jumps. Update according to the
    // final code address now.
    for (size_t i = 0; i < numCaches(); i++)
        getCacheFromIndex(i).updateBaseAddress(method_, masm);
}
michael@0 | 1074 | |
michael@0 | 1075 | const SafepointIndex * |
michael@0 | 1076 | IonScript::getSafepointIndex(uint32_t disp) const |
michael@0 | 1077 | { |
michael@0 | 1078 | JS_ASSERT(safepointIndexEntries_ > 0); |
michael@0 | 1079 | |
michael@0 | 1080 | const SafepointIndex *table = safepointIndices(); |
michael@0 | 1081 | if (safepointIndexEntries_ == 1) { |
michael@0 | 1082 | JS_ASSERT(disp == table[0].displacement()); |
michael@0 | 1083 | return &table[0]; |
michael@0 | 1084 | } |
michael@0 | 1085 | |
michael@0 | 1086 | size_t minEntry = 0; |
michael@0 | 1087 | size_t maxEntry = safepointIndexEntries_ - 1; |
michael@0 | 1088 | uint32_t min = table[minEntry].displacement(); |
michael@0 | 1089 | uint32_t max = table[maxEntry].displacement(); |
michael@0 | 1090 | |
michael@0 | 1091 | // Raise if the element is not in the list. |
michael@0 | 1092 | JS_ASSERT(min <= disp && disp <= max); |
michael@0 | 1093 | |
michael@0 | 1094 | // Approximate the location of the FrameInfo. |
michael@0 | 1095 | size_t guess = (disp - min) * (maxEntry - minEntry) / (max - min) + minEntry; |
michael@0 | 1096 | uint32_t guessDisp = table[guess].displacement(); |
michael@0 | 1097 | |
michael@0 | 1098 | if (table[guess].displacement() == disp) |
michael@0 | 1099 | return &table[guess]; |
michael@0 | 1100 | |
michael@0 | 1101 | // Doing a linear scan from the guess should be more efficient in case of |
michael@0 | 1102 | // small group which are equally distributed on the code. |
michael@0 | 1103 | // |
michael@0 | 1104 | // such as: <... ... ... ... . ... ...> |
michael@0 | 1105 | if (guessDisp > disp) { |
michael@0 | 1106 | while (--guess >= minEntry) { |
michael@0 | 1107 | guessDisp = table[guess].displacement(); |
michael@0 | 1108 | JS_ASSERT(guessDisp >= disp); |
michael@0 | 1109 | if (guessDisp == disp) |
michael@0 | 1110 | return &table[guess]; |
michael@0 | 1111 | } |
michael@0 | 1112 | } else { |
michael@0 | 1113 | while (++guess <= maxEntry) { |
michael@0 | 1114 | guessDisp = table[guess].displacement(); |
michael@0 | 1115 | JS_ASSERT(guessDisp <= disp); |
michael@0 | 1116 | if (guessDisp == disp) |
michael@0 | 1117 | return &table[guess]; |
michael@0 | 1118 | } |
michael@0 | 1119 | } |
michael@0 | 1120 | |
michael@0 | 1121 | MOZ_ASSUME_UNREACHABLE("displacement not found."); |
michael@0 | 1122 | } |
michael@0 | 1123 | |
michael@0 | 1124 | const OsiIndex * |
michael@0 | 1125 | IonScript::getOsiIndex(uint32_t disp) const |
michael@0 | 1126 | { |
michael@0 | 1127 | for (const OsiIndex *it = osiIndices(), *end = osiIndices() + osiIndexEntries_; |
michael@0 | 1128 | it != end; |
michael@0 | 1129 | ++it) |
michael@0 | 1130 | { |
michael@0 | 1131 | if (it->returnPointDisplacement() == disp) |
michael@0 | 1132 | return it; |
michael@0 | 1133 | } |
michael@0 | 1134 | |
michael@0 | 1135 | MOZ_ASSUME_UNREACHABLE("Failed to find OSI point return address"); |
michael@0 | 1136 | } |
michael@0 | 1137 | |
// Translate an absolute return address inside this script's code into a
// displacement from the code start, and delegate to the displacement-based
// lookup.
const OsiIndex *
IonScript::getOsiIndex(uint8_t *retAddr) const
{
    IonSpew(IonSpew_Invalidate, "IonScript %p has method %p raw %p", (void *) this, (void *)
            method(), method()->raw());

    JS_ASSERT(containsCodeAddress(retAddr));
    uint32_t disp = retAddr - method()->raw();
    return getOsiIndex(disp);
}
michael@0 | 1148 | |
michael@0 | 1149 | void |
michael@0 | 1150 | IonScript::Trace(JSTracer *trc, IonScript *script) |
michael@0 | 1151 | { |
michael@0 | 1152 | if (script != ION_DISABLED_SCRIPT) |
michael@0 | 1153 | script->trace(trc); |
michael@0 | 1154 | } |
michael@0 | 1155 | |
// Destroy an IonScript: tear down its ICs and detach runtime-held links
// (dependent asm.js exits, patchable backedges) before freeing the memory.
void
IonScript::Destroy(FreeOp *fop, IonScript *script)
{
    script->destroyCaches();
    script->unlinkFromRuntime(fop);
    fop->free_(script);
}
michael@0 | 1163 | |
michael@0 | 1164 | void |
michael@0 | 1165 | IonScript::toggleBarriers(bool enabled) |
michael@0 | 1166 | { |
michael@0 | 1167 | method()->togglePreBarriers(enabled); |
michael@0 | 1168 | } |
michael@0 | 1169 | |
// Reset every inline cache attached to this script to its initial state.
void
IonScript::purgeCaches()
{
    // Don't reset any ICs if we're invalidated, otherwise, repointing the
    // inline jump could overwrite an invalidation marker. These ICs can
    // no longer run, however, the IC slow paths may be active on the stack.
    // ICs therefore are required to check for invalidation before patching,
    // to ensure the same invariant.
    if (invalidated())
        return;

    for (size_t i = 0; i < numCaches(); i++)
        getCacheFromIndex(i).reset();
}
michael@0 | 1184 | |
michael@0 | 1185 | void |
michael@0 | 1186 | IonScript::destroyCaches() |
michael@0 | 1187 | { |
michael@0 | 1188 | for (size_t i = 0; i < numCaches(); i++) |
michael@0 | 1189 | getCacheFromIndex(i).destroy(); |
michael@0 | 1190 | } |
michael@0 | 1191 | |
michael@0 | 1192 | bool |
michael@0 | 1193 | IonScript::addDependentAsmJSModule(JSContext *cx, DependentAsmJSModuleExit exit) |
michael@0 | 1194 | { |
michael@0 | 1195 | if (!dependentAsmJSModules) { |
michael@0 | 1196 | dependentAsmJSModules = cx->new_<Vector<DependentAsmJSModuleExit> >(cx); |
michael@0 | 1197 | if (!dependentAsmJSModules) |
michael@0 | 1198 | return false; |
michael@0 | 1199 | } |
michael@0 | 1200 | return dependentAsmJSModules->append(exit); |
michael@0 | 1201 | } |
michael@0 | 1202 | |
// Detach this IonScript from all runtime-level structures that point into
// its code: dependent asm.js exits and patchable backedges. Idempotent, so
// it is safe to call both at invalidation and again at destruction.
void
IonScript::unlinkFromRuntime(FreeOp *fop)
{
    // Remove any links from AsmJSModules that contain optimized FFI calls into
    // this IonScript.
    if (dependentAsmJSModules) {
        for (size_t i = 0; i < dependentAsmJSModules->length(); i++) {
            DependentAsmJSModuleExit exit = dependentAsmJSModules->begin()[i];
            exit.module->detachIonCompilation(exit.exitIndex);
        }

        fop->delete_(dependentAsmJSModules);
        dependentAsmJSModules = nullptr;
    }

    // The writes to the executable buffer below may clobber backedge jumps, so
    // make sure that those backedges are unlinked from the runtime and not
    // reclobbered with garbage if an interrupt is requested.
    JSRuntime *rt = fop->runtime();
    for (size_t i = 0; i < backedgeEntries_; i++) {
        PatchableBackedge *backedge = &backedgeList()[i];
        rt->jitRuntime()->removePatchableBackedge(backedge);
    }

    // Clear the list of backedges, so that this method is idempotent. It is
    // called during destruction, and may be additionally called when the
    // script is invalidated.
    backedgeEntries_ = 0;
}
michael@0 | 1232 | |
// Toggle GC pre-barrier patching for all JIT code in a zone: Ion and
// Baseline code attached to scripts, plus each compartment's shared
// Baseline stub code. No-op if the runtime has no JIT code at all.
void
jit::ToggleBarriers(JS::Zone *zone, bool needs)
{
    JSRuntime *rt = zone->runtimeFromMainThread();
    if (!rt->hasJitRuntime())
        return;

    for (gc::CellIterUnderGC i(zone, gc::FINALIZE_SCRIPT); !i.done(); i.next()) {
        JSScript *script = i.get<JSScript>();
        if (script->hasIonScript())
            script->ionScript()->toggleBarriers(needs);
        if (script->hasBaselineScript())
            script->baselineScript()->toggleBarriers(needs);
    }

    for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
        if (comp->jitCompartment())
            comp->jitCompartment()->toggleBaselineStubBarriers(needs);
    }
}
michael@0 | 1253 | |
michael@0 | 1254 | namespace js { |
michael@0 | 1255 | namespace jit { |
michael@0 | 1256 | |
michael@0 | 1257 | bool |
michael@0 | 1258 | OptimizeMIR(MIRGenerator *mir) |
michael@0 | 1259 | { |
michael@0 | 1260 | MIRGraph &graph = mir->graph(); |
michael@0 | 1261 | TraceLogger *logger; |
michael@0 | 1262 | if (GetIonContext()->runtime->onMainThread()) |
michael@0 | 1263 | logger = TraceLoggerForMainThread(GetIonContext()->runtime); |
michael@0 | 1264 | else |
michael@0 | 1265 | logger = TraceLoggerForCurrentThread(); |
michael@0 | 1266 | |
michael@0 | 1267 | if (!mir->compilingAsmJS()) { |
michael@0 | 1268 | if (!MakeMRegExpHoistable(graph)) |
michael@0 | 1269 | return false; |
michael@0 | 1270 | } |
michael@0 | 1271 | |
michael@0 | 1272 | IonSpewPass("BuildSSA"); |
michael@0 | 1273 | AssertBasicGraphCoherency(graph); |
michael@0 | 1274 | |
michael@0 | 1275 | if (mir->shouldCancel("Start")) |
michael@0 | 1276 | return false; |
michael@0 | 1277 | |
michael@0 | 1278 | { |
michael@0 | 1279 | AutoTraceLog log(logger, TraceLogger::SplitCriticalEdges); |
michael@0 | 1280 | if (!SplitCriticalEdges(graph)) |
michael@0 | 1281 | return false; |
michael@0 | 1282 | IonSpewPass("Split Critical Edges"); |
michael@0 | 1283 | AssertGraphCoherency(graph); |
michael@0 | 1284 | |
michael@0 | 1285 | if (mir->shouldCancel("Split Critical Edges")) |
michael@0 | 1286 | return false; |
michael@0 | 1287 | } |
michael@0 | 1288 | |
michael@0 | 1289 | { |
michael@0 | 1290 | AutoTraceLog log(logger, TraceLogger::RenumberBlocks); |
michael@0 | 1291 | if (!RenumberBlocks(graph)) |
michael@0 | 1292 | return false; |
michael@0 | 1293 | IonSpewPass("Renumber Blocks"); |
michael@0 | 1294 | AssertGraphCoherency(graph); |
michael@0 | 1295 | |
michael@0 | 1296 | if (mir->shouldCancel("Renumber Blocks")) |
michael@0 | 1297 | return false; |
michael@0 | 1298 | } |
michael@0 | 1299 | |
michael@0 | 1300 | { |
michael@0 | 1301 | AutoTraceLog log(logger, TraceLogger::DominatorTree); |
michael@0 | 1302 | if (!BuildDominatorTree(graph)) |
michael@0 | 1303 | return false; |
michael@0 | 1304 | // No spew: graph not changed. |
michael@0 | 1305 | |
michael@0 | 1306 | if (mir->shouldCancel("Dominator Tree")) |
michael@0 | 1307 | return false; |
michael@0 | 1308 | } |
michael@0 | 1309 | |
michael@0 | 1310 | { |
michael@0 | 1311 | AutoTraceLog log(logger, TraceLogger::PhiAnalysis); |
michael@0 | 1312 | // Aggressive phi elimination must occur before any code elimination. If the |
michael@0 | 1313 | // script contains a try-statement, we only compiled the try block and not |
michael@0 | 1314 | // the catch or finally blocks, so in this case it's also invalid to use |
michael@0 | 1315 | // aggressive phi elimination. |
michael@0 | 1316 | Observability observability = graph.hasTryBlock() |
michael@0 | 1317 | ? ConservativeObservability |
michael@0 | 1318 | : AggressiveObservability; |
michael@0 | 1319 | if (!EliminatePhis(mir, graph, observability)) |
michael@0 | 1320 | return false; |
michael@0 | 1321 | IonSpewPass("Eliminate phis"); |
michael@0 | 1322 | AssertGraphCoherency(graph); |
michael@0 | 1323 | |
michael@0 | 1324 | if (mir->shouldCancel("Eliminate phis")) |
michael@0 | 1325 | return false; |
michael@0 | 1326 | |
michael@0 | 1327 | if (!BuildPhiReverseMapping(graph)) |
michael@0 | 1328 | return false; |
michael@0 | 1329 | AssertExtendedGraphCoherency(graph); |
michael@0 | 1330 | // No spew: graph not changed. |
michael@0 | 1331 | |
michael@0 | 1332 | if (mir->shouldCancel("Phi reverse mapping")) |
michael@0 | 1333 | return false; |
michael@0 | 1334 | } |
michael@0 | 1335 | |
michael@0 | 1336 | if (!mir->compilingAsmJS()) { |
michael@0 | 1337 | AutoTraceLog log(logger, TraceLogger::ApplyTypes); |
michael@0 | 1338 | if (!ApplyTypeInformation(mir, graph)) |
michael@0 | 1339 | return false; |
michael@0 | 1340 | IonSpewPass("Apply types"); |
michael@0 | 1341 | AssertExtendedGraphCoherency(graph); |
michael@0 | 1342 | |
michael@0 | 1343 | if (mir->shouldCancel("Apply types")) |
michael@0 | 1344 | return false; |
michael@0 | 1345 | } |
michael@0 | 1346 | |
michael@0 | 1347 | if (graph.entryBlock()->info().executionMode() == ParallelExecution) { |
michael@0 | 1348 | AutoTraceLog log(logger, TraceLogger::ParallelSafetyAnalysis); |
michael@0 | 1349 | ParallelSafetyAnalysis analysis(mir, graph); |
michael@0 | 1350 | if (!analysis.analyze()) |
michael@0 | 1351 | return false; |
michael@0 | 1352 | } |
michael@0 | 1353 | |
michael@0 | 1354 | // Alias analysis is required for LICM and GVN so that we don't move |
michael@0 | 1355 | // loads across stores. |
michael@0 | 1356 | if (mir->optimizationInfo().licmEnabled() || |
michael@0 | 1357 | mir->optimizationInfo().gvnEnabled()) |
michael@0 | 1358 | { |
michael@0 | 1359 | AutoTraceLog log(logger, TraceLogger::AliasAnalysis); |
michael@0 | 1360 | AliasAnalysis analysis(mir, graph); |
michael@0 | 1361 | if (!analysis.analyze()) |
michael@0 | 1362 | return false; |
michael@0 | 1363 | IonSpewPass("Alias analysis"); |
michael@0 | 1364 | AssertExtendedGraphCoherency(graph); |
michael@0 | 1365 | |
michael@0 | 1366 | if (mir->shouldCancel("Alias analysis")) |
michael@0 | 1367 | return false; |
michael@0 | 1368 | |
michael@0 | 1369 | // Eliminating dead resume point operands requires basic block |
michael@0 | 1370 | // instructions to be numbered. Reuse the numbering computed during |
michael@0 | 1371 | // alias analysis. |
michael@0 | 1372 | if (!EliminateDeadResumePointOperands(mir, graph)) |
michael@0 | 1373 | return false; |
michael@0 | 1374 | |
michael@0 | 1375 | if (mir->shouldCancel("Eliminate dead resume point operands")) |
michael@0 | 1376 | return false; |
michael@0 | 1377 | } |
michael@0 | 1378 | |
michael@0 | 1379 | if (mir->optimizationInfo().gvnEnabled()) { |
michael@0 | 1380 | AutoTraceLog log(logger, TraceLogger::GVN); |
michael@0 | 1381 | ValueNumberer gvn(mir, graph, mir->optimizationInfo().gvnKind() == GVN_Optimistic); |
michael@0 | 1382 | if (!gvn.analyze()) |
michael@0 | 1383 | return false; |
michael@0 | 1384 | IonSpewPass("GVN"); |
michael@0 | 1385 | AssertExtendedGraphCoherency(graph); |
michael@0 | 1386 | |
michael@0 | 1387 | if (mir->shouldCancel("GVN")) |
michael@0 | 1388 | return false; |
michael@0 | 1389 | } |
michael@0 | 1390 | |
michael@0 | 1391 | if (mir->optimizationInfo().uceEnabled()) { |
michael@0 | 1392 | AutoTraceLog log(logger, TraceLogger::UCE); |
michael@0 | 1393 | UnreachableCodeElimination uce(mir, graph); |
michael@0 | 1394 | if (!uce.analyze()) |
michael@0 | 1395 | return false; |
michael@0 | 1396 | IonSpewPass("UCE"); |
michael@0 | 1397 | AssertExtendedGraphCoherency(graph); |
michael@0 | 1398 | |
michael@0 | 1399 | if (mir->shouldCancel("UCE")) |
michael@0 | 1400 | return false; |
michael@0 | 1401 | } |
michael@0 | 1402 | |
michael@0 | 1403 | if (mir->optimizationInfo().licmEnabled()) { |
michael@0 | 1404 | AutoTraceLog log(logger, TraceLogger::LICM); |
michael@0 | 1405 | // LICM can hoist instructions from conditional branches and trigger |
michael@0 | 1406 | // repeated bailouts. Disable it if this script is known to bailout |
michael@0 | 1407 | // frequently. |
michael@0 | 1408 | JSScript *script = mir->info().script(); |
michael@0 | 1409 | if (!script || !script->hadFrequentBailouts()) { |
michael@0 | 1410 | LICM licm(mir, graph); |
michael@0 | 1411 | if (!licm.analyze()) |
michael@0 | 1412 | return false; |
michael@0 | 1413 | IonSpewPass("LICM"); |
michael@0 | 1414 | AssertExtendedGraphCoherency(graph); |
michael@0 | 1415 | |
michael@0 | 1416 | if (mir->shouldCancel("LICM")) |
michael@0 | 1417 | return false; |
michael@0 | 1418 | } |
michael@0 | 1419 | } |
michael@0 | 1420 | |
michael@0 | 1421 | if (mir->optimizationInfo().rangeAnalysisEnabled()) { |
michael@0 | 1422 | AutoTraceLog log(logger, TraceLogger::RangeAnalysis); |
michael@0 | 1423 | RangeAnalysis r(mir, graph); |
michael@0 | 1424 | if (!r.addBetaNodes()) |
michael@0 | 1425 | return false; |
michael@0 | 1426 | IonSpewPass("Beta"); |
michael@0 | 1427 | AssertExtendedGraphCoherency(graph); |
michael@0 | 1428 | |
michael@0 | 1429 | if (mir->shouldCancel("RA Beta")) |
michael@0 | 1430 | return false; |
michael@0 | 1431 | |
michael@0 | 1432 | if (!r.analyze() || !r.addRangeAssertions()) |
michael@0 | 1433 | return false; |
michael@0 | 1434 | IonSpewPass("Range Analysis"); |
michael@0 | 1435 | AssertExtendedGraphCoherency(graph); |
michael@0 | 1436 | |
michael@0 | 1437 | if (mir->shouldCancel("Range Analysis")) |
michael@0 | 1438 | return false; |
michael@0 | 1439 | |
michael@0 | 1440 | if (!r.removeBetaNodes()) |
michael@0 | 1441 | return false; |
michael@0 | 1442 | IonSpewPass("De-Beta"); |
michael@0 | 1443 | AssertExtendedGraphCoherency(graph); |
michael@0 | 1444 | |
michael@0 | 1445 | if (mir->shouldCancel("RA De-Beta")) |
michael@0 | 1446 | return false; |
michael@0 | 1447 | |
michael@0 | 1448 | if (mir->optimizationInfo().uceEnabled()) { |
michael@0 | 1449 | bool shouldRunUCE = false; |
michael@0 | 1450 | if (!r.prepareForUCE(&shouldRunUCE)) |
michael@0 | 1451 | return false; |
michael@0 | 1452 | IonSpewPass("RA check UCE"); |
michael@0 | 1453 | AssertExtendedGraphCoherency(graph); |
michael@0 | 1454 | |
michael@0 | 1455 | if (mir->shouldCancel("RA check UCE")) |
michael@0 | 1456 | return false; |
michael@0 | 1457 | |
michael@0 | 1458 | if (shouldRunUCE) { |
michael@0 | 1459 | UnreachableCodeElimination uce(mir, graph); |
michael@0 | 1460 | uce.disableAliasAnalysis(); |
michael@0 | 1461 | if (!uce.analyze()) |
michael@0 | 1462 | return false; |
michael@0 | 1463 | IonSpewPass("UCE After RA"); |
michael@0 | 1464 | AssertExtendedGraphCoherency(graph); |
michael@0 | 1465 | |
michael@0 | 1466 | if (mir->shouldCancel("UCE After RA")) |
michael@0 | 1467 | return false; |
michael@0 | 1468 | } |
michael@0 | 1469 | } |
michael@0 | 1470 | |
michael@0 | 1471 | if (!r.truncate()) |
michael@0 | 1472 | return false; |
michael@0 | 1473 | IonSpewPass("Truncate Doubles"); |
michael@0 | 1474 | AssertExtendedGraphCoherency(graph); |
michael@0 | 1475 | |
michael@0 | 1476 | if (mir->shouldCancel("Truncate Doubles")) |
michael@0 | 1477 | return false; |
michael@0 | 1478 | } |
michael@0 | 1479 | |
michael@0 | 1480 | if (mir->optimizationInfo().eaaEnabled()) { |
michael@0 | 1481 | AutoTraceLog log(logger, TraceLogger::EffectiveAddressAnalysis); |
michael@0 | 1482 | EffectiveAddressAnalysis eaa(graph); |
michael@0 | 1483 | if (!eaa.analyze()) |
michael@0 | 1484 | return false; |
michael@0 | 1485 | IonSpewPass("Effective Address Analysis"); |
michael@0 | 1486 | AssertExtendedGraphCoherency(graph); |
michael@0 | 1487 | |
michael@0 | 1488 | if (mir->shouldCancel("Effective Address Analysis")) |
michael@0 | 1489 | return false; |
michael@0 | 1490 | } |
michael@0 | 1491 | |
michael@0 | 1492 | { |
michael@0 | 1493 | AutoTraceLog log(logger, TraceLogger::EliminateDeadCode); |
michael@0 | 1494 | if (!EliminateDeadCode(mir, graph)) |
michael@0 | 1495 | return false; |
michael@0 | 1496 | IonSpewPass("DCE"); |
michael@0 | 1497 | AssertExtendedGraphCoherency(graph); |
michael@0 | 1498 | |
michael@0 | 1499 | if (mir->shouldCancel("DCE")) |
michael@0 | 1500 | return false; |
michael@0 | 1501 | } |
michael@0 | 1502 | |
michael@0 | 1503 | // Passes after this point must not move instructions; these analyses |
michael@0 | 1504 | // depend on knowing the final order in which instructions will execute. |
michael@0 | 1505 | |
michael@0 | 1506 | if (mir->optimizationInfo().edgeCaseAnalysisEnabled()) { |
michael@0 | 1507 | AutoTraceLog log(logger, TraceLogger::EdgeCaseAnalysis); |
michael@0 | 1508 | EdgeCaseAnalysis edgeCaseAnalysis(mir, graph); |
michael@0 | 1509 | if (!edgeCaseAnalysis.analyzeLate()) |
michael@0 | 1510 | return false; |
michael@0 | 1511 | IonSpewPass("Edge Case Analysis (Late)"); |
michael@0 | 1512 | AssertGraphCoherency(graph); |
michael@0 | 1513 | |
michael@0 | 1514 | if (mir->shouldCancel("Edge Case Analysis (Late)")) |
michael@0 | 1515 | return false; |
michael@0 | 1516 | } |
michael@0 | 1517 | |
michael@0 | 1518 | if (mir->optimizationInfo().eliminateRedundantChecksEnabled()) { |
michael@0 | 1519 | AutoTraceLog log(logger, TraceLogger::EliminateRedundantChecks); |
michael@0 | 1520 | // Note: check elimination has to run after all other passes that move |
michael@0 | 1521 | // instructions. Since check uses are replaced with the actual index, |
michael@0 | 1522 | // code motion after this pass could incorrectly move a load or store |
michael@0 | 1523 | // before its bounds check. |
michael@0 | 1524 | if (!EliminateRedundantChecks(graph)) |
michael@0 | 1525 | return false; |
michael@0 | 1526 | IonSpewPass("Bounds Check Elimination"); |
michael@0 | 1527 | AssertGraphCoherency(graph); |
michael@0 | 1528 | } |
michael@0 | 1529 | |
michael@0 | 1530 | return true; |
michael@0 | 1531 | } |
michael@0 | 1532 | |
// Lower the optimized MIR graph to LIR and run the register allocator chosen
// by the compilation's optimization info. Returns nullptr on OOM, allocator
// failure, or cancellation. The LIRGraph is allocated from the MIRGenerator's
// LifoAlloc, so its lifetime is tied to the compilation.
LIRGraph *
GenerateLIR(MIRGenerator *mir)
{
    MIRGraph &graph = mir->graph();

    LIRGraph *lir = mir->alloc().lifoAlloc()->new_<LIRGraph>(&graph);
    if (!lir || !lir->init())
        return nullptr;

    LIRGenerator lirgen(mir, graph, *lir);
    if (!lirgen.generate())
        return nullptr;
    IonSpewPass("Generate LIR");

    if (mir->shouldCancel("Generate LIR"))
        return nullptr;

    // Snapshot/verification state for the register allocator. It is recorded
    // and checked only in DEBUG builds for LSRA and backtracking, but always
    // for the stupid allocator (see that case below for why).
    AllocationIntegrityState integrity(*lir);

    switch (mir->optimizationInfo().registerAllocator()) {
      case RegisterAllocator_LSRA: {
#ifdef DEBUG
        if (!integrity.record())
            return nullptr;
#endif

        LinearScanAllocator regalloc(mir, &lirgen, *lir);
        if (!regalloc.go())
            return nullptr;

#ifdef DEBUG
        if (!integrity.check(false))
            return nullptr;
#endif

        // Note: only the LSRA spew variant takes the allocator argument.
        IonSpewPass("Allocate Registers [LSRA]", &regalloc);
        break;
      }

      case RegisterAllocator_Backtracking: {
#ifdef DEBUG
        if (!integrity.record())
            return nullptr;
#endif

        BacktrackingAllocator regalloc(mir, &lirgen, *lir);
        if (!regalloc.go())
            return nullptr;

#ifdef DEBUG
        if (!integrity.check(false))
            return nullptr;
#endif

        IonSpewPass("Allocate Registers [Backtracking]");
        break;
      }

      case RegisterAllocator_Stupid: {
        // Use the integrity checker to populate safepoint information, so
        // run it in all builds.
        if (!integrity.record())
            return nullptr;

        StupidAllocator regalloc(mir, &lirgen, *lir);
        if (!regalloc.go())
            return nullptr;
        if (!integrity.check(true))
            return nullptr;
        IonSpewPass("Allocate Registers [Stupid]");
        break;
      }

      default:
        MOZ_ASSUME_UNREACHABLE("Bad regalloc");
    }

    if (mir->shouldCancel("Allocate Registers"))
        return nullptr;

    // Now that all optimization and register allocation is done, re-introduce
    // critical edges to avoid unnecessary jumps.
    if (!UnsplitEdges(lir))
        return nullptr;
    IonSpewPass("Unsplit Critical Edges");
    AssertBasicGraphCoherency(graph);

    return lir;
}
michael@0 | 1622 | |
michael@0 | 1623 | CodeGenerator * |
michael@0 | 1624 | GenerateCode(MIRGenerator *mir, LIRGraph *lir) |
michael@0 | 1625 | { |
michael@0 | 1626 | CodeGenerator *codegen = js_new<CodeGenerator>(mir, lir); |
michael@0 | 1627 | if (!codegen) |
michael@0 | 1628 | return nullptr; |
michael@0 | 1629 | |
michael@0 | 1630 | if (!codegen->generate()) { |
michael@0 | 1631 | js_delete(codegen); |
michael@0 | 1632 | return nullptr; |
michael@0 | 1633 | } |
michael@0 | 1634 | |
michael@0 | 1635 | return codegen; |
michael@0 | 1636 | } |
michael@0 | 1637 | |
michael@0 | 1638 | CodeGenerator * |
michael@0 | 1639 | CompileBackEnd(MIRGenerator *mir) |
michael@0 | 1640 | { |
michael@0 | 1641 | if (!OptimizeMIR(mir)) |
michael@0 | 1642 | return nullptr; |
michael@0 | 1643 | |
michael@0 | 1644 | LIRGraph *lir = GenerateLIR(mir); |
michael@0 | 1645 | if (!lir) |
michael@0 | 1646 | return nullptr; |
michael@0 | 1647 | |
michael@0 | 1648 | return GenerateCode(mir, lir); |
michael@0 | 1649 | } |
michael@0 | 1650 | |
// Link every finished off-thread Ion compilation belonging to |cx|'s
// compartment onto its script. Runs on the main thread: it holds the worker
// thread lock while manipulating the shared finished-builder list, and drops
// it around the link step (which can allocate and GC). No-op in builds
// without JS_THREADSAFE.
void
AttachFinishedCompilations(JSContext *cx)
{
#ifdef JS_THREADSAFE
    JitCompartment *ion = cx->compartment()->jitCompartment();
    if (!ion)
        return;

    types::AutoEnterAnalysis enterTypes(cx);
    AutoLockWorkerThreadState lock;

    GlobalWorkerThreadState::IonBuilderVector &finished = WorkerThreadState().ionFinishedList();

    TraceLogger *logger = TraceLoggerForMainThread(cx->runtime());

    // Incorporate any off thread compilations for the compartment which have
    // finished, failed or have been cancelled.
    while (true) {
        IonBuilder *builder = nullptr;

        // Find a finished builder for the compartment. The scan restarts from
        // the front each iteration because remove() mutates the vector.
        for (size_t i = 0; i < finished.length(); i++) {
            IonBuilder *testBuilder = finished[i];
            if (testBuilder->compartment == CompileCompartment::get(cx->compartment())) {
                builder = testBuilder;
                WorkerThreadState().remove(finished, &i);
                break;
            }
        }
        if (!builder)
            break;

        // A builder with no background codegen failed or was cancelled; it
        // still must be finished below to release its resources.
        if (CodeGenerator *codegen = builder->backgroundCodegen()) {
            RootedScript script(cx, builder->script());
            IonContext ictx(cx, &builder->alloc());
            AutoTraceLog logScript(logger, TraceLogCreateTextId(logger, script));
            AutoTraceLog logLink(logger, TraceLogger::IonLinking);

            // Root the assembler until the builder is finished below. As it
            // was constructed off thread, the assembler has not been rooted
            // previously, though any GC activity would discard the builder.
            codegen->masm.constructRoot(cx);

            bool success;
            {
                // Release the worker thread lock and root the compiler for GC.
                AutoTempAllocatorRooter root(cx, &builder->alloc());
                AutoUnlockWorkerThreadState unlock;
                success = codegen->link(cx, builder->constraints());
            }

            if (!success) {
                // Silently ignore OOM during code generation. The caller is
                // InvokeInterruptCallback, which always runs at a
                // nondeterministic time. It's not OK to throw a catchable
                // exception from there.
                cx->clearPendingException();
            }
        }

        FinishOffThreadBuilder(builder);
    }
#endif
}
michael@0 | 1715 | |
// Primary chunk size (4 KB) for the LifoAlloc that backs each IonBuilder's
// temporary allocations (see IonCompile below).
static const size_t BUILDER_LIFO_ALLOC_PRIMARY_CHUNK_SIZE = 1 << 12;
michael@0 | 1717 | |
michael@0 | 1718 | static inline bool |
michael@0 | 1719 | OffThreadCompilationAvailable(JSContext *cx) |
michael@0 | 1720 | { |
michael@0 | 1721 | #ifdef JS_THREADSAFE |
michael@0 | 1722 | // Even if off thread compilation is enabled, compilation must still occur |
michael@0 | 1723 | // on the main thread in some cases. Do not compile off thread during an |
michael@0 | 1724 | // incremental GC, as this may trip incremental read barriers. |
michael@0 | 1725 | // |
michael@0 | 1726 | // Require cpuCount > 1 so that Ion compilation jobs and main-thread |
michael@0 | 1727 | // execution are not competing for the same resources. |
michael@0 | 1728 | // |
michael@0 | 1729 | // Skip off thread compilation if PC count profiling is enabled, as |
michael@0 | 1730 | // CodeGenerator::maybeCreateScriptCounts will not attach script profiles |
michael@0 | 1731 | // when running off thread. |
michael@0 | 1732 | return cx->runtime()->canUseParallelIonCompilation() |
michael@0 | 1733 | && WorkerThreadState().cpuCount > 1 |
michael@0 | 1734 | && cx->runtime()->gcIncrementalState == gc::NO_INCREMENTAL |
michael@0 | 1735 | && !cx->runtime()->profilingScripts; |
michael@0 | 1736 | #else |
michael@0 | 1737 | return false; |
michael@0 | 1738 | #endif |
michael@0 | 1739 | } |
michael@0 | 1740 | |
michael@0 | 1741 | static void |
michael@0 | 1742 | TrackAllProperties(JSContext *cx, JSObject *obj) |
michael@0 | 1743 | { |
michael@0 | 1744 | JS_ASSERT(obj->hasSingletonType()); |
michael@0 | 1745 | |
michael@0 | 1746 | for (Shape::Range<NoGC> range(obj->lastProperty()); !range.empty(); range.popFront()) |
michael@0 | 1747 | types::EnsureTrackPropertyTypes(cx, obj, range.front().propid()); |
michael@0 | 1748 | } |
michael@0 | 1749 | |
michael@0 | 1750 | static void |
michael@0 | 1751 | TrackPropertiesForSingletonScopes(JSContext *cx, JSScript *script, BaselineFrame *baselineFrame) |
michael@0 | 1752 | { |
michael@0 | 1753 | // Ensure that all properties of singleton call objects which the script |
michael@0 | 1754 | // could access are tracked. These are generally accessed through |
michael@0 | 1755 | // ALIASEDVAR operations in baseline and will not be tracked even if they |
michael@0 | 1756 | // have been accessed in baseline code. |
michael@0 | 1757 | JSObject *environment = script->functionNonDelazifying() |
michael@0 | 1758 | ? script->functionNonDelazifying()->environment() |
michael@0 | 1759 | : nullptr; |
michael@0 | 1760 | |
michael@0 | 1761 | while (environment && !environment->is<GlobalObject>()) { |
michael@0 | 1762 | if (environment->is<CallObject>() && environment->hasSingletonType()) |
michael@0 | 1763 | TrackAllProperties(cx, environment); |
michael@0 | 1764 | environment = environment->enclosingScope(); |
michael@0 | 1765 | } |
michael@0 | 1766 | |
michael@0 | 1767 | if (baselineFrame) { |
michael@0 | 1768 | JSObject *scope = baselineFrame->scopeChain(); |
michael@0 | 1769 | if (scope->is<CallObject>() && scope->hasSingletonType()) |
michael@0 | 1770 | TrackAllProperties(cx, scope); |
michael@0 | 1771 | } |
michael@0 | 1772 | } |
michael@0 | 1773 | |
// Drive one Ion compilation of |script|: set up the per-compilation
// allocator and compilation objects, build MIR on this thread, then either
// hand the builder to a worker thread (when off-thread compilation is
// available) or run the back end and link the result synchronously.
// |baselineFrame|/|osrPc| describe an OSR entry, if any; |recompile| means an
// IonScript already exists and is being replaced. Returns
// AbortReason_NoAbort on success (or successful off-thread handoff).
static AbortReason
IonCompile(JSContext *cx, JSScript *script,
           BaselineFrame *baselineFrame, jsbytecode *osrPc, bool constructing,
           ExecutionMode executionMode, bool recompile,
           OptimizationLevel optimizationLevel)
{
    TraceLogger *logger = TraceLoggerForMainThread(cx->runtime());
    AutoTraceLog logScript(logger, TraceLogCreateTextId(logger, script));
    AutoTraceLog logCompile(logger, TraceLogger::IonCompilation);

    JS_ASSERT(optimizationLevel > Optimization_DontCompile);

    // Make sure the script's canonical function isn't lazy. We can't de-lazify
    // it in a worker thread.
    script->ensureNonLazyCanonicalFunction(cx);

    TrackPropertiesForSingletonScopes(cx, script, baselineFrame);

    // All compilation data below is carved out of this LifoAlloc. It is
    // deleted by |autoDelete| on every early return; only a successful
    // off-thread handoff relinquishes ownership (see autoDelete.forget()).
    LifoAlloc *alloc = cx->new_<LifoAlloc>(BUILDER_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
    if (!alloc)
        return AbortReason_Alloc;

    ScopedJSDeletePtr<LifoAlloc> autoDelete(alloc);

    TempAllocator *temp = alloc->new_<TempAllocator>(alloc);
    if (!temp)
        return AbortReason_Alloc;

    IonContext ictx(cx, temp);

    types::AutoEnterAnalysis enter(cx);

    if (!cx->compartment()->ensureJitCompartmentExists(cx))
        return AbortReason_Alloc;

    if (!cx->compartment()->jitCompartment()->ensureIonStubsExist(cx))
        return AbortReason_Alloc;

    // Parallel-mode code may inline ForkJoinGetSlice; make sure its stub
    // exists up front, since it cannot be created off thread.
    if (executionMode == ParallelExecution &&
        LIRGenerator::allowInlineForkJoinGetSlice() &&
        !cx->runtime()->jitRuntime()->ensureForkJoinGetSliceStubExists(cx))
    {
        return AbortReason_Alloc;
    }

    MIRGraph *graph = alloc->new_<MIRGraph>(temp);
    if (!graph)
        return AbortReason_Alloc;

    CompileInfo *info = alloc->new_<CompileInfo>(script, script->functionNonDelazifying(), osrPc,
                                                 constructing, executionMode,
                                                 script->needsArgsObj());
    if (!info)
        return AbortReason_Alloc;

    BaselineInspector *inspector = alloc->new_<BaselineInspector>(script);
    if (!inspector)
        return AbortReason_Alloc;

    BaselineFrameInspector *baselineFrameInspector = nullptr;
    if (baselineFrame) {
        baselineFrameInspector = NewBaselineFrameInspector(temp, baselineFrame, info);
        if (!baselineFrameInspector)
            return AbortReason_Alloc;
    }

    AutoTempAllocatorRooter root(cx, temp);
    types::CompilerConstraintList *constraints = types::NewCompilerConstraintList(*temp);
    if (!constraints)
        return AbortReason_Alloc;

    const OptimizationInfo *optimizationInfo = js_IonOptimizations.get(optimizationLevel);
    const JitCompileOptions options(cx);

    // The builder is constructed with a null JSContext: after this point it
    // must stay usable from a worker thread, which has no JSContext.
    IonBuilder *builder = alloc->new_<IonBuilder>((JSContext *) nullptr,
                                                  CompileCompartment::get(cx->compartment()),
                                                  options, temp, graph, constraints,
                                                  inspector, info, optimizationInfo,
                                                  baselineFrameInspector);
    if (!builder)
        return AbortReason_Alloc;

    JS_ASSERT(recompile == HasIonScript(builder->script(), executionMode));
    JS_ASSERT(CanIonCompile(builder->script(), executionMode));

    RootedScript builderScript(cx, builder->script());

    if (recompile) {
        JS_ASSERT(executionMode == SequentialExecution);
        builderScript->ionScript()->setRecompiling();
    }

    IonSpewNewFunction(graph, builderScript);

    // MIR construction happens on this thread regardless; only the back end
    // may be moved off thread.
    bool succeeded = builder->build();
    builder->clearForBackEnd();

    if (!succeeded)
        return builder->abortReason();

    // If possible, compile the script off thread.
    if (OffThreadCompilationAvailable(cx)) {
        // Mark the script as compiling so callers don't re-enter; a
        // recompile already has an IonScript in that slot.
        if (!recompile)
            SetIonScript(builderScript, executionMode, ION_COMPILING_SCRIPT);

        IonSpew(IonSpew_Logs, "Can't log script %s:%d. (Compiled on background thread.)",
                builderScript->filename(), builderScript->lineno());

        if (!StartOffThreadIonCompile(cx, builder)) {
            IonSpew(IonSpew_Abort, "Unable to start off-thread ion compilation.");
            return AbortReason_Alloc;
        }

        // The allocator and associated data will be destroyed after being
        // processed in the finishedOffThreadCompilations list.
        autoDelete.forget();

        return AbortReason_NoAbort;
    }

    // Synchronous path: run the back end and link right here.
    ScopedJSDeletePtr<CodeGenerator> codegen(CompileBackEnd(builder));
    if (!codegen) {
        IonSpew(IonSpew_Abort, "Failed during back-end compilation.");
        return AbortReason_Disable;
    }

    bool success = codegen->link(cx, builder->constraints());

    IonSpewEndFunction();

    return success ? AbortReason_NoAbort : AbortReason_Disable;
}
michael@0 | 1906 | |
michael@0 | 1907 | static bool |
michael@0 | 1908 | CheckFrame(BaselineFrame *frame) |
michael@0 | 1909 | { |
michael@0 | 1910 | JS_ASSERT(!frame->isGeneratorFrame()); |
michael@0 | 1911 | JS_ASSERT(!frame->isDebuggerFrame()); |
michael@0 | 1912 | |
michael@0 | 1913 | // This check is to not overrun the stack. |
michael@0 | 1914 | if (frame->isFunctionFrame() && TooManyArguments(frame->numActualArgs())) { |
michael@0 | 1915 | IonSpew(IonSpew_Abort, "too many actual args"); |
michael@0 | 1916 | return false; |
michael@0 | 1917 | } |
michael@0 | 1918 | |
michael@0 | 1919 | return true; |
michael@0 | 1920 | } |
michael@0 | 1921 | |
michael@0 | 1922 | static bool |
michael@0 | 1923 | CheckScript(JSContext *cx, JSScript *script, bool osr) |
michael@0 | 1924 | { |
michael@0 | 1925 | if (script->isForEval()) { |
michael@0 | 1926 | // Eval frames are not yet supported. Supporting this will require new |
michael@0 | 1927 | // logic in pushBailoutFrame to deal with linking prev. |
michael@0 | 1928 | // Additionally, JSOP_DEFVAR support will require baking in isEvalFrame(). |
michael@0 | 1929 | IonSpew(IonSpew_Abort, "eval script"); |
michael@0 | 1930 | return false; |
michael@0 | 1931 | } |
michael@0 | 1932 | |
michael@0 | 1933 | if (!script->compileAndGo()) { |
michael@0 | 1934 | IonSpew(IonSpew_Abort, "not compile-and-go"); |
michael@0 | 1935 | return false; |
michael@0 | 1936 | } |
michael@0 | 1937 | |
michael@0 | 1938 | return true; |
michael@0 | 1939 | } |
michael@0 | 1940 | |
michael@0 | 1941 | static MethodStatus |
michael@0 | 1942 | CheckScriptSize(JSContext *cx, JSScript* script) |
michael@0 | 1943 | { |
michael@0 | 1944 | if (!js_JitOptions.limitScriptSize) |
michael@0 | 1945 | return Method_Compiled; |
michael@0 | 1946 | |
michael@0 | 1947 | if (script->length() > MAX_OFF_THREAD_SCRIPT_SIZE) { |
michael@0 | 1948 | // Some scripts are so large we never try to Ion compile them. |
michael@0 | 1949 | IonSpew(IonSpew_Abort, "Script too large (%u bytes)", script->length()); |
michael@0 | 1950 | return Method_CantCompile; |
michael@0 | 1951 | } |
michael@0 | 1952 | |
michael@0 | 1953 | uint32_t numLocalsAndArgs = analyze::TotalSlots(script); |
michael@0 | 1954 | if (cx->runtime()->isWorkerRuntime()) { |
michael@0 | 1955 | // DOM Workers don't have off thread compilation enabled. Since workers |
michael@0 | 1956 | // don't block the browser's event loop, allow them to compile larger |
michael@0 | 1957 | // scripts. |
michael@0 | 1958 | JS_ASSERT(!cx->runtime()->canUseParallelIonCompilation()); |
michael@0 | 1959 | |
michael@0 | 1960 | if (script->length() > MAX_DOM_WORKER_SCRIPT_SIZE || |
michael@0 | 1961 | numLocalsAndArgs > MAX_DOM_WORKER_LOCALS_AND_ARGS) |
michael@0 | 1962 | { |
michael@0 | 1963 | return Method_CantCompile; |
michael@0 | 1964 | } |
michael@0 | 1965 | |
michael@0 | 1966 | return Method_Compiled; |
michael@0 | 1967 | } |
michael@0 | 1968 | |
michael@0 | 1969 | if (script->length() > MAX_MAIN_THREAD_SCRIPT_SIZE || |
michael@0 | 1970 | numLocalsAndArgs > MAX_MAIN_THREAD_LOCALS_AND_ARGS) |
michael@0 | 1971 | { |
michael@0 | 1972 | #ifdef JS_THREADSAFE |
michael@0 | 1973 | size_t cpuCount = WorkerThreadState().cpuCount; |
michael@0 | 1974 | #else |
michael@0 | 1975 | size_t cpuCount = 1; |
michael@0 | 1976 | #endif |
michael@0 | 1977 | if (cx->runtime()->canUseParallelIonCompilation() && cpuCount > 1) { |
michael@0 | 1978 | // Even if off thread compilation is enabled, there are cases where |
michael@0 | 1979 | // compilation must still occur on the main thread. Don't compile |
michael@0 | 1980 | // in these cases (except when profiling scripts, as compilations |
michael@0 | 1981 | // occurring with profiling should reflect those without), but do |
michael@0 | 1982 | // not forbid compilation so that the script may be compiled later. |
michael@0 | 1983 | if (!OffThreadCompilationAvailable(cx) && !cx->runtime()->profilingScripts) { |
michael@0 | 1984 | IonSpew(IonSpew_Abort, |
michael@0 | 1985 | "Script too large for main thread, skipping (%u bytes) (%u locals/args)", |
michael@0 | 1986 | script->length(), numLocalsAndArgs); |
michael@0 | 1987 | return Method_Skipped; |
michael@0 | 1988 | } |
michael@0 | 1989 | } else { |
michael@0 | 1990 | IonSpew(IonSpew_Abort, "Script too large (%u bytes) (%u locals/args)", |
michael@0 | 1991 | script->length(), numLocalsAndArgs); |
michael@0 | 1992 | return Method_CantCompile; |
michael@0 | 1993 | } |
michael@0 | 1994 | } |
michael@0 | 1995 | |
michael@0 | 1996 | return Method_Compiled; |
michael@0 | 1997 | } |
michael@0 | 1998 | |
michael@0 | 1999 | bool |
michael@0 | 2000 | CanIonCompileScript(JSContext *cx, JSScript *script, bool osr) |
michael@0 | 2001 | { |
michael@0 | 2002 | if (!script->canIonCompile() || !CheckScript(cx, script, osr)) |
michael@0 | 2003 | return false; |
michael@0 | 2004 | |
michael@0 | 2005 | return CheckScriptSize(cx, script) == Method_Compiled; |
michael@0 | 2006 | } |
michael@0 | 2007 | |
michael@0 | 2008 | static OptimizationLevel |
michael@0 | 2009 | GetOptimizationLevel(HandleScript script, jsbytecode *pc, ExecutionMode executionMode) |
michael@0 | 2010 | { |
michael@0 | 2011 | if (executionMode == ParallelExecution) |
michael@0 | 2012 | return Optimization_Normal; |
michael@0 | 2013 | |
michael@0 | 2014 | JS_ASSERT(executionMode == SequentialExecution); |
michael@0 | 2015 | |
michael@0 | 2016 | return js_IonOptimizations.levelForScript(script, pc); |
michael@0 | 2017 | } |
michael@0 | 2018 | |
// Attempt to Ion-compile |script| for |executionMode|, possibly replacing
// an existing IonScript compiled at a lower optimization level or with a
// mismatching OSR pc. Returns:
//  - Method_Compiled    a matching IonScript is present afterwards,
//  - Method_Skipped     compilation was deferred, is off-thread, or the
//                       resulting IonScript's OSR pc does not match,
//  - Method_CantCompile the script cannot be compiled in this state,
//  - Method_Error       a hard failure (e.g. OOM, which is reported here).
static MethodStatus
Compile(JSContext *cx, HandleScript script, BaselineFrame *osrFrame, jsbytecode *osrPc,
        bool constructing, ExecutionMode executionMode)
{
    JS_ASSERT(jit::IsIonEnabled(cx));
    JS_ASSERT(jit::IsBaselineEnabled(cx));
    JS_ASSERT_IF(osrPc != nullptr, LoopEntryCanIonOsr(osrPc));
    // Parallel compiles never enter through OSR and never recompile.
    JS_ASSERT_IF(executionMode == ParallelExecution, !osrFrame && !osrPc);
    JS_ASSERT_IF(executionMode == ParallelExecution, !HasIonScript(script, executionMode));

    // Ion builds on top of Baseline; without a BaselineScript we wait.
    if (!script->hasBaselineScript())
        return Method_Skipped;

    if (cx->compartment()->debugMode()) {
        IonSpew(IonSpew_Abort, "debugging");
        return Method_CantCompile;
    }

    if (!CheckScript(cx, script, bool(osrPc))) {
        IonSpew(IonSpew_Abort, "Aborted compilation of %s:%d", script->filename(), script->lineno());
        return Method_CantCompile;
    }

    MethodStatus status = CheckScriptSize(cx, script);
    if (status != Method_Compiled) {
        IonSpew(IonSpew_Abort, "Aborted compilation of %s:%d", script->filename(), script->lineno());
        return status;
    }

    bool recompile = false;
    OptimizationLevel optimizationLevel = GetOptimizationLevel(script, osrPc, executionMode);
    if (optimizationLevel == Optimization_DontCompile)
        return Method_Skipped;

    // NOTE(review): the osrPc branches below use script->ionScript() rather
    // than |scriptIon|; osrPc is non-null only in sequential mode (asserted
    // above), where the two presumably refer to the same IonScript — confirm.
    IonScript *scriptIon = GetIonScript(script, executionMode);
    if (scriptIon) {
        if (!scriptIon->method())
            return Method_CantCompile;

        // The status returned when we decide NOT to (re)compile; starts as
        // Method_Compiled (existing code is fine) and is downgraded to
        // Method_Skipped once an OSR pc mismatch has been observed.
        MethodStatus failedState = Method_Compiled;

        // If we keep failing to enter the script due to an OSR pc mismatch,
        // recompile with the right pc.
        if (osrPc && script->ionScript()->osrPc() != osrPc) {
            uint32_t count = script->ionScript()->incrOsrPcMismatchCounter();
            if (count <= js_JitOptions.osrPcMismatchesBeforeRecompile)
                return Method_Skipped;

            failedState = Method_Skipped;
        }

        // Don't recompile/overwrite higher optimized code,
        // with a lower optimization level.
        if (optimizationLevel < scriptIon->optimizationLevel())
            return failedState;

        // Same level and (absent OSR) nothing to change: keep existing code.
        if (optimizationLevel == scriptIon->optimizationLevel() &&
            (!osrPc || script->ionScript()->osrPc() == osrPc))
        {
            return failedState;
        }

        // Don't start compiling if already compiling
        if (scriptIon->isRecompiling())
            return failedState;

        if (osrPc)
            script->ionScript()->resetOsrPcMismatchCounter();

        recompile = true;
    }

    AbortReason reason = IonCompile(cx, script, osrFrame, osrPc, constructing, executionMode,
                                    recompile, optimizationLevel);
    if (reason == AbortReason_Error)
        return Method_Error;

    if (reason == AbortReason_Disable)
        return Method_CantCompile;

    if (reason == AbortReason_Alloc) {
        js_ReportOutOfMemory(cx);
        return Method_Error;
    }

    // Compilation succeeded or we invalidated right away or an inlining/alloc abort
    if (HasIonScript(script, executionMode)) {
        // An IonScript exists, but only count it as a success if its OSR
        // entry (if requested) matches the pc we want to enter at.
        if (osrPc && script->ionScript()->osrPc() != osrPc)
            return Method_Skipped;
        return Method_Compiled;
    }
    return Method_Skipped;
}
michael@0 | 2112 | |
michael@0 | 2113 | } // namespace jit |
michael@0 | 2114 | } // namespace js |
michael@0 | 2115 | |
michael@0 | 2116 | // Decide if a transition from interpreter execution to Ion code should occur. |
michael@0 | 2117 | // May compile or recompile the target JSScript. |
michael@0 | 2118 | MethodStatus |
michael@0 | 2119 | jit::CanEnterAtBranch(JSContext *cx, JSScript *script, BaselineFrame *osrFrame, |
michael@0 | 2120 | jsbytecode *pc, bool isConstructing) |
michael@0 | 2121 | { |
michael@0 | 2122 | JS_ASSERT(jit::IsIonEnabled(cx)); |
michael@0 | 2123 | JS_ASSERT((JSOp)*pc == JSOP_LOOPENTRY); |
michael@0 | 2124 | JS_ASSERT(LoopEntryCanIonOsr(pc)); |
michael@0 | 2125 | |
michael@0 | 2126 | // Skip if the script has been disabled. |
michael@0 | 2127 | if (!script->canIonCompile()) |
michael@0 | 2128 | return Method_Skipped; |
michael@0 | 2129 | |
michael@0 | 2130 | // Skip if the script is being compiled off thread. |
michael@0 | 2131 | if (script->isIonCompilingOffThread()) |
michael@0 | 2132 | return Method_Skipped; |
michael@0 | 2133 | |
michael@0 | 2134 | // Skip if the code is expected to result in a bailout. |
michael@0 | 2135 | if (script->hasIonScript() && script->ionScript()->bailoutExpected()) |
michael@0 | 2136 | return Method_Skipped; |
michael@0 | 2137 | |
michael@0 | 2138 | // Optionally ignore on user request. |
michael@0 | 2139 | if (!js_JitOptions.osr) |
michael@0 | 2140 | return Method_Skipped; |
michael@0 | 2141 | |
michael@0 | 2142 | // Mark as forbidden if frame can't be handled. |
michael@0 | 2143 | if (!CheckFrame(osrFrame)) { |
michael@0 | 2144 | ForbidCompilation(cx, script); |
michael@0 | 2145 | return Method_CantCompile; |
michael@0 | 2146 | } |
michael@0 | 2147 | |
michael@0 | 2148 | // Attempt compilation. |
michael@0 | 2149 | // - Returns Method_Compiled if the right ionscript is present |
michael@0 | 2150 | // (Meaning it was present or a sequantial compile finished) |
michael@0 | 2151 | // - Returns Method_Skipped if pc doesn't match |
michael@0 | 2152 | // (This means a background thread compilation with that pc could have started or not.) |
michael@0 | 2153 | RootedScript rscript(cx, script); |
michael@0 | 2154 | MethodStatus status = Compile(cx, rscript, osrFrame, pc, isConstructing, SequentialExecution); |
michael@0 | 2155 | if (status != Method_Compiled) { |
michael@0 | 2156 | if (status == Method_CantCompile) |
michael@0 | 2157 | ForbidCompilation(cx, script); |
michael@0 | 2158 | return status; |
michael@0 | 2159 | } |
michael@0 | 2160 | |
michael@0 | 2161 | return Method_Compiled; |
michael@0 | 2162 | } |
michael@0 | 2163 | |
// Decide if a call or execution of |state.script()| should enter Ion code,
// compiling it first if necessary. Returns Method_Compiled when Ion entry
// is possible, Method_Skipped to fall back to Baseline/interpreter, and
// Method_CantCompile after permanently disqualifying the script.
MethodStatus
jit::CanEnter(JSContext *cx, RunState &state)
{
    JS_ASSERT(jit::IsIonEnabled(cx));

    JSScript *script = state.script();

    // Skip if the script has been disabled.
    if (!script->canIonCompile())
        return Method_Skipped;

    // Skip if the script is being compiled off thread.
    if (script->isIonCompilingOffThread())
        return Method_Skipped;

    // Skip if the code is expected to result in a bailout.
    if (script->hasIonScript() && script->ionScript()->bailoutExpected())
        return Method_Skipped;

    // If constructing, allocate a new |this| object before building Ion.
    // Creating |this| is done before building Ion because it may change the
    // type information and invalidate compilation results.
    if (state.isInvoke()) {
        InvokeState &invoke = *state.asInvoke();

        // Too many actual arguments: never compile.
        if (TooManyArguments(invoke.args().length())) {
            IonSpew(IonSpew_Abort, "too many actual args");
            ForbidCompilation(cx, script);
            return Method_CantCompile;
        }

        // Too many formal arguments: never compile.
        if (TooManyArguments(invoke.args().callee().as<JSFunction>().nargs())) {
            IonSpew(IonSpew_Abort, "too many args");
            ForbidCompilation(cx, script);
            return Method_CantCompile;
        }

        if (invoke.constructing() && invoke.args().thisv().isPrimitive()) {
            // Root |script| across the allocation below, which can GC;
            // the raw pointer is reloaded from the root afterwards.
            RootedScript scriptRoot(cx, script);
            RootedObject callee(cx, &invoke.args().callee());
            RootedObject obj(cx, CreateThisForFunction(cx, callee,
                                                       invoke.useNewType()
                                                       ? SingletonObject
                                                       : GenericObject));
            if (!obj || !jit::IsIonEnabled(cx)) // Note: OOM under CreateThis can disable TI.
                return Method_Skipped;
            invoke.args().setThis(ObjectValue(*obj));
            script = scriptRoot;
        }
    } else if (state.isGenerator()) {
        // Generator frames are not supported by Ion.
        IonSpew(IonSpew_Abort, "generator frame");
        ForbidCompilation(cx, script);
        return Method_CantCompile;
    }

    // If --ion-eager is used, compile with Baseline first, so that we
    // can directly enter IonMonkey.
    RootedScript rscript(cx, script);
    if (js_JitOptions.eagerCompilation && !rscript->hasBaselineScript()) {
        MethodStatus status = CanEnterBaselineMethod(cx, state);
        if (status != Method_Compiled)
            return status;
    }

    // Attempt compilation. Returns Method_Compiled if already compiled.
    bool constructing = state.isInvoke() && state.asInvoke()->constructing();
    MethodStatus status =
        Compile(cx, rscript, nullptr, nullptr, constructing, SequentialExecution);
    if (status != Method_Compiled) {
        if (status == Method_CantCompile)
            ForbidCompilation(cx, rscript);
        return status;
    }

    return Method_Compiled;
}
michael@0 | 2240 | |
michael@0 | 2241 | MethodStatus |
michael@0 | 2242 | jit::CompileFunctionForBaseline(JSContext *cx, HandleScript script, BaselineFrame *frame, |
michael@0 | 2243 | bool isConstructing) |
michael@0 | 2244 | { |
michael@0 | 2245 | JS_ASSERT(jit::IsIonEnabled(cx)); |
michael@0 | 2246 | JS_ASSERT(frame->fun()->nonLazyScript()->canIonCompile()); |
michael@0 | 2247 | JS_ASSERT(!frame->fun()->nonLazyScript()->isIonCompilingOffThread()); |
michael@0 | 2248 | JS_ASSERT(!frame->fun()->nonLazyScript()->hasIonScript()); |
michael@0 | 2249 | JS_ASSERT(frame->isFunctionFrame()); |
michael@0 | 2250 | |
michael@0 | 2251 | // Mark as forbidden if frame can't be handled. |
michael@0 | 2252 | if (!CheckFrame(frame)) { |
michael@0 | 2253 | ForbidCompilation(cx, script); |
michael@0 | 2254 | return Method_CantCompile; |
michael@0 | 2255 | } |
michael@0 | 2256 | |
michael@0 | 2257 | // Attempt compilation. Returns Method_Compiled if already compiled. |
michael@0 | 2258 | MethodStatus status = |
michael@0 | 2259 | Compile(cx, script, frame, nullptr, isConstructing, SequentialExecution); |
michael@0 | 2260 | if (status != Method_Compiled) { |
michael@0 | 2261 | if (status == Method_CantCompile) |
michael@0 | 2262 | ForbidCompilation(cx, script); |
michael@0 | 2263 | return status; |
michael@0 | 2264 | } |
michael@0 | 2265 | |
michael@0 | 2266 | return Method_Compiled; |
michael@0 | 2267 | } |
michael@0 | 2268 | |
michael@0 | 2269 | MethodStatus |
michael@0 | 2270 | jit::Recompile(JSContext *cx, HandleScript script, BaselineFrame *osrFrame, jsbytecode *osrPc, |
michael@0 | 2271 | bool constructing) |
michael@0 | 2272 | { |
michael@0 | 2273 | JS_ASSERT(script->hasIonScript()); |
michael@0 | 2274 | if (script->ionScript()->isRecompiling()) |
michael@0 | 2275 | return Method_Compiled; |
michael@0 | 2276 | |
michael@0 | 2277 | MethodStatus status = |
michael@0 | 2278 | Compile(cx, script, osrFrame, osrPc, constructing, SequentialExecution); |
michael@0 | 2279 | if (status != Method_Compiled) { |
michael@0 | 2280 | if (status == Method_CantCompile) |
michael@0 | 2281 | ForbidCompilation(cx, script); |
michael@0 | 2282 | return status; |
michael@0 | 2283 | } |
michael@0 | 2284 | |
michael@0 | 2285 | return Method_Compiled; |
michael@0 | 2286 | } |
michael@0 | 2287 | |
// Decide if |script| can be entered in parallel-execution mode, compiling
// a parallel IonScript if needed. Because ensuring the trampoline below
// can GC, the final hasParallelIonScript() re-check is load-bearing.
MethodStatus
jit::CanEnterInParallel(JSContext *cx, HandleScript script)
{
    // Skip if the script has been disabled.
    //
    // Note: We return Method_Skipped in this case because the other
    // CanEnter() methods do so. However, ForkJoin.cpp detects this
    // condition differently and treats it more like an error.
    if (!script->canParallelIonCompile())
        return Method_Skipped;

    // Skip if the script is being compiled off thread.
    if (script->isParallelIonCompilingOffThread())
        return Method_Skipped;

    MethodStatus status = Compile(cx, script, nullptr, nullptr, false, ParallelExecution);
    if (status != Method_Compiled) {
        if (status == Method_CantCompile)
            ForbidCompilation(cx, script, ParallelExecution);
        return status;
    }

    // This can GC, so afterward, script->parallelIon is
    // not guaranteed to be valid.
    if (!cx->runtime()->jitRuntime()->enterIon())
        return Method_Error;

    // Subtle: it is possible for GC to occur during
    // compilation of one of the invoked functions, which
    // would cause the earlier functions (such as the
    // kernel itself) to be collected. In this event, we
    // give up and fallback to sequential for now.
    if (!script->hasParallelIonScript()) {
        parallel::Spew(
            parallel::SpewCompile,
            "Script %p:%s:%u was garbage-collected or invalidated",
            script.get(), script->filename(), script->lineno());
        return Method_Skipped;
    }

    return Method_Compiled;
}
michael@0 | 2330 | |
// Check whether FastInvoke may be used to call |script| with
// |numActualArgs| arguments. The trampoline fetch below can GC, so the
// final hasIonScript() re-check is required before reporting success.
MethodStatus
jit::CanEnterUsingFastInvoke(JSContext *cx, HandleScript script, uint32_t numActualArgs)
{
    JS_ASSERT(jit::IsIonEnabled(cx));

    // Skip if the code is expected to result in a bailout.
    if (!script->hasIonScript() || script->ionScript()->bailoutExpected())
        return Method_Skipped;

    // Don't handle arguments underflow, to make this work we would have to pad
    // missing arguments with |undefined|.
    if (numActualArgs < script->functionNonDelazifying()->nargs())
        return Method_Skipped;

    if (!cx->compartment()->ensureJitCompartmentExists(cx))
        return Method_Error;

    // This can GC, so afterward, script->ion is not guaranteed to be valid.
    if (!cx->runtime()->jitRuntime()->enterIon())
        return Method_Error;

    if (!script->hasIonScript())
        return Method_Skipped;

    return Method_Compiled;
}
michael@0 | 2357 | |
// Transfer control from C++ into compiled Ion code via the EnterIon
// trampoline. |data| must be fully populated (see SetEnterJitData); for
// constructing calls the caller must already have created |this|.
static IonExecStatus
EnterIon(JSContext *cx, EnterJitData &data)
{
    JS_CHECK_RECURSION(cx, return IonExec_Aborted);
    JS_ASSERT(jit::IsIonEnabled(cx));
    JS_ASSERT(!data.osrFrame);

    EnterJitCode enter = cx->runtime()->jitRuntime()->enterIon();

    // Caller must construct |this| before invoking the Ion function.
    JS_ASSERT_IF(data.constructing, data.maxArgv[0].isObject());

    // Seed the result slot with the actual-arg count — presumably consumed
    // by the trampoline before being overwritten with the return value;
    // TODO(review): confirm against the trampoline implementation.
    data.result.setInt32(data.numActualArgs);
    {
        AssertCompartmentUnchanged pcc(cx);
        JitActivation activation(cx, data.constructing);

        CALL_GENERATED_CODE(enter, data.jitcode, data.maxArgc, data.maxArgv, /* osrFrame = */nullptr, data.calleeToken,
                            /* scopeChain = */ nullptr, 0, data.result.address());
    }

    JS_ASSERT(!cx->runtime()->hasIonReturnOverride());

    // Jit callers wrap primitive constructor return.
    if (!data.result.isMagic() && data.constructing && data.result.isPrimitive())
        data.result = data.maxArgv[0];

    // Release temporary buffer used for OSR into Ion.
    cx->runtime()->getJitRuntime(cx)->freeOsrTempData();

    // A magic result value signals that the jitcode propagated an error.
    JS_ASSERT_IF(data.result.isMagic(), data.result.isMagic(JS_ION_ERROR));
    return data.result.isMagic() ? IonExec_Error : IonExec_Ok;
}
michael@0 | 2391 | |
// Populate |data| with everything the enter-JIT trampoline needs for
// |state|: callee token, argument vector, scope chain and flags. When
// formals outnumber actuals the padded argument vector is built in
// |vals|, which must therefore outlive the use of |data|.
bool
jit::SetEnterJitData(JSContext *cx, EnterJitData &data, RunState &state, AutoValueVector &vals)
{
    data.osrFrame = nullptr;

    if (state.isInvoke()) {
        CallArgs &args = state.asInvoke()->args();
        unsigned numFormals = state.script()->functionNonDelazifying()->nargs();
        data.constructing = state.asInvoke()->constructing();
        data.numActualArgs = args.length();
        // +1 for |this|, which travels in front of the arguments.
        data.maxArgc = Max(args.length(), numFormals) + 1;
        data.scopeChain = nullptr;
        data.calleeToken = CalleeToToken(&args.callee().as<JSFunction>());

        if (data.numActualArgs >= numFormals) {
            // Enough actuals: point directly into the caller's arg vector,
            // starting at |this|.
            data.maxArgv = args.base() + 1;
        } else {
            // Pad missing arguments with |undefined|.
            // Copy |this| and the actual arguments first (presumably
            // args.base()[0] is the callee slot — TODO(review): confirm) ...
            for (size_t i = 1; i < args.length() + 2; i++) {
                if (!vals.append(args.base()[i]))
                    return false;
            }

            // ... then pad out to |this| + numFormals entries.
            while (vals.length() < numFormals + 1) {
                if (!vals.append(UndefinedValue()))
                    return false;
            }

            JS_ASSERT(vals.length() >= numFormals + 1);
            data.maxArgv = vals.begin();
        }
    } else {
        // Execute frame (global or eval code): no actual args, only |this|.
        data.constructing = false;
        data.numActualArgs = 0;
        data.maxArgc = 1;
        data.maxArgv = state.asExecute()->addressOfThisv();
        data.scopeChain = state.asExecute()->scopeChain();

        data.calleeToken = CalleeToToken(state.script());

        if (state.script()->isForEval() &&
            !(state.asExecute()->type() & InterpreterFrame::GLOBAL))
        {
            // Non-global eval: attribute the frame to the enclosing
            // function frame found on the stack, if any.
            ScriptFrameIter iter(cx);
            if (iter.isFunctionFrame())
                data.calleeToken = CalleeToToken(iter.callee());
        }
    }

    return true;
}
michael@0 | 2443 | |
michael@0 | 2444 | IonExecStatus |
michael@0 | 2445 | jit::IonCannon(JSContext *cx, RunState &state) |
michael@0 | 2446 | { |
michael@0 | 2447 | IonScript *ion = state.script()->ionScript(); |
michael@0 | 2448 | |
michael@0 | 2449 | EnterJitData data(cx); |
michael@0 | 2450 | data.jitcode = ion->method()->raw(); |
michael@0 | 2451 | |
michael@0 | 2452 | AutoValueVector vals(cx); |
michael@0 | 2453 | if (!SetEnterJitData(cx, data, state, vals)) |
michael@0 | 2454 | return IonExec_Error; |
michael@0 | 2455 | |
michael@0 | 2456 | IonExecStatus status = EnterIon(cx, data); |
michael@0 | 2457 | |
michael@0 | 2458 | if (status == IonExec_Ok) |
michael@0 | 2459 | state.setReturnValue(data.result); |
michael@0 | 2460 | |
michael@0 | 2461 | return status; |
michael@0 | 2462 | } |
michael@0 | 2463 | |
// Invoke an already Ion-compiled function directly through the trampoline,
// bypassing the generic invoke path. Callers must guarantee no argument
// underflow (see CanEnterUsingFastInvoke).
IonExecStatus
jit::FastInvoke(JSContext *cx, HandleFunction fun, CallArgs &args)
{
    JS_CHECK_RECURSION(cx, return IonExec_Error);

    IonScript *ion = fun->nonLazyScript()->ionScript();
    JitCode *code = ion->method();
    void *jitcode = code->raw();

    JS_ASSERT(jit::IsIonEnabled(cx));
    JS_ASSERT(!ion->bailoutExpected());

    JitActivation activation(cx, /* firstFrameIsConstructing = */false);

    EnterJitCode enter = cx->runtime()->jitRuntime()->enterIon();
    void *calleeToken = CalleeToToken(fun);

    // Seed the result slot with the actual-argument count; it is
    // overwritten by the callee's return value.
    RootedValue result(cx, Int32Value(args.length()));
    JS_ASSERT(args.length() >= fun->nargs());

    // args.array() - 1 hands the trampoline a vector starting at |this|,
    // with args.length() actuals following it.
    CALL_GENERATED_CODE(enter, jitcode, args.length() + 1, args.array() - 1, /* osrFrame = */nullptr,
                        calleeToken, /* scopeChain = */ nullptr, 0, result.address());

    JS_ASSERT(!cx->runtime()->hasIonReturnOverride());

    args.rval().set(result);

    // A magic result signals an error propagated out of jitcode.
    JS_ASSERT_IF(result.isMagic(), result.isMagic(JS_ION_ERROR));
    return result.isMagic() ? IonExec_Error : IonExec_Ok;
}
michael@0 | 2494 | |
// Walk every Jit frame of one activation (delimited by |ionTop|) and patch
// each invalidated (or, with |invalidateAll|, each) Ion frame so that its
// pending call returns into the invalidation epilogue of its IonScript.
static void
InvalidateActivation(FreeOp *fop, uint8_t *ionTop, bool invalidateAll)
{
    IonSpew(IonSpew_Invalidate, "BEGIN invalidating activation");

    size_t frameno = 1;

    for (JitFrameIterator it(ionTop, SequentialExecution); !it.done(); ++it, ++frameno) {
        // The innermost frame of an activation is always an exit frame.
        JS_ASSERT_IF(frameno == 1, it.type() == JitFrame_Exit);

#ifdef DEBUG
        // Debug spew only: describe every frame type as we walk past it.
        switch (it.type()) {
          case JitFrame_Exit:
            IonSpew(IonSpew_Invalidate, "#%d exit frame @ %p", frameno, it.fp());
            break;
          case JitFrame_BaselineJS:
          case JitFrame_IonJS:
          {
            JS_ASSERT(it.isScripted());
            const char *type = it.isIonJS() ? "Optimized" : "Baseline";
            IonSpew(IonSpew_Invalidate, "#%d %s JS frame @ %p, %s:%d (fun: %p, script: %p, pc %p)",
                    frameno, type, it.fp(), it.script()->filename(), it.script()->lineno(),
                    it.maybeCallee(), (JSScript *)it.script(), it.returnAddressToFp());
            break;
          }
          case JitFrame_BaselineStub:
            IonSpew(IonSpew_Invalidate, "#%d baseline stub frame @ %p", frameno, it.fp());
            break;
          case JitFrame_Rectifier:
            IonSpew(IonSpew_Invalidate, "#%d rectifier frame @ %p", frameno, it.fp());
            break;
          case JitFrame_Unwound_IonJS:
          case JitFrame_Unwound_BaselineStub:
            MOZ_ASSUME_UNREACHABLE("invalid");
          case JitFrame_Unwound_Rectifier:
            IonSpew(IonSpew_Invalidate, "#%d unwound rectifier frame @ %p", frameno, it.fp());
            break;
          case JitFrame_Entry:
            IonSpew(IonSpew_Invalidate, "#%d entry frame @ %p", frameno, it.fp());
            break;
        }
#endif

        // Only optimized (Ion) JS frames can be invalidated here.
        if (!it.isIonJS())
            continue;

        // See if the frame has already been invalidated.
        if (it.checkInvalidation())
            continue;

        JSScript *script = it.script();
        if (!script->hasIonScript())
            continue;

        if (!invalidateAll && !script->ionScript()->invalidated())
            continue;

        IonScript *ionScript = script->ionScript();

        // Purge ICs before we mark this script as invalidated. This will
        // prevent lastJump_ from appearing to be a bogus pointer, just
        // in case anyone tries to read it.
        ionScript->purgeCaches();

        // Clean up any pointers from elsewhere in the runtime to this IonScript
        // which is about to become disconnected from its JSScript.
        ionScript->unlinkFromRuntime(fop);

        // This frame needs to be invalidated. We do the following:
        //
        // 1. Increment the reference counter to keep the ionScript alive
        //    for the invalidation bailout or for the exception handler.
        // 2. Determine safepoint that corresponds to the current call.
        // 3. From safepoint, get distance to the OSI-patchable offset.
        // 4. From the IonScript, determine the distance between the
        //    call-patchable offset and the invalidation epilogue.
        // 5. Patch the OSI point with a call-relative to the
        //    invalidation epilogue.
        //
        // The code generator ensures that there's enough space for us
        // to patch in a call-relative operation at each invalidation
        // point.
        //
        // Note: you can't simplify this mechanism to "just patch the
        // instruction immediately after the call" because things may
        // need to move into a well-defined register state (using move
        // instructions after the call) in to capture an appropriate
        // snapshot after the call occurs.

        ionScript->incref();

        const SafepointIndex *si = ionScript->getSafepointIndex(it.returnAddressToFp());
        JitCode *ionCode = ionScript->method();

        JS::Zone *zone = script->zone();
        if (zone->needsBarrier()) {
            // We're about to remove edges from the JSScript to gcthings
            // embedded in the JitCode. Perform one final trace of the
            // JitCode for the incremental GC, as it must know about
            // those edges.
            ionCode->trace(zone->barrierTracer());
        }
        ionCode->setInvalidated();

        // Write the delta (from the return address offset to the
        // IonScript pointer embedded into the invalidation epilogue)
        // where the safepointed call instruction used to be. We rely on
        // the call sequence causing the safepoint being >= the size of
        // a uint32, which is checked during safepoint index
        // construction.
        CodeLocationLabel dataLabelToMunge(it.returnAddressToFp());
        ptrdiff_t delta = ionScript->invalidateEpilogueDataOffset() -
                          (it.returnAddressToFp() - ionCode->raw());
        Assembler::patchWrite_Imm32(dataLabelToMunge, Imm32(delta));

        CodeLocationLabel osiPatchPoint = SafepointReader::InvalidationPatchPoint(ionScript, si);
        CodeLocationLabel invalidateEpilogue(ionCode, ionScript->invalidateEpilogueOffset());

        IonSpew(IonSpew_Invalidate, "  ! Invalidate ionScript %p (ref %u) -> patching osipoint %p",
                ionScript, ionScript->refcount(), (void *) osiPatchPoint.raw());
        Assembler::patchWrite_NearCall(osiPatchPoint, invalidateEpilogue);
    }

    IonSpew(IonSpew_Invalidate, "END invalidating activation");
}
michael@0 | 2620 | |
michael@0 | 2621 | void |
michael@0 | 2622 | jit::StopAllOffThreadCompilations(JSCompartment *comp) |
michael@0 | 2623 | { |
michael@0 | 2624 | if (!comp->jitCompartment()) |
michael@0 | 2625 | return; |
michael@0 | 2626 | CancelOffThreadIonCompile(comp, nullptr); |
michael@0 | 2627 | FinishAllOffThreadCompilations(comp); |
michael@0 | 2628 | } |
michael@0 | 2629 | |
michael@0 | 2630 | void |
michael@0 | 2631 | jit::InvalidateAll(FreeOp *fop, Zone *zone) |
michael@0 | 2632 | { |
michael@0 | 2633 | for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) |
michael@0 | 2634 | StopAllOffThreadCompilations(comp); |
michael@0 | 2635 | |
michael@0 | 2636 | for (JitActivationIterator iter(fop->runtime()); !iter.done(); ++iter) { |
michael@0 | 2637 | if (iter->compartment()->zone() == zone) { |
michael@0 | 2638 | IonSpew(IonSpew_Invalidate, "Invalidating all frames for GC"); |
michael@0 | 2639 | InvalidateActivation(fop, iter.jitTop(), true); |
michael@0 | 2640 | } |
michael@0 | 2641 | } |
michael@0 | 2642 | } |
michael@0 | 2643 | |
michael@0 | 2644 | |
// Invalidate the IonScripts named by |invalid|: patch any of their frames
// currently on the stack, detach them from their JSScripts, and mark the
// corresponding compiler outputs invalid.
//
//   resetUses       - clear the scripts' use counts so they must warm up
//                     again before recompiling (skipped for parallel scripts;
//                     see comment below).
//   cancelOffThread - also cancel in-flight off-thread compilations of the
//                     same scripts.
void
jit::Invalidate(types::TypeZone &types, FreeOp *fop,
                const Vector<types::RecompileInfo> &invalid, bool resetUses,
                bool cancelOffThread)
{
    IonSpew(IonSpew_Invalidate, "Start invalidation.");

    // Add an invalidation reference to all invalidated IonScripts to indicate
    // to the traversal which frames have been invalidated.
    size_t numInvalidations = 0;
    for (size_t i = 0; i < invalid.length(); i++) {
        const types::CompilerOutput &co = *invalid[i].compilerOutput(types);
        // Outputs can already be invalid (e.g. invalidated twice); skip them.
        if (!co.isValid())
            continue;

        if (cancelOffThread)
            CancelOffThreadIonCompile(co.script()->compartment(), co.script());

        // A valid output may have no attached IonScript (compilation still in
        // flight); nothing to invalidate in that case.
        if (!co.ion())
            continue;

        IonSpew(IonSpew_Invalidate, " Invalidate %s:%u, IonScript %p",
                co.script()->filename(), co.script()->lineno(), co.ion());

        // Keep the ion script alive during the invalidation and flag this
        // ionScript as being invalidated. This increment is removed by the
        // loop after the calls to InvalidateActivation.
        co.ion()->incref();
        numInvalidations++;
    }

    if (!numInvalidations) {
        IonSpew(IonSpew_Invalidate, " No IonScript invalidation.");
        return;
    }

    // Walk every JIT activation and patch frames running invalidated code.
    for (JitActivationIterator iter(fop->runtime()); !iter.done(); ++iter)
        InvalidateActivation(fop, iter.jitTop(), false);

    // Drop the references added above. If a script was never active, its
    // IonScript will be immediately destroyed. Otherwise, it will be held live
    // until its last invalidated frame is destroyed.
    for (size_t i = 0; i < invalid.length(); i++) {
        types::CompilerOutput &co = *invalid[i].compilerOutput(types);
        if (!co.isValid())
            continue;

        ExecutionMode executionMode = co.mode();
        JSScript *script = co.script();
        IonScript *ionScript = co.ion();
        if (!ionScript)
            continue;

        // Detach the IonScript from the JSScript before releasing the
        // reference taken in the first loop.
        SetIonScript(script, executionMode, nullptr);
        ionScript->decref(fop);
        co.invalidate();
        numInvalidations--;

        // Wait for the scripts to get warm again before doing another
        // compile, unless either:
        // (1) we are recompiling *because* a script got hot;
        //     (resetUses is false); or,
        // (2) we are invalidating a parallel script.  This is because
        //     the useCount only applies to sequential uses.  Parallel
        //     execution *requires* ion, and so we don't limit it to
        //     methods with a high usage count (though we do check that
        //     the useCount is at least 1 when compiling the transitive
        //     closure of potential callees, to avoid compiling things
        //     that are never run at all).
        if (resetUses && executionMode != ParallelExecution)
            script->resetUseCount();
    }

    // Make sure we didn't leak references by invalidating the same IonScript
    // multiple times in the above loop.
    JS_ASSERT(!numInvalidations);
}
michael@0 | 2722 | |
michael@0 | 2723 | void |
michael@0 | 2724 | jit::Invalidate(JSContext *cx, const Vector<types::RecompileInfo> &invalid, bool resetUses, |
michael@0 | 2725 | bool cancelOffThread) |
michael@0 | 2726 | { |
michael@0 | 2727 | jit::Invalidate(cx->zone()->types, cx->runtime()->defaultFreeOp(), invalid, resetUses, |
michael@0 | 2728 | cancelOffThread); |
michael@0 | 2729 | } |
michael@0 | 2730 | |
michael@0 | 2731 | bool |
michael@0 | 2732 | jit::Invalidate(JSContext *cx, JSScript *script, ExecutionMode mode, bool resetUses, |
michael@0 | 2733 | bool cancelOffThread) |
michael@0 | 2734 | { |
michael@0 | 2735 | JS_ASSERT(script->hasIonScript()); |
michael@0 | 2736 | |
michael@0 | 2737 | if (cx->runtime()->spsProfiler.enabled()) { |
michael@0 | 2738 | // Register invalidation with profiler. |
michael@0 | 2739 | // Format of event payload string: |
michael@0 | 2740 | // "<filename>:<lineno>" |
michael@0 | 2741 | |
michael@0 | 2742 | // Get the script filename, if any, and its length. |
michael@0 | 2743 | const char *filename = script->filename(); |
michael@0 | 2744 | if (filename == nullptr) |
michael@0 | 2745 | filename = "<unknown>"; |
michael@0 | 2746 | |
michael@0 | 2747 | size_t len = strlen(filename) + 20; |
michael@0 | 2748 | char *buf = js_pod_malloc<char>(len); |
michael@0 | 2749 | if (!buf) |
michael@0 | 2750 | return false; |
michael@0 | 2751 | |
michael@0 | 2752 | // Construct the descriptive string. |
michael@0 | 2753 | JS_snprintf(buf, len, "Invalidate %s:%llu", filename, script->lineno()); |
michael@0 | 2754 | cx->runtime()->spsProfiler.markEvent(buf); |
michael@0 | 2755 | js_free(buf); |
michael@0 | 2756 | } |
michael@0 | 2757 | |
michael@0 | 2758 | Vector<types::RecompileInfo> scripts(cx); |
michael@0 | 2759 | |
michael@0 | 2760 | switch (mode) { |
michael@0 | 2761 | case SequentialExecution: |
michael@0 | 2762 | JS_ASSERT(script->hasIonScript()); |
michael@0 | 2763 | if (!scripts.append(script->ionScript()->recompileInfo())) |
michael@0 | 2764 | return false; |
michael@0 | 2765 | break; |
michael@0 | 2766 | case ParallelExecution: |
michael@0 | 2767 | JS_ASSERT(script->hasParallelIonScript()); |
michael@0 | 2768 | if (!scripts.append(script->parallelIonScript()->recompileInfo())) |
michael@0 | 2769 | return false; |
michael@0 | 2770 | break; |
michael@0 | 2771 | default: |
michael@0 | 2772 | MOZ_ASSUME_UNREACHABLE("No such execution mode"); |
michael@0 | 2773 | } |
michael@0 | 2774 | |
michael@0 | 2775 | Invalidate(cx, scripts, resetUses, cancelOffThread); |
michael@0 | 2776 | return true; |
michael@0 | 2777 | } |
michael@0 | 2778 | |
// Convenience overload: invalidate the sequential-execution Ion code of
// |script|.
bool
jit::Invalidate(JSContext *cx, JSScript *script, bool resetUses, bool cancelOffThread)
{
    return Invalidate(cx, script, SequentialExecution, resetUses, cancelOffThread);
}
michael@0 | 2784 | |
michael@0 | 2785 | static void |
michael@0 | 2786 | FinishInvalidationOf(FreeOp *fop, JSScript *script, IonScript *ionScript) |
michael@0 | 2787 | { |
michael@0 | 2788 | types::TypeZone &types = script->zone()->types; |
michael@0 | 2789 | |
michael@0 | 2790 | // Note: If the script is about to be swept, the compiler output may have |
michael@0 | 2791 | // already been destroyed. |
michael@0 | 2792 | if (types::CompilerOutput *output = ionScript->recompileInfo().compilerOutput(types)) |
michael@0 | 2793 | output->invalidate(); |
michael@0 | 2794 | |
michael@0 | 2795 | // If this script has Ion code on the stack, invalidated() will return |
michael@0 | 2796 | // true. In this case we have to wait until destroying it. |
michael@0 | 2797 | if (!ionScript->invalidated()) |
michael@0 | 2798 | jit::IonScript::Destroy(fop, ionScript); |
michael@0 | 2799 | } |
michael@0 | 2800 | |
michael@0 | 2801 | template <ExecutionMode mode> |
michael@0 | 2802 | void |
michael@0 | 2803 | jit::FinishInvalidation(FreeOp *fop, JSScript *script) |
michael@0 | 2804 | { |
michael@0 | 2805 | // In all cases, nullptr out script->ion or script->parallelIon to avoid |
michael@0 | 2806 | // re-entry. |
michael@0 | 2807 | switch (mode) { |
michael@0 | 2808 | case SequentialExecution: |
michael@0 | 2809 | if (script->hasIonScript()) { |
michael@0 | 2810 | IonScript *ion = script->ionScript(); |
michael@0 | 2811 | script->setIonScript(nullptr); |
michael@0 | 2812 | FinishInvalidationOf(fop, script, ion); |
michael@0 | 2813 | } |
michael@0 | 2814 | return; |
michael@0 | 2815 | |
michael@0 | 2816 | case ParallelExecution: |
michael@0 | 2817 | if (script->hasParallelIonScript()) { |
michael@0 | 2818 | IonScript *parallelIon = script->parallelIonScript(); |
michael@0 | 2819 | script->setParallelIonScript(nullptr); |
michael@0 | 2820 | FinishInvalidationOf(fop, script, parallelIon); |
michael@0 | 2821 | } |
michael@0 | 2822 | return; |
michael@0 | 2823 | |
michael@0 | 2824 | default: |
michael@0 | 2825 | MOZ_ASSUME_UNREACHABLE("bad execution mode"); |
michael@0 | 2826 | } |
michael@0 | 2827 | } |
michael@0 | 2828 | |
// Explicit instantiations of FinishInvalidation for the two execution modes,
// so other translation units can link against them.
template void
jit::FinishInvalidation<SequentialExecution>(FreeOp *fop, JSScript *script);

template void
jit::FinishInvalidation<ParallelExecution>(FreeOp *fop, JSScript *script);
michael@0 | 2834 | |
// Mark |vp| through the runtime's GC marker; labelled as a write barrier.
// Presumably invoked from generated Ion code — TODO confirm against callers.
void
jit::MarkValueFromIon(JSRuntime *rt, Value *vp)
{
    gc::MarkValueUnbarriered(&rt->gcMarker, vp, "write barrier");
}
michael@0 | 2840 | |
// Mark |shapep| through the runtime's GC marker; labelled as a write barrier.
// Shape counterpart of MarkValueFromIon.
void
jit::MarkShapeFromIon(JSRuntime *rt, Shape **shapep)
{
    gc::MarkShapeUnbarriered(&rt->gcMarker, shapep, "write barrier");
}
michael@0 | 2846 | |
// Convenience overload: forbid Ion compilation of |script| for sequential
// execution.
void
jit::ForbidCompilation(JSContext *cx, JSScript *script)
{
    ForbidCompilation(cx, script, SequentialExecution);
}
michael@0 | 2852 | |
michael@0 | 2853 | void |
michael@0 | 2854 | jit::ForbidCompilation(JSContext *cx, JSScript *script, ExecutionMode mode) |
michael@0 | 2855 | { |
michael@0 | 2856 | IonSpew(IonSpew_Abort, "Disabling Ion mode %d compilation of script %s:%d", |
michael@0 | 2857 | mode, script->filename(), script->lineno()); |
michael@0 | 2858 | |
michael@0 | 2859 | CancelOffThreadIonCompile(cx->compartment(), script); |
michael@0 | 2860 | |
michael@0 | 2861 | switch (mode) { |
michael@0 | 2862 | case SequentialExecution: |
michael@0 | 2863 | if (script->hasIonScript()) { |
michael@0 | 2864 | // It is only safe to modify script->ion if the script is not currently |
michael@0 | 2865 | // running, because JitFrameIterator needs to tell what ionScript to |
michael@0 | 2866 | // use (either the one on the JSScript, or the one hidden in the |
michael@0 | 2867 | // breadcrumbs Invalidation() leaves). Therefore, if invalidation |
michael@0 | 2868 | // fails, we cannot disable the script. |
michael@0 | 2869 | if (!Invalidate(cx, script, mode, false)) |
michael@0 | 2870 | return; |
michael@0 | 2871 | } |
michael@0 | 2872 | |
michael@0 | 2873 | script->setIonScript(ION_DISABLED_SCRIPT); |
michael@0 | 2874 | return; |
michael@0 | 2875 | |
michael@0 | 2876 | case ParallelExecution: |
michael@0 | 2877 | if (script->hasParallelIonScript()) { |
michael@0 | 2878 | if (!Invalidate(cx, script, mode, false)) |
michael@0 | 2879 | return; |
michael@0 | 2880 | } |
michael@0 | 2881 | |
michael@0 | 2882 | script->setParallelIonScript(ION_DISABLED_SCRIPT); |
michael@0 | 2883 | return; |
michael@0 | 2884 | |
michael@0 | 2885 | default: |
michael@0 | 2886 | MOZ_ASSUME_UNREACHABLE("No such execution mode"); |
michael@0 | 2887 | } |
michael@0 | 2888 | |
michael@0 | 2889 | MOZ_ASSUME_UNREACHABLE("No such execution mode"); |
michael@0 | 2890 | } |
michael@0 | 2891 | |
// Return the innermost AutoFlushICache context active on this thread, or
// nullptr if there is none.
AutoFlushICache *
PerThreadData::autoFlushICache() const
{
    return autoFlushICache_;
}
michael@0 | 2897 | |
// Install |afc| as this thread's current AutoFlushICache context (nullptr to
// clear). Used by AutoFlushICache's constructor and destructor to maintain
// the per-thread context stack.
void
PerThreadData::setAutoFlushICache(AutoFlushICache *afc)
{
    autoFlushICache_ = afc;
}
michael@0 | 2903 | |
// Set the range for the merging of flushes. The flushing is deferred until the end of
// the AutoFlushICache context. Subsequent flushing within this range is also
// deferred. This is only expected to be called once for each AutoFlushICache
// context. It assumes the range to be flushed lies within an
// AutoFlushICache context.
michael@0 | 2909 | void |
michael@0 | 2910 | AutoFlushICache::setRange(uintptr_t start, size_t len) |
michael@0 | 2911 | { |
michael@0 | 2912 | #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS) |
michael@0 | 2913 | AutoFlushICache *afc = TlsPerThreadData.get()->PerThreadData::autoFlushICache(); |
michael@0 | 2914 | JS_ASSERT(afc); |
michael@0 | 2915 | JS_ASSERT(!afc->start_); |
michael@0 | 2916 | IonSpewCont(IonSpew_CacheFlush, "(%x %x):", start, len); |
michael@0 | 2917 | |
michael@0 | 2918 | uintptr_t stop = start + len; |
michael@0 | 2919 | afc->start_ = start; |
michael@0 | 2920 | afc->stop_ = stop; |
michael@0 | 2921 | #endif |
michael@0 | 2922 | } |
michael@0 | 2923 | |
michael@0 | 2924 | // Flush the instruction cache. |
michael@0 | 2925 | // |
michael@0 | 2926 | // If called within a dynamic AutoFlushICache context and if the range is already pending |
michael@0 | 2927 | // flushing for this AutoFlushICache context then the request is ignored with the |
michael@0 | 2928 | // understanding that it will be flushed on exit from the AutoFlushICache context. |
michael@0 | 2929 | // Otherwise the range is flushed immediately. |
michael@0 | 2930 | // |
michael@0 | 2931 | // Updates outside the current code object are typically the exception so they are flushed |
michael@0 | 2932 | // immediately rather than attempting to merge them. |
michael@0 | 2933 | // |
michael@0 | 2934 | // For efficiency it is expected that all large ranges will be flushed within an |
michael@0 | 2935 | // AutoFlushICache, so check. If this assertion is hit then it does not necessarily |
// indicate a program fault but it might indicate a lost opportunity to merge cache
// flushing. It can be corrected by wrapping the call in an AutoFlushICache context.
michael@0 | 2938 | void |
michael@0 | 2939 | AutoFlushICache::flush(uintptr_t start, size_t len) |
michael@0 | 2940 | { |
michael@0 | 2941 | #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS) |
michael@0 | 2942 | AutoFlushICache *afc = TlsPerThreadData.get()->PerThreadData::autoFlushICache(); |
michael@0 | 2943 | if (!afc) { |
michael@0 | 2944 | IonSpewCont(IonSpew_CacheFlush, "#"); |
michael@0 | 2945 | JSC::ExecutableAllocator::cacheFlush((void*)start, len); |
michael@0 | 2946 | JS_ASSERT(len <= 16); |
michael@0 | 2947 | return; |
michael@0 | 2948 | } |
michael@0 | 2949 | |
michael@0 | 2950 | uintptr_t stop = start + len; |
michael@0 | 2951 | if (start >= afc->start_ && stop <= afc->stop_) { |
michael@0 | 2952 | // Update is within the pending flush range, so defer to the end of the context. |
michael@0 | 2953 | IonSpewCont(IonSpew_CacheFlush, afc->inhibit_ ? "-" : "="); |
michael@0 | 2954 | return; |
michael@0 | 2955 | } |
michael@0 | 2956 | |
michael@0 | 2957 | IonSpewCont(IonSpew_CacheFlush, afc->inhibit_ ? "x" : "*"); |
michael@0 | 2958 | JSC::ExecutableAllocator::cacheFlush((void *)start, len); |
michael@0 | 2959 | #endif |
michael@0 | 2960 | } |
michael@0 | 2961 | |
michael@0 | 2962 | // Flag the current dynamic AutoFlushICache as inhibiting flushing. Useful in error paths |
michael@0 | 2963 | // where the changes are being abandoned. |
void
AutoFlushICache::setInhibit()
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
    AutoFlushICache *afc = TlsPerThreadData.get()->PerThreadData::autoFlushICache();
    // Must be called inside a context that already has a flush range set.
    JS_ASSERT(afc);
    JS_ASSERT(afc->start_);
    IonSpewCont(IonSpew_CacheFlush, "I");
    afc->inhibit_ = true;
#endif
}
michael@0 | 2975 | |
michael@0 | 2976 | // The common use case is merging cache flushes when preparing a code object. In this |
michael@0 | 2977 | // case the entire range of the code object is being flushed and as the code is patched |
michael@0 | 2978 | // smaller redundant flushes could occur. The design allows an AutoFlushICache dynamic |
michael@0 | 2979 | // thread local context to be declared in which the range of the code object can be set |
michael@0 | 2980 | // which defers flushing until the end of this dynamic context. The redundant flushing |
michael@0 | 2981 | // within this code range is also deferred avoiding redundant flushing. Flushing outside |
michael@0 | 2982 | // this code range is not affected and proceeds immediately. |
michael@0 | 2983 | // |
michael@0 | 2984 | // In some cases flushing is not necessary, such as when compiling an asm.js module which |
michael@0 | 2985 | // is flushed again when dynamically linked, and also in error paths that abandon the |
michael@0 | 2986 | // code. Flushing within the set code range can be inhibited within the AutoFlushICache |
michael@0 | 2987 | // dynamic context by setting an inhibit flag. |
michael@0 | 2988 | // |
michael@0 | 2989 | // The JS compiler can be re-entered while within an AutoFlushICache dynamic context and |
michael@0 | 2990 | // it is assumed that code being assembled or patched is not executed before the exit of |
michael@0 | 2991 | // the respective AutoFlushICache dynamic context. |
michael@0 | 2992 | // |
// Enter a new flush-merging context named |nonce|; |inhibit| suppresses the
// deferred flush on exit (used when code is abandoned).
AutoFlushICache::AutoFlushICache(const char *nonce, bool inhibit)
  : start_(0),
    stop_(0),
    name_(nonce),
    inhibit_(inhibit)
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
    PerThreadData *pt = TlsPerThreadData.get();
    AutoFlushICache *afc = pt->PerThreadData::autoFlushICache();
    // Spew differently depending on whether we nest inside another context.
    if (afc)
        IonSpew(IonSpew_CacheFlush, "<%s,%s%s ", nonce, afc->name_, inhibit ? " I" : "");
    else
        IonSpewCont(IonSpew_CacheFlush, "<%s%s ", nonce, inhibit ? " I" : "");

    // Push this context onto the per-thread stack; the destructor restores
    // the previous one.
    prev_ = afc;
    pt->PerThreadData::setAutoFlushICache(this);
#endif
}
michael@0 | 3011 | |
// Leave the context: perform the deferred flush (unless inhibited or no range
// was ever set) and pop this context off the per-thread stack.
AutoFlushICache::~AutoFlushICache()
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
    PerThreadData *pt = TlsPerThreadData.get();
    JS_ASSERT(pt->PerThreadData::autoFlushICache() == this);

    if (!inhibit_ && start_)
        JSC::ExecutableAllocator::cacheFlush((void *)start_, size_t(stop_ - start_));

    // " U" marks a context that was exited without a range being set.
    IonSpewCont(IonSpew_CacheFlush, "%s%s>", name_, start_ ? "" : " U");
    IonSpewFin(IonSpew_CacheFlush);
    pt->PerThreadData::setAutoFlushICache(prev_);
#endif
}
michael@0 | 3026 | |
michael@0 | 3027 | void |
michael@0 | 3028 | jit::PurgeCaches(JSScript *script) |
michael@0 | 3029 | { |
michael@0 | 3030 | if (script->hasIonScript()) |
michael@0 | 3031 | script->ionScript()->purgeCaches(); |
michael@0 | 3032 | |
michael@0 | 3033 | if (script->hasParallelIonScript()) |
michael@0 | 3034 | script->parallelIonScript()->purgeCaches(); |
michael@0 | 3035 | } |
michael@0 | 3036 | |
michael@0 | 3037 | size_t |
michael@0 | 3038 | jit::SizeOfIonData(JSScript *script, mozilla::MallocSizeOf mallocSizeOf) |
michael@0 | 3039 | { |
michael@0 | 3040 | size_t result = 0; |
michael@0 | 3041 | |
michael@0 | 3042 | if (script->hasIonScript()) |
michael@0 | 3043 | result += script->ionScript()->sizeOfIncludingThis(mallocSizeOf); |
michael@0 | 3044 | |
michael@0 | 3045 | if (script->hasParallelIonScript()) |
michael@0 | 3046 | result += script->parallelIonScript()->sizeOfIncludingThis(mallocSizeOf); |
michael@0 | 3047 | |
michael@0 | 3048 | return result; |
michael@0 | 3049 | } |
michael@0 | 3050 | |
michael@0 | 3051 | void |
michael@0 | 3052 | jit::DestroyIonScripts(FreeOp *fop, JSScript *script) |
michael@0 | 3053 | { |
michael@0 | 3054 | if (script->hasIonScript()) |
michael@0 | 3055 | jit::IonScript::Destroy(fop, script->ionScript()); |
michael@0 | 3056 | |
michael@0 | 3057 | if (script->hasParallelIonScript()) |
michael@0 | 3058 | jit::IonScript::Destroy(fop, script->parallelIonScript()); |
michael@0 | 3059 | |
michael@0 | 3060 | if (script->hasBaselineScript()) |
michael@0 | 3061 | jit::BaselineScript::Destroy(fop, script->baselineScript()); |
michael@0 | 3062 | } |
michael@0 | 3063 | |
michael@0 | 3064 | void |
michael@0 | 3065 | jit::TraceIonScripts(JSTracer* trc, JSScript *script) |
michael@0 | 3066 | { |
michael@0 | 3067 | if (script->hasIonScript()) |
michael@0 | 3068 | jit::IonScript::Trace(trc, script->ionScript()); |
michael@0 | 3069 | |
michael@0 | 3070 | if (script->hasParallelIonScript()) |
michael@0 | 3071 | jit::IonScript::Trace(trc, script->parallelIonScript()); |
michael@0 | 3072 | |
michael@0 | 3073 | if (script->hasBaselineScript()) |
michael@0 | 3074 | jit::BaselineScript::Trace(trc, script->baselineScript()); |
michael@0 | 3075 | } |
michael@0 | 3076 | |
michael@0 | 3077 | bool |
michael@0 | 3078 | jit::RematerializeAllFrames(JSContext *cx, JSCompartment *comp) |
michael@0 | 3079 | { |
michael@0 | 3080 | for (JitActivationIterator iter(comp->runtimeFromMainThread()); !iter.done(); ++iter) { |
michael@0 | 3081 | if (iter.activation()->compartment() == comp) { |
michael@0 | 3082 | for (JitFrameIterator frameIter(iter); !frameIter.done(); ++frameIter) { |
michael@0 | 3083 | if (!frameIter.isIonJS()) |
michael@0 | 3084 | continue; |
michael@0 | 3085 | if (!iter.activation()->asJit()->getRematerializedFrame(cx, frameIter)) |
michael@0 | 3086 | return false; |
michael@0 | 3087 | } |
michael@0 | 3088 | } |
michael@0 | 3089 | } |
michael@0 | 3090 | return true; |
michael@0 | 3091 | } |
michael@0 | 3092 | |
michael@0 | 3093 | bool |
michael@0 | 3094 | jit::UpdateForDebugMode(JSContext *maybecx, JSCompartment *comp, |
michael@0 | 3095 | AutoDebugModeInvalidation &invalidate) |
michael@0 | 3096 | { |
michael@0 | 3097 | MOZ_ASSERT(invalidate.isFor(comp)); |
michael@0 | 3098 | |
michael@0 | 3099 | // Schedule invalidation of all optimized JIT code since debug mode |
michael@0 | 3100 | // invalidates assumptions. |
michael@0 | 3101 | invalidate.scheduleInvalidation(comp->debugMode()); |
michael@0 | 3102 | |
michael@0 | 3103 | // Recompile on-stack baseline scripts if we have a cx. |
michael@0 | 3104 | if (maybecx) { |
michael@0 | 3105 | IonContext ictx(maybecx, nullptr); |
michael@0 | 3106 | if (!RecompileOnStackBaselineScriptsForDebugMode(maybecx, comp)) { |
michael@0 | 3107 | js_ReportOutOfMemory(maybecx); |
michael@0 | 3108 | return false; |
michael@0 | 3109 | } |
michael@0 | 3110 | } |
michael@0 | 3111 | |
michael@0 | 3112 | return true; |
michael@0 | 3113 | } |
michael@0 | 3114 | |
// Perform the invalidation scheduled during the guard's lifetime: stop
// off-thread compilation, invalidate on-stack Ion frames, and tear down the
// Ion code of every affected script. Operates on either a single compartment
// (comp_) or a whole zone (zone_).
AutoDebugModeInvalidation::~AutoDebugModeInvalidation()
{
    // Exactly one of comp_ and zone_ must be set.
    MOZ_ASSERT(!!comp_ != !!zone_);

    if (needInvalidation_ == NoNeed)
        return;

    Zone *zone = zone_ ? zone_ : comp_->zone();
    JSRuntime *rt = zone->runtimeFromMainThread();
    FreeOp *fop = rt->defaultFreeOp();

    if (comp_) {
        StopAllOffThreadCompilations(comp_);
    } else {
        for (CompartmentsInZoneIter comp(zone_); !comp.done(); comp.next())
            StopAllOffThreadCompilations(comp);
    }

    // Don't discard active baseline scripts. They are recompiled for debug
    // mode.
    jit::MarkActiveBaselineScripts(zone);

    // Invalidate on-stack Ion frames of every activation touching the
    // affected compartment/zone.
    for (JitActivationIterator iter(rt); !iter.done(); ++iter) {
        JSCompartment *comp = iter->compartment();
        if (comp_ == comp || zone_ == comp->zone()) {
            IonContext ictx(CompileRuntime::get(rt));
            IonSpew(IonSpew_Invalidate, "Invalidating frames for debug mode toggle");
            InvalidateActivation(fop, iter.jitTop(), true);
        }
    }

    for (gc::CellIter i(zone, gc::FINALIZE_SCRIPT); !i.done(); i.next()) {
        JSScript *script = i.get<JSScript>();
        // When zone_ is set, every script in the zone matches; otherwise only
        // scripts of comp_ do.
        if (script->compartment() == comp_ || zone_) {
            FinishInvalidation<SequentialExecution>(fop, script);
            FinishInvalidation<ParallelExecution>(fop, script);
            FinishDiscardBaselineScript(fop, script);
            script->resetUseCount();
        } else if (script->hasBaselineScript()) {
            // Unaffected scripts merely lose their "active" flag.
            script->baselineScript()->resetActive();
        }
    }
}