michael@0: /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- michael@0: * vim: set ts=8 sts=4 et sw=4 tw=99: michael@0: * This Source Code Form is subject to the terms of the Mozilla Public michael@0: * License, v. 2.0. If a copy of the MPL was not distributed with this michael@0: * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ michael@0: michael@0: #include "jit/AsmJSSignalHandlers.h" michael@0: michael@0: #include "mozilla/BinarySearch.h" michael@0: michael@0: #include "assembler/assembler/MacroAssembler.h" michael@0: #include "jit/AsmJSModule.h" michael@0: michael@0: using namespace js; michael@0: using namespace js::jit; michael@0: using namespace mozilla; michael@0: michael@0: using JS::GenericNaN; michael@0: michael@0: #if defined(XP_WIN) michael@0: # define XMM_sig(p,i) ((p)->Xmm##i) michael@0: # define EIP_sig(p) ((p)->Eip) michael@0: # define RIP_sig(p) ((p)->Rip) michael@0: # define RAX_sig(p) ((p)->Rax) michael@0: # define RCX_sig(p) ((p)->Rcx) michael@0: # define RDX_sig(p) ((p)->Rdx) michael@0: # define RBX_sig(p) ((p)->Rbx) michael@0: # define RSP_sig(p) ((p)->Rsp) michael@0: # define RBP_sig(p) ((p)->Rbp) michael@0: # define RSI_sig(p) ((p)->Rsi) michael@0: # define RDI_sig(p) ((p)->Rdi) michael@0: # define R8_sig(p) ((p)->R8) michael@0: # define R9_sig(p) ((p)->R9) michael@0: # define R10_sig(p) ((p)->R10) michael@0: # define R11_sig(p) ((p)->R11) michael@0: # define R12_sig(p) ((p)->R12) michael@0: # define R13_sig(p) ((p)->R13) michael@0: # define R14_sig(p) ((p)->R14) michael@0: # define R15_sig(p) ((p)->R15) michael@0: #elif defined(__OpenBSD__) michael@0: # define XMM_sig(p,i) ((p)->sc_fpstate->fx_xmm[i]) michael@0: # define EIP_sig(p) ((p)->sc_eip) michael@0: # define RIP_sig(p) ((p)->sc_rip) michael@0: # define RAX_sig(p) ((p)->sc_rax) michael@0: # define RCX_sig(p) ((p)->sc_rcx) michael@0: # define RDX_sig(p) ((p)->sc_rdx) michael@0: # define RBX_sig(p) ((p)->sc_rbx) michael@0: # define RSP_sig(p) 
((p)->sc_rsp) michael@0: # define RBP_sig(p) ((p)->sc_rbp) michael@0: # define RSI_sig(p) ((p)->sc_rsi) michael@0: # define RDI_sig(p) ((p)->sc_rdi) michael@0: # define R8_sig(p) ((p)->sc_r8) michael@0: # define R9_sig(p) ((p)->sc_r9) michael@0: # define R10_sig(p) ((p)->sc_r10) michael@0: # define R11_sig(p) ((p)->sc_r11) michael@0: # define R12_sig(p) ((p)->sc_r12) michael@0: # define R13_sig(p) ((p)->sc_r13) michael@0: # define R14_sig(p) ((p)->sc_r14) michael@0: # define R15_sig(p) ((p)->sc_r15) michael@0: #elif defined(__linux__) || defined(SOLARIS) michael@0: # if defined(__linux__) michael@0: # define XMM_sig(p,i) ((p)->uc_mcontext.fpregs->_xmm[i]) michael@0: # define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_EIP]) michael@0: # else michael@0: # define XMM_sig(p,i) ((p)->uc_mcontext.fpregs.fp_reg_set.fpchip_state.xmm[i]) michael@0: # define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_PC]) michael@0: # endif michael@0: # define RIP_sig(p) ((p)->uc_mcontext.gregs[REG_RIP]) michael@0: # define RAX_sig(p) ((p)->uc_mcontext.gregs[REG_RAX]) michael@0: # define RCX_sig(p) ((p)->uc_mcontext.gregs[REG_RCX]) michael@0: # define RDX_sig(p) ((p)->uc_mcontext.gregs[REG_RDX]) michael@0: # define RBX_sig(p) ((p)->uc_mcontext.gregs[REG_RBX]) michael@0: # define RSP_sig(p) ((p)->uc_mcontext.gregs[REG_RSP]) michael@0: # define RBP_sig(p) ((p)->uc_mcontext.gregs[REG_RBP]) michael@0: # define RSI_sig(p) ((p)->uc_mcontext.gregs[REG_RSI]) michael@0: # define RDI_sig(p) ((p)->uc_mcontext.gregs[REG_RDI]) michael@0: # define R8_sig(p) ((p)->uc_mcontext.gregs[REG_R8]) michael@0: # define R9_sig(p) ((p)->uc_mcontext.gregs[REG_R9]) michael@0: # define R10_sig(p) ((p)->uc_mcontext.gregs[REG_R10]) michael@0: # define R11_sig(p) ((p)->uc_mcontext.gregs[REG_R11]) michael@0: # define R12_sig(p) ((p)->uc_mcontext.gregs[REG_R12]) michael@0: # define R13_sig(p) ((p)->uc_mcontext.gregs[REG_R13]) michael@0: # define R14_sig(p) ((p)->uc_mcontext.gregs[REG_R14]) michael@0: # if defined(__linux__) && 
defined(__arm__) michael@0: # define R15_sig(p) ((p)->uc_mcontext.arm_pc) michael@0: # else michael@0: # define R15_sig(p) ((p)->uc_mcontext.gregs[REG_R15]) michael@0: # endif michael@0: #elif defined(__NetBSD__) michael@0: # define XMM_sig(p,i) (((struct fxsave64 *)(p)->uc_mcontext.__fpregs)->fx_xmm[i]) michael@0: # define EIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EIP]) michael@0: # define RIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RIP]) michael@0: # define RAX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RAX]) michael@0: # define RCX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RCX]) michael@0: # define RDX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RDX]) michael@0: # define RBX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RBX]) michael@0: # define RSP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSP]) michael@0: # define RBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RBP]) michael@0: # define RSI_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSI]) michael@0: # define RDI_sig(p) ((p)->uc_mcontext.__gregs[_REG_RDI]) michael@0: # define R8_sig(p) ((p)->uc_mcontext.__gregs[_REG_R8]) michael@0: # define R9_sig(p) ((p)->uc_mcontext.__gregs[_REG_R9]) michael@0: # define R10_sig(p) ((p)->uc_mcontext.__gregs[_REG_R10]) michael@0: # define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11]) michael@0: # define R12_sig(p) ((p)->uc_mcontext.__gregs[_REG_R12]) michael@0: # define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13]) michael@0: # define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14]) michael@0: # define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15]) michael@0: #elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) michael@0: # if defined(__DragonFly__) michael@0: # define XMM_sig(p,i) (((union savefpu *)(p)->uc_mcontext.mc_fpregs)->sv_xmm.sv_xmm[i]) michael@0: # else michael@0: # define XMM_sig(p,i) (((struct savefpu *)(p)->uc_mcontext.mc_fpstate)->sv_xmm[i]) michael@0: # endif michael@0: # define EIP_sig(p) ((p)->uc_mcontext.mc_eip) michael@0: # define RIP_sig(p) 
((p)->uc_mcontext.mc_rip) michael@0: # define RAX_sig(p) ((p)->uc_mcontext.mc_rax) michael@0: # define RCX_sig(p) ((p)->uc_mcontext.mc_rcx) michael@0: # define RDX_sig(p) ((p)->uc_mcontext.mc_rdx) michael@0: # define RBX_sig(p) ((p)->uc_mcontext.mc_rbx) michael@0: # define RSP_sig(p) ((p)->uc_mcontext.mc_rsp) michael@0: # define RBP_sig(p) ((p)->uc_mcontext.mc_rbp) michael@0: # define RSI_sig(p) ((p)->uc_mcontext.mc_rsi) michael@0: # define RDI_sig(p) ((p)->uc_mcontext.mc_rdi) michael@0: # define R8_sig(p) ((p)->uc_mcontext.mc_r8) michael@0: # define R9_sig(p) ((p)->uc_mcontext.mc_r9) michael@0: # define R10_sig(p) ((p)->uc_mcontext.mc_r10) michael@0: # define R11_sig(p) ((p)->uc_mcontext.mc_r11) michael@0: # define R12_sig(p) ((p)->uc_mcontext.mc_r12) michael@0: # define R13_sig(p) ((p)->uc_mcontext.mc_r13) michael@0: # define R14_sig(p) ((p)->uc_mcontext.mc_r14) michael@0: # if defined(__FreeBSD__) && defined(__arm__) michael@0: # define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15]) michael@0: # else michael@0: # define R15_sig(p) ((p)->uc_mcontext.mc_r15) michael@0: # endif michael@0: #elif defined(XP_MACOSX) michael@0: // Mach requires special treatment. michael@0: #else michael@0: # error "Don't know how to read/write to the thread state via the mcontext_t." michael@0: #endif michael@0: michael@0: // For platforms where the signal/exception handler runs on the same michael@0: // thread/stack as the victim (Unix and Windows), we can use TLS to find any michael@0: // currently executing asm.js code. 
michael@0: #if !defined(XP_MACOSX) michael@0: static AsmJSActivation * michael@0: InnermostAsmJSActivation() michael@0: { michael@0: PerThreadData *threadData = TlsPerThreadData.get(); michael@0: if (!threadData) michael@0: return nullptr; michael@0: michael@0: return threadData->asmJSActivationStackFromOwnerThread(); michael@0: } michael@0: michael@0: static JSRuntime * michael@0: RuntimeForCurrentThread() michael@0: { michael@0: PerThreadData *threadData = TlsPerThreadData.get(); michael@0: if (!threadData) michael@0: return nullptr; michael@0: michael@0: return threadData->runtimeIfOnOwnerThread(); michael@0: } michael@0: #endif // !defined(XP_MACOSX) michael@0: michael@0: // Crashing inside the signal handler can cause the handler to be recursively michael@0: // invoked, eventually blowing the stack without actually showing a crash michael@0: // report dialog via Breakpad. To guard against this we watch for such michael@0: // recursion and fall through to the next handler immediately rather than michael@0: // trying to handle it. 
michael@0: class AutoSetHandlingSignal michael@0: { michael@0: JSRuntime *rt; michael@0: michael@0: public: michael@0: AutoSetHandlingSignal(JSRuntime *rt) michael@0: : rt(rt) michael@0: { michael@0: JS_ASSERT(!rt->handlingSignal); michael@0: rt->handlingSignal = true; michael@0: } michael@0: michael@0: ~AutoSetHandlingSignal() michael@0: { michael@0: JS_ASSERT(rt->handlingSignal); michael@0: rt->handlingSignal = false; michael@0: } michael@0: }; michael@0: michael@0: #if defined(JS_CODEGEN_X64) michael@0: template michael@0: static void michael@0: SetXMMRegToNaN(bool isFloat32, T *xmm_reg) michael@0: { michael@0: if (isFloat32) { michael@0: JS_STATIC_ASSERT(sizeof(T) == 4 * sizeof(float)); michael@0: float *floats = reinterpret_cast(xmm_reg); michael@0: floats[0] = GenericNaN(); michael@0: floats[1] = 0; michael@0: floats[2] = 0; michael@0: floats[3] = 0; michael@0: } else { michael@0: JS_STATIC_ASSERT(sizeof(T) == 2 * sizeof(double)); michael@0: double *dbls = reinterpret_cast(xmm_reg); michael@0: dbls[0] = GenericNaN(); michael@0: dbls[1] = 0; michael@0: } michael@0: } michael@0: michael@0: struct GetHeapAccessOffset michael@0: { michael@0: const AsmJSModule &module; michael@0: explicit GetHeapAccessOffset(const AsmJSModule &module) : module(module) {} michael@0: uintptr_t operator[](size_t index) const { michael@0: return module.heapAccess(index).offset(); michael@0: } michael@0: }; michael@0: michael@0: // Perform a binary search on the projected offsets of the known heap accesses michael@0: // in the module. 
michael@0: static const AsmJSHeapAccess * michael@0: LookupHeapAccess(const AsmJSModule &module, uint8_t *pc) michael@0: { michael@0: JS_ASSERT(module.containsPC(pc)); michael@0: michael@0: uintptr_t pcOff = pc - module.codeBase(); michael@0: michael@0: size_t match; michael@0: if (!BinarySearch(GetHeapAccessOffset(module), 0, module.numHeapAccesses(), pcOff, &match)) michael@0: return nullptr; michael@0: michael@0: return &module.heapAccess(match); michael@0: } michael@0: #endif michael@0: michael@0: #if defined(XP_WIN) michael@0: # include "jswin.h" michael@0: #else michael@0: # include michael@0: # include michael@0: #endif michael@0: michael@0: #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) michael@0: # include // for ucontext_t, mcontext_t michael@0: #endif michael@0: michael@0: #if defined(JS_CODEGEN_X64) michael@0: # if defined(__DragonFly__) michael@0: # include // for union savefpu michael@0: # elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \ michael@0: defined(__NetBSD__) || defined(__OpenBSD__) michael@0: # include // for struct savefpu/fxsave64 michael@0: # endif michael@0: #endif michael@0: michael@0: #if defined(ANDROID) michael@0: // Not all versions of the Android NDK define ucontext_t or mcontext_t. michael@0: // Detect this and provide custom but compatible definitions. Note that these michael@0: // follow the GLibc naming convention to access register values from michael@0: // mcontext_t. michael@0: // michael@0: // See: https://chromiumcodereview.appspot.com/10829122/ michael@0: // See: http://code.google.com/p/android/issues/detail?id=34784 michael@0: # if !defined(__BIONIC_HAVE_UCONTEXT_T) michael@0: # if defined(__arm__) michael@0: michael@0: // GLibc on ARM defines mcontext_t has a typedef for 'struct sigcontext'. michael@0: // Old versions of the C library didn't define the type. 
michael@0: # if !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT) michael@0: # include michael@0: # endif michael@0: michael@0: typedef struct sigcontext mcontext_t; michael@0: michael@0: typedef struct ucontext { michael@0: uint32_t uc_flags; michael@0: struct ucontext* uc_link; michael@0: stack_t uc_stack; michael@0: mcontext_t uc_mcontext; michael@0: // Other fields are not used so don't define them here. michael@0: } ucontext_t; michael@0: michael@0: # elif defined(__i386__) michael@0: // x86 version for Android. michael@0: typedef struct { michael@0: uint32_t gregs[19]; michael@0: void* fpregs; michael@0: uint32_t oldmask; michael@0: uint32_t cr2; michael@0: } mcontext_t; michael@0: michael@0: typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks michael@0: typedef struct ucontext { michael@0: uint32_t uc_flags; michael@0: struct ucontext* uc_link; michael@0: stack_t uc_stack; michael@0: mcontext_t uc_mcontext; michael@0: // Other fields are not used by V8, don't define them here. michael@0: } ucontext_t; michael@0: enum { REG_EIP = 14 }; michael@0: # endif // defined(__i386__) michael@0: # endif // !defined(__BIONIC_HAVE_UCONTEXT_T) michael@0: #endif // defined(ANDROID) michael@0: michael@0: #if defined(ANDROID) && defined(MOZ_LINKER) michael@0: // Apparently, on some Android systems, the signal handler is always passed michael@0: // nullptr as the faulting address. This would cause the asm.js signal handler michael@0: // to think that a safe out-of-bounds access was a nullptr-deref. This michael@0: // brokenness is already detected by ElfLoader (enabled by MOZ_LINKER), so michael@0: // reuse that check to disable asm.js compilation on systems where the signal michael@0: // handler is broken. 
michael@0: extern "C" MFBT_API bool IsSignalHandlingBroken(); michael@0: #else michael@0: static bool IsSignalHandlingBroken() { return false; } michael@0: #endif // defined(MOZ_LINKER) michael@0: michael@0: #if !defined(XP_WIN) michael@0: # define CONTEXT ucontext_t michael@0: #endif michael@0: michael@0: #if defined(JS_CPU_X64) michael@0: # define PC_sig(p) RIP_sig(p) michael@0: #elif defined(JS_CPU_X86) michael@0: # define PC_sig(p) EIP_sig(p) michael@0: #elif defined(JS_CPU_ARM) michael@0: # define PC_sig(p) R15_sig(p) michael@0: #endif michael@0: michael@0: static bool michael@0: HandleSimulatorInterrupt(JSRuntime *rt, AsmJSActivation *activation, void *faultingAddress) michael@0: { michael@0: // If the ARM simulator is enabled, the pc is in the simulator C++ code and michael@0: // not in the generated code, so we check the simulator's pc manually. Also michael@0: // note that we can't simply use simulator->set_pc() here because the michael@0: // simulator could be in the middle of an instruction. On ARM, the signal michael@0: // handlers are currently only used for Odin code, see bug 964258. 
michael@0: michael@0: #ifdef JS_ARM_SIMULATOR michael@0: const AsmJSModule &module = activation->module(); michael@0: if (module.containsPC((void *)rt->mainThread.simulator()->get_pc()) && michael@0: module.containsPC(faultingAddress)) michael@0: { michael@0: activation->setInterrupted(nullptr); michael@0: int32_t nextpc = int32_t(module.interruptExit()); michael@0: rt->mainThread.simulator()->set_resume_pc(nextpc); michael@0: return true; michael@0: } michael@0: #endif michael@0: return false; michael@0: } michael@0: michael@0: #if !defined(XP_MACOSX) michael@0: static uint8_t ** michael@0: ContextToPC(CONTEXT *context) michael@0: { michael@0: JS_STATIC_ASSERT(sizeof(PC_sig(context)) == sizeof(void*)); michael@0: return reinterpret_cast(&PC_sig(context)); michael@0: } michael@0: michael@0: # if defined(JS_CODEGEN_X64) michael@0: static void michael@0: SetRegisterToCoercedUndefined(CONTEXT *context, bool isFloat32, AnyRegister reg) michael@0: { michael@0: if (reg.isFloat()) { michael@0: switch (reg.fpu().code()) { michael@0: case JSC::X86Registers::xmm0: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 0)); break; michael@0: case JSC::X86Registers::xmm1: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 1)); break; michael@0: case JSC::X86Registers::xmm2: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 2)); break; michael@0: case JSC::X86Registers::xmm3: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 3)); break; michael@0: case JSC::X86Registers::xmm4: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 4)); break; michael@0: case JSC::X86Registers::xmm5: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 5)); break; michael@0: case JSC::X86Registers::xmm6: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 6)); break; michael@0: case JSC::X86Registers::xmm7: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 7)); break; michael@0: case JSC::X86Registers::xmm8: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 8)); break; michael@0: case JSC::X86Registers::xmm9: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 9)); break; 
michael@0: case JSC::X86Registers::xmm10: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 10)); break; michael@0: case JSC::X86Registers::xmm11: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 11)); break; michael@0: case JSC::X86Registers::xmm12: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 12)); break; michael@0: case JSC::X86Registers::xmm13: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 13)); break; michael@0: case JSC::X86Registers::xmm14: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 14)); break; michael@0: case JSC::X86Registers::xmm15: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 15)); break; michael@0: default: MOZ_CRASH(); michael@0: } michael@0: } else { michael@0: switch (reg.gpr().code()) { michael@0: case JSC::X86Registers::eax: RAX_sig(context) = 0; break; michael@0: case JSC::X86Registers::ecx: RCX_sig(context) = 0; break; michael@0: case JSC::X86Registers::edx: RDX_sig(context) = 0; break; michael@0: case JSC::X86Registers::ebx: RBX_sig(context) = 0; break; michael@0: case JSC::X86Registers::esp: RSP_sig(context) = 0; break; michael@0: case JSC::X86Registers::ebp: RBP_sig(context) = 0; break; michael@0: case JSC::X86Registers::esi: RSI_sig(context) = 0; break; michael@0: case JSC::X86Registers::edi: RDI_sig(context) = 0; break; michael@0: case JSC::X86Registers::r8: R8_sig(context) = 0; break; michael@0: case JSC::X86Registers::r9: R9_sig(context) = 0; break; michael@0: case JSC::X86Registers::r10: R10_sig(context) = 0; break; michael@0: case JSC::X86Registers::r11: R11_sig(context) = 0; break; michael@0: case JSC::X86Registers::r12: R12_sig(context) = 0; break; michael@0: case JSC::X86Registers::r13: R13_sig(context) = 0; break; michael@0: case JSC::X86Registers::r14: R14_sig(context) = 0; break; michael@0: case JSC::X86Registers::r15: R15_sig(context) = 0; break; michael@0: default: MOZ_CRASH(); michael@0: } michael@0: } michael@0: } michael@0: # endif // JS_CODEGEN_X64 michael@0: #endif // !XP_MACOSX michael@0: michael@0: #if defined(XP_WIN) michael@0: 
michael@0: static bool michael@0: HandleException(PEXCEPTION_POINTERS exception) michael@0: { michael@0: EXCEPTION_RECORD *record = exception->ExceptionRecord; michael@0: CONTEXT *context = exception->ContextRecord; michael@0: michael@0: if (record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION) michael@0: return false; michael@0: michael@0: uint8_t **ppc = ContextToPC(context); michael@0: uint8_t *pc = *ppc; michael@0: JS_ASSERT(pc == record->ExceptionAddress); michael@0: michael@0: if (record->NumberParameters < 2) michael@0: return false; michael@0: michael@0: void *faultingAddress = (void*)record->ExceptionInformation[1]; michael@0: michael@0: JSRuntime *rt = RuntimeForCurrentThread(); michael@0: michael@0: // Don't allow recursive handling of signals, see AutoSetHandlingSignal. michael@0: if (!rt || rt->handlingSignal) michael@0: return false; michael@0: AutoSetHandlingSignal handling(rt); michael@0: michael@0: if (rt->jitRuntime() && rt->jitRuntime()->handleAccessViolation(rt, faultingAddress)) michael@0: return true; michael@0: michael@0: AsmJSActivation *activation = InnermostAsmJSActivation(); michael@0: if (!activation) michael@0: return false; michael@0: michael@0: const AsmJSModule &module = activation->module(); michael@0: if (!module.containsPC(pc)) michael@0: return false; michael@0: michael@0: // If we faulted trying to execute code in 'module', this must be an michael@0: // interrupt callback (see RequestInterruptForAsmJSCode). Redirect michael@0: // execution to a trampoline which will call js::HandleExecutionInterrupt. michael@0: // The trampoline will jump to activation->resumePC if execution isn't michael@0: // interrupted. 
michael@0: if (module.containsPC(faultingAddress)) { michael@0: activation->setInterrupted(pc); michael@0: *ppc = module.interruptExit(); michael@0: michael@0: JSRuntime::AutoLockForInterrupt lock(rt); michael@0: module.unprotectCode(rt); michael@0: return true; michael@0: } michael@0: michael@0: # if defined(JS_CODEGEN_X64) michael@0: // These checks aren't necessary, but, since we can, check anyway to make michael@0: // sure we aren't covering up a real bug. michael@0: if (!module.maybeHeap() || michael@0: faultingAddress < module.maybeHeap() || michael@0: faultingAddress >= module.maybeHeap() + AsmJSBufferProtectedSize) michael@0: { michael@0: return false; michael@0: } michael@0: michael@0: const AsmJSHeapAccess *heapAccess = LookupHeapAccess(module, pc); michael@0: if (!heapAccess) michael@0: return false; michael@0: michael@0: // Also not necessary, but, since we can, do. michael@0: if (heapAccess->isLoad() != !record->ExceptionInformation[0]) michael@0: return false; michael@0: michael@0: // We now know that this is an out-of-bounds access made by an asm.js michael@0: // load/store that we should handle. If this is a load, assign the michael@0: // JS-defined result value to the destination register (ToInt32(undefined) michael@0: // or ToNumber(undefined), determined by the type of the destination michael@0: // register) and set the PC to the next op. Upon return from the handler, michael@0: // execution will resume at this next PC. 
michael@0: if (heapAccess->isLoad()) michael@0: SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg()); michael@0: *ppc += heapAccess->opLength(); michael@0: return true; michael@0: # else michael@0: return false; michael@0: # endif michael@0: } michael@0: michael@0: static LONG WINAPI michael@0: AsmJSExceptionHandler(LPEXCEPTION_POINTERS exception) michael@0: { michael@0: if (HandleException(exception)) michael@0: return EXCEPTION_CONTINUE_EXECUTION; michael@0: michael@0: // No need to worry about calling other handlers, the OS does this for us. michael@0: return EXCEPTION_CONTINUE_SEARCH; michael@0: } michael@0: michael@0: #elif defined(XP_MACOSX) michael@0: # include michael@0: michael@0: static uint8_t ** michael@0: ContextToPC(x86_thread_state_t &state) michael@0: { michael@0: # if defined(JS_CODEGEN_X64) michael@0: JS_STATIC_ASSERT(sizeof(state.uts.ts64.__rip) == sizeof(void*)); michael@0: return reinterpret_cast(&state.uts.ts64.__rip); michael@0: # else michael@0: JS_STATIC_ASSERT(sizeof(state.uts.ts32.__eip) == sizeof(void*)); michael@0: return reinterpret_cast(&state.uts.ts32.__eip); michael@0: # endif michael@0: } michael@0: michael@0: # if defined(JS_CODEGEN_X64) michael@0: static bool michael@0: SetRegisterToCoercedUndefined(mach_port_t rtThread, x86_thread_state64_t &state, michael@0: const AsmJSHeapAccess &heapAccess) michael@0: { michael@0: if (heapAccess.loadedReg().isFloat()) { michael@0: kern_return_t kret; michael@0: michael@0: x86_float_state64_t fstate; michael@0: unsigned int count = x86_FLOAT_STATE64_COUNT; michael@0: kret = thread_get_state(rtThread, x86_FLOAT_STATE64, (thread_state_t) &fstate, &count); michael@0: if (kret != KERN_SUCCESS) michael@0: return false; michael@0: michael@0: bool f32 = heapAccess.isFloat32Load(); michael@0: switch (heapAccess.loadedReg().fpu().code()) { michael@0: case JSC::X86Registers::xmm0: SetXMMRegToNaN(f32, &fstate.__fpu_xmm0); break; michael@0: case 
JSC::X86Registers::xmm1: SetXMMRegToNaN(f32, &fstate.__fpu_xmm1); break; michael@0: case JSC::X86Registers::xmm2: SetXMMRegToNaN(f32, &fstate.__fpu_xmm2); break; michael@0: case JSC::X86Registers::xmm3: SetXMMRegToNaN(f32, &fstate.__fpu_xmm3); break; michael@0: case JSC::X86Registers::xmm4: SetXMMRegToNaN(f32, &fstate.__fpu_xmm4); break; michael@0: case JSC::X86Registers::xmm5: SetXMMRegToNaN(f32, &fstate.__fpu_xmm5); break; michael@0: case JSC::X86Registers::xmm6: SetXMMRegToNaN(f32, &fstate.__fpu_xmm6); break; michael@0: case JSC::X86Registers::xmm7: SetXMMRegToNaN(f32, &fstate.__fpu_xmm7); break; michael@0: case JSC::X86Registers::xmm8: SetXMMRegToNaN(f32, &fstate.__fpu_xmm8); break; michael@0: case JSC::X86Registers::xmm9: SetXMMRegToNaN(f32, &fstate.__fpu_xmm9); break; michael@0: case JSC::X86Registers::xmm10: SetXMMRegToNaN(f32, &fstate.__fpu_xmm10); break; michael@0: case JSC::X86Registers::xmm11: SetXMMRegToNaN(f32, &fstate.__fpu_xmm11); break; michael@0: case JSC::X86Registers::xmm12: SetXMMRegToNaN(f32, &fstate.__fpu_xmm12); break; michael@0: case JSC::X86Registers::xmm13: SetXMMRegToNaN(f32, &fstate.__fpu_xmm13); break; michael@0: case JSC::X86Registers::xmm14: SetXMMRegToNaN(f32, &fstate.__fpu_xmm14); break; michael@0: case JSC::X86Registers::xmm15: SetXMMRegToNaN(f32, &fstate.__fpu_xmm15); break; michael@0: default: MOZ_CRASH(); michael@0: } michael@0: michael@0: kret = thread_set_state(rtThread, x86_FLOAT_STATE64, (thread_state_t)&fstate, x86_FLOAT_STATE64_COUNT); michael@0: if (kret != KERN_SUCCESS) michael@0: return false; michael@0: } else { michael@0: switch (heapAccess.loadedReg().gpr().code()) { michael@0: case JSC::X86Registers::eax: state.__rax = 0; break; michael@0: case JSC::X86Registers::ecx: state.__rcx = 0; break; michael@0: case JSC::X86Registers::edx: state.__rdx = 0; break; michael@0: case JSC::X86Registers::ebx: state.__rbx = 0; break; michael@0: case JSC::X86Registers::esp: state.__rsp = 0; break; michael@0: case 
JSC::X86Registers::ebp: state.__rbp = 0; break; michael@0: case JSC::X86Registers::esi: state.__rsi = 0; break; michael@0: case JSC::X86Registers::edi: state.__rdi = 0; break; michael@0: case JSC::X86Registers::r8: state.__r8 = 0; break; michael@0: case JSC::X86Registers::r9: state.__r9 = 0; break; michael@0: case JSC::X86Registers::r10: state.__r10 = 0; break; michael@0: case JSC::X86Registers::r11: state.__r11 = 0; break; michael@0: case JSC::X86Registers::r12: state.__r12 = 0; break; michael@0: case JSC::X86Registers::r13: state.__r13 = 0; break; michael@0: case JSC::X86Registers::r14: state.__r14 = 0; break; michael@0: case JSC::X86Registers::r15: state.__r15 = 0; break; michael@0: default: MOZ_CRASH(); michael@0: } michael@0: } michael@0: return true; michael@0: } michael@0: # endif michael@0: michael@0: // This definition was generated by mig (the Mach Interface Generator) for the michael@0: // routine 'exception_raise' (exc.defs). michael@0: #pragma pack(4) michael@0: typedef struct { michael@0: mach_msg_header_t Head; michael@0: /* start of the kernel processed data */ michael@0: mach_msg_body_t msgh_body; michael@0: mach_msg_port_descriptor_t thread; michael@0: mach_msg_port_descriptor_t task; michael@0: /* end of the kernel processed data */ michael@0: NDR_record_t NDR; michael@0: exception_type_t exception; michael@0: mach_msg_type_number_t codeCnt; michael@0: int64_t code[2]; michael@0: } Request__mach_exception_raise_t; michael@0: #pragma pack() michael@0: michael@0: // The full Mach message also includes a trailer. michael@0: struct ExceptionRequest michael@0: { michael@0: Request__mach_exception_raise_t body; michael@0: mach_msg_trailer_t trailer; michael@0: }; michael@0: michael@0: static bool michael@0: HandleMachException(JSRuntime *rt, const ExceptionRequest &request) michael@0: { michael@0: // Don't allow recursive handling of signals, see AutoSetHandlingSignal. 
michael@0: if (rt->handlingSignal) michael@0: return false; michael@0: AutoSetHandlingSignal handling(rt); michael@0: michael@0: // Get the port of the JSRuntime's thread from the message. michael@0: mach_port_t rtThread = request.body.thread.name; michael@0: michael@0: // Read out the JSRuntime thread's register state. michael@0: x86_thread_state_t state; michael@0: unsigned int count = x86_THREAD_STATE_COUNT; michael@0: kern_return_t kret; michael@0: kret = thread_get_state(rtThread, x86_THREAD_STATE, (thread_state_t)&state, &count); michael@0: if (kret != KERN_SUCCESS) michael@0: return false; michael@0: michael@0: uint8_t **ppc = ContextToPC(state); michael@0: uint8_t *pc = *ppc; michael@0: michael@0: if (request.body.exception != EXC_BAD_ACCESS || request.body.codeCnt != 2) michael@0: return false; michael@0: michael@0: void *faultingAddress = (void*)request.body.code[1]; michael@0: michael@0: if (rt->jitRuntime() && rt->jitRuntime()->handleAccessViolation(rt, faultingAddress)) michael@0: return true; michael@0: michael@0: AsmJSActivation *activation = rt->mainThread.asmJSActivationStackFromAnyThread(); michael@0: if (!activation) michael@0: return false; michael@0: michael@0: const AsmJSModule &module = activation->module(); michael@0: if (HandleSimulatorInterrupt(rt, activation, faultingAddress)) { michael@0: JSRuntime::AutoLockForInterrupt lock(rt); michael@0: module.unprotectCode(rt); michael@0: return true; michael@0: } michael@0: michael@0: if (!module.containsPC(pc)) michael@0: return false; michael@0: michael@0: // If we faulted trying to execute code in 'module', this must be an michael@0: // interrupt callback (see RequestInterruptForAsmJSCode). Redirect michael@0: // execution to a trampoline which will call js::HandleExecutionInterrupt. michael@0: // The trampoline will jump to activation->resumePC if execution isn't michael@0: // interrupted. 
michael@0: if (module.containsPC(faultingAddress)) { michael@0: activation->setInterrupted(pc); michael@0: *ppc = module.interruptExit(); michael@0: michael@0: JSRuntime::AutoLockForInterrupt lock(rt); michael@0: module.unprotectCode(rt); michael@0: michael@0: // Update the thread state with the new pc. michael@0: kret = thread_set_state(rtThread, x86_THREAD_STATE, (thread_state_t)&state, x86_THREAD_STATE_COUNT); michael@0: return kret == KERN_SUCCESS; michael@0: } michael@0: michael@0: # if defined(JS_CODEGEN_X64) michael@0: // These checks aren't necessary, but, since we can, check anyway to make michael@0: // sure we aren't covering up a real bug. michael@0: if (!module.maybeHeap() || michael@0: faultingAddress < module.maybeHeap() || michael@0: faultingAddress >= module.maybeHeap() + AsmJSBufferProtectedSize) michael@0: { michael@0: return false; michael@0: } michael@0: michael@0: const AsmJSHeapAccess *heapAccess = LookupHeapAccess(module, pc); michael@0: if (!heapAccess) michael@0: return false; michael@0: michael@0: // We now know that this is an out-of-bounds access made by an asm.js michael@0: // load/store that we should handle. If this is a load, assign the michael@0: // JS-defined result value to the destination register (ToInt32(undefined) michael@0: // or ToNumber(undefined), determined by the type of the destination michael@0: // register) and set the PC to the next op. Upon return from the handler, michael@0: // execution will resume at this next PC. michael@0: if (heapAccess->isLoad()) { michael@0: if (!SetRegisterToCoercedUndefined(rtThread, state.uts.ts64, *heapAccess)) michael@0: return false; michael@0: } michael@0: *ppc += heapAccess->opLength(); michael@0: michael@0: // Update the thread state with the new pc. 
michael@0: kret = thread_set_state(rtThread, x86_THREAD_STATE, (thread_state_t)&state, x86_THREAD_STATE_COUNT); michael@0: if (kret != KERN_SUCCESS) michael@0: return false; michael@0: michael@0: return true; michael@0: # else michael@0: return false; michael@0: # endif michael@0: } michael@0: michael@0: // Taken from mach_exc in /usr/include/mach/mach_exc.defs. michael@0: static const mach_msg_id_t sExceptionId = 2405; michael@0: michael@0: // The choice of id here is arbitrary, the only constraint is that sQuitId != sExceptionId. michael@0: static const mach_msg_id_t sQuitId = 42; michael@0: michael@0: void michael@0: AsmJSMachExceptionHandlerThread(void *threadArg) michael@0: { michael@0: JSRuntime *rt = reinterpret_cast(threadArg); michael@0: mach_port_t port = rt->asmJSMachExceptionHandler.port(); michael@0: kern_return_t kret; michael@0: michael@0: while(true) { michael@0: ExceptionRequest request; michael@0: kret = mach_msg(&request.body.Head, MACH_RCV_MSG, 0, sizeof(request), michael@0: port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); michael@0: michael@0: // If we fail even receiving the message, we can't even send a reply! michael@0: // Rather than hanging the faulting thread (hanging the browser), crash. michael@0: if (kret != KERN_SUCCESS) { michael@0: fprintf(stderr, "AsmJSMachExceptionHandlerThread: mach_msg failed with %d\n", (int)kret); michael@0: MOZ_CRASH(); michael@0: } michael@0: michael@0: // There are only two messages we should be receiving: an exception michael@0: // message that occurs when the runtime's thread faults and the quit michael@0: // message sent when the runtime is shutting down. 
michael@0: if (request.body.Head.msgh_id == sQuitId) michael@0: break; michael@0: if (request.body.Head.msgh_id != sExceptionId) { michael@0: fprintf(stderr, "Unexpected msg header id %d\n", (int)request.body.Head.msgh_bits); michael@0: MOZ_CRASH(); michael@0: } michael@0: michael@0: // Some thread just commited an EXC_BAD_ACCESS and has been suspended by michael@0: // the kernel. The kernel is waiting for us to reply with instructions. michael@0: // Our default is the "not handled" reply (by setting the RetCode field michael@0: // of the reply to KERN_FAILURE) which tells the kernel to continue michael@0: // searching at the process and system level. If this is an asm.js michael@0: // expected exception, we handle it and return KERN_SUCCESS. michael@0: bool handled = HandleMachException(rt, request); michael@0: kern_return_t replyCode = handled ? KERN_SUCCESS : KERN_FAILURE; michael@0: michael@0: // This magic incantation to send a reply back to the kernel was derived michael@0: // from the exc_server generated by 'mig -v /usr/include/mach/mach_exc.defs'. 
michael@0: __Reply__exception_raise_t reply; michael@0: reply.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.body.Head.msgh_bits), 0); michael@0: reply.Head.msgh_size = sizeof(reply); michael@0: reply.Head.msgh_remote_port = request.body.Head.msgh_remote_port; michael@0: reply.Head.msgh_local_port = MACH_PORT_NULL; michael@0: reply.Head.msgh_id = request.body.Head.msgh_id + 100; michael@0: reply.NDR = NDR_record; michael@0: reply.RetCode = replyCode; michael@0: mach_msg(&reply.Head, MACH_SEND_MSG, sizeof(reply), 0, MACH_PORT_NULL, michael@0: MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); michael@0: } michael@0: } michael@0: michael@0: AsmJSMachExceptionHandler::AsmJSMachExceptionHandler() michael@0: : installed_(false), michael@0: thread_(nullptr), michael@0: port_(MACH_PORT_NULL) michael@0: {} michael@0: michael@0: void michael@0: AsmJSMachExceptionHandler::uninstall() michael@0: { michael@0: #ifdef JS_THREADSAFE michael@0: if (installed_) { michael@0: thread_port_t thread = mach_thread_self(); michael@0: kern_return_t kret = thread_set_exception_ports(thread, michael@0: EXC_MASK_BAD_ACCESS, michael@0: MACH_PORT_NULL, michael@0: EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES, michael@0: THREAD_STATE_NONE); michael@0: mach_port_deallocate(mach_task_self(), thread); michael@0: if (kret != KERN_SUCCESS) michael@0: MOZ_CRASH(); michael@0: installed_ = false; michael@0: } michael@0: if (thread_ != nullptr) { michael@0: // Break the handler thread out of the mach_msg loop. 
michael@0: mach_msg_header_t msg; michael@0: msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0); michael@0: msg.msgh_size = sizeof(msg); michael@0: msg.msgh_remote_port = port_; michael@0: msg.msgh_local_port = MACH_PORT_NULL; michael@0: msg.msgh_reserved = 0; michael@0: msg.msgh_id = sQuitId; michael@0: kern_return_t kret = mach_msg(&msg, MACH_SEND_MSG, sizeof(msg), 0, MACH_PORT_NULL, michael@0: MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); michael@0: if (kret != KERN_SUCCESS) { michael@0: fprintf(stderr, "AsmJSMachExceptionHandler: failed to send quit message: %d\n", (int)kret); michael@0: MOZ_CRASH(); michael@0: } michael@0: michael@0: // Wait for the handler thread to complete before deallocating the port. michael@0: PR_JoinThread(thread_); michael@0: thread_ = nullptr; michael@0: } michael@0: if (port_ != MACH_PORT_NULL) { michael@0: DebugOnly kret = mach_port_destroy(mach_task_self(), port_); michael@0: JS_ASSERT(kret == KERN_SUCCESS); michael@0: port_ = MACH_PORT_NULL; michael@0: } michael@0: #else michael@0: JS_ASSERT(!installed_); michael@0: #endif michael@0: } michael@0: michael@0: bool michael@0: AsmJSMachExceptionHandler::install(JSRuntime *rt) michael@0: { michael@0: #ifdef JS_THREADSAFE michael@0: JS_ASSERT(!installed()); michael@0: kern_return_t kret; michael@0: mach_port_t thread; michael@0: michael@0: // Get a port which can send and receive data. michael@0: kret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port_); michael@0: if (kret != KERN_SUCCESS) michael@0: goto error; michael@0: kret = mach_port_insert_right(mach_task_self(), port_, port_, MACH_MSG_TYPE_MAKE_SEND); michael@0: if (kret != KERN_SUCCESS) michael@0: goto error; michael@0: michael@0: // Create a thread to block on reading port_. 
michael@0: thread_ = PR_CreateThread(PR_USER_THREAD, AsmJSMachExceptionHandlerThread, rt, michael@0: PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, 0); michael@0: if (!thread_) michael@0: goto error; michael@0: michael@0: // Direct exceptions on this thread to port_ (and thus our handler thread). michael@0: // Note: we are totally clobbering any existing *thread* exception ports and michael@0: // not even attempting to forward. Breakpad and gdb both use the *process* michael@0: // exception ports which are only called if the thread doesn't handle the michael@0: // exception, so we should be fine. michael@0: thread = mach_thread_self(); michael@0: kret = thread_set_exception_ports(thread, michael@0: EXC_MASK_BAD_ACCESS, michael@0: port_, michael@0: EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES, michael@0: THREAD_STATE_NONE); michael@0: mach_port_deallocate(mach_task_self(), thread); michael@0: if (kret != KERN_SUCCESS) michael@0: goto error; michael@0: michael@0: installed_ = true; michael@0: return true; michael@0: michael@0: error: michael@0: uninstall(); michael@0: return false; michael@0: #else michael@0: return false; michael@0: #endif michael@0: } michael@0: michael@0: #else // If not Windows or Mac, assume Unix michael@0: michael@0: // Be very cautious and default to not handling; we don't want to accidentally michael@0: // silence real crashes from real bugs. michael@0: static bool michael@0: HandleSignal(int signum, siginfo_t *info, void *ctx) michael@0: { michael@0: CONTEXT *context = (CONTEXT *)ctx; michael@0: uint8_t **ppc = ContextToPC(context); michael@0: uint8_t *pc = *ppc; michael@0: michael@0: void *faultingAddress = info->si_addr; michael@0: michael@0: JSRuntime *rt = RuntimeForCurrentThread(); michael@0: michael@0: // Don't allow recursive handling of signals, see AutoSetHandlingSignal. 
    if (!rt || rt->handlingSignal)
        return false;
    AutoSetHandlingSignal handling(rt);

    // Give the JIT runtime a chance to claim the access violation first.
    if (rt->jitRuntime() && rt->jitRuntime()->handleAccessViolation(rt, faultingAddress))
        return true;

    AsmJSActivation *activation = InnermostAsmJSActivation();
    if (!activation)
        return false;

    const AsmJSModule &module = activation->module();
    // If the simulator claims this fault as an interrupt, unprotect the code
    // (protected by RequestInterruptForAsmJSCode) and resume.
    if (HandleSimulatorInterrupt(rt, activation, faultingAddress)) {
        JSRuntime::AutoLockForInterrupt lock(rt);
        module.unprotectCode(rt);
        return true;
    }

    // Only faults whose pc lies inside this module's code are ours to handle.
    if (!module.containsPC(pc))
        return false;

    // If we faulted trying to execute code in 'module', this must be an
    // interrupt callback (see RequestInterruptForAsmJSCode). Redirect
    // execution to a trampoline which will call js::HandleExecutionInterrupt.
    // The trampoline will jump to activation->resumePC if execution isn't
    // interrupted.
    if (module.containsPC(faultingAddress)) {
        activation->setInterrupted(pc);
        *ppc = module.interruptExit();

        JSRuntime::AutoLockForInterrupt lock(rt);
        module.unprotectCode(rt);
        return true;
    }

# if defined(JS_CODEGEN_X64)
    // These checks aren't necessary, but, since we can, check anyway to make
    // sure we aren't covering up a real bug.
    if (!module.maybeHeap() ||
        faultingAddress < module.maybeHeap() ||
        faultingAddress >= module.maybeHeap() + AsmJSBufferProtectedSize)
    {
        return false;
    }

    const AsmJSHeapAccess *heapAccess = LookupHeapAccess(module, pc);
    if (!heapAccess)
        return false;

    // We now know that this is an out-of-bounds access made by an asm.js
    // load/store that we should handle. If this is a load, assign the
    // JS-defined result value to the destination register (ToInt32(undefined)
    // or ToNumber(undefined), determined by the type of the destination
    // register) and set the PC to the next op. Upon return from the handler,
    // execution will resume at this next PC.
    if (heapAccess->isLoad())
        SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
    *ppc += heapAccess->opLength();
    return true;
# else
    return false;
# endif
}

// Disposition of the SIGSEGV handler that was in place before ours; consulted
// by AsmJSFaultHandler to forward signals we don't handle.
static struct sigaction sPrevHandler;

static void
AsmJSFaultHandler(int signum, siginfo_t *info, void *context)
{
    if (HandleSignal(signum, info, context))
        return;

    // This signal is not for any asm.js code we expect, so we need to forward
    // the signal to the next handler. If there is no next handler (SIG_IGN or
    // SIG_DFL), then it's time to crash. To do this, we set the signal back to
    // its original disposition and return. This will cause the faulting op to
    // be re-executed which will crash in the normal way. The advantage of
    // doing this over calling _exit() is that we remove ourselves from the crash
    // stack which improves crash reports. If there is a next handler, call it.
    // It will either crash synchronously, fix up the instruction so that
    // execution can continue and return, or trigger a crash by returning the
    // signal to its original disposition and returning.
    //
    // Note: the order of these tests matters.
    if (sPrevHandler.sa_flags & SA_SIGINFO)
        sPrevHandler.sa_sigaction(signum, info, context);
    else if (sPrevHandler.sa_handler == SIG_DFL || sPrevHandler.sa_handler == SIG_IGN)
        sigaction(signum, &sPrevHandler, nullptr);
    else
        sPrevHandler.sa_handler(signum);
}
#endif

#if !defined(XP_MACOSX)
// Whether the single process-wide handler has been installed (Windows/Unix
// only; on OSX each JSRuntime gets its own handler instead).
static bool sHandlersInstalled = false;
#endif

bool
js::EnsureAsmJSSignalHandlersInstalled(JSRuntime *rt)
{
    if (IsSignalHandlingBroken())
        return false;

#if defined(XP_MACOSX)
    // On OSX, each JSRuntime gets its own handler.
    return rt->asmJSMachExceptionHandler.installed() || rt->asmJSMachExceptionHandler.install(rt);
#else
    // Assume Windows or Unix. For these platforms, there is a single,
    // process-wide signal handler installed. Take care to only install it once.
    if (sHandlersInstalled)
        return true;

# if defined(XP_WIN)
    if (!AddVectoredExceptionHandler(/* FirstHandler = */true, AsmJSExceptionHandler))
        return false;
# else
    // Assume Unix. SA_NODEFER allows us to reenter the signal handler if we
    // crash while handling the signal, and fall through to the Breakpad
    // handler by testing handlingSignal.
    struct sigaction sigAction;
    sigAction.sa_flags = SA_SIGINFO | SA_NODEFER;
    sigAction.sa_sigaction = &AsmJSFaultHandler;
    sigemptyset(&sigAction.sa_mask);
    // Save the previous disposition in sPrevHandler so AsmJSFaultHandler can
    // forward signals it doesn't handle.
    if (sigaction(SIGSEGV, &sigAction, &sPrevHandler))
        return false;
# endif

    sHandlersInstalled = true;
#endif
    return true;
}

// To interrupt execution of a JSRuntime, any thread may call
// JS_RequestInterruptCallback (JSRuntime::requestInterruptCallback from inside
// the engine). In the simplest case, this sets some state that is polled at
// regular intervals (function prologues, loop headers). For tight loops, this
// poses non-trivial overhead. For asm.js, we can do better: when another
// thread requests an interrupt, we simply mprotect all of the innermost asm.js
// module activation's code. This will trigger a SIGSEGV, taking us into
// AsmJSFaultHandler. From there, we can manually redirect execution to call
// js::HandleExecutionInterrupt. The memory is un-protected from the signal
// handler after control flow is redirected.
void
js::RequestInterruptForAsmJSCode(JSRuntime *rt)
{
    JS_ASSERT(rt->currentThreadOwnsInterruptLock());

    AsmJSActivation *activation = rt->mainThread.asmJSActivationStackFromAnyThread();
    if (!activation)
        return;

    activation->module().protectCode(rt);
}

#if defined(MOZ_ASAN) && defined(JS_STANDALONE)
// Usually, this definition is found in mozglue (see mozglue/build/AsanOptions.cpp).
// However, when doing standalone JS builds, mozglue is not used and we must ensure
// that we still allow custom SIGSEGV handlers for asm.js and ion to work correctly.
// ASan consults __asan_default_options() for its default flags; permit the
// process to install its own SIGSEGV handler.
extern "C" MOZ_ASAN_BLACKLIST
const char* __asan_default_options() {
    return "allow_user_segv_handler=1";
}
#endif