Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double-key logic according to
private browsing mode or the privacy.thirdparty.isolate preference, and
implement it in GetCookieStringCommon and FindCookie, where it counts...
There are some reservations about how to convince FindCookie callers to test
the condition and pass a nullptr when double-key logic is disabled.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
michael@0 | 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: |
michael@0 | 3 | * This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #include "jit/AsmJSSignalHandlers.h" |
michael@0 | 8 | |
michael@0 | 9 | #include "mozilla/BinarySearch.h" |
michael@0 | 10 | |
michael@0 | 11 | #include "assembler/assembler/MacroAssembler.h" |
michael@0 | 12 | #include "jit/AsmJSModule.h" |
michael@0 | 13 | |
michael@0 | 14 | using namespace js; |
michael@0 | 15 | using namespace js::jit; |
michael@0 | 16 | using namespace mozilla; |
michael@0 | 17 | |
michael@0 | 18 | using JS::GenericNaN; |
michael@0 | 19 | |
michael@0 | 20 | #if defined(XP_WIN) |
michael@0 | 21 | # define XMM_sig(p,i) ((p)->Xmm##i) |
michael@0 | 22 | # define EIP_sig(p) ((p)->Eip) |
michael@0 | 23 | # define RIP_sig(p) ((p)->Rip) |
michael@0 | 24 | # define RAX_sig(p) ((p)->Rax) |
michael@0 | 25 | # define RCX_sig(p) ((p)->Rcx) |
michael@0 | 26 | # define RDX_sig(p) ((p)->Rdx) |
michael@0 | 27 | # define RBX_sig(p) ((p)->Rbx) |
michael@0 | 28 | # define RSP_sig(p) ((p)->Rsp) |
michael@0 | 29 | # define RBP_sig(p) ((p)->Rbp) |
michael@0 | 30 | # define RSI_sig(p) ((p)->Rsi) |
michael@0 | 31 | # define RDI_sig(p) ((p)->Rdi) |
michael@0 | 32 | # define R8_sig(p) ((p)->R8) |
michael@0 | 33 | # define R9_sig(p) ((p)->R9) |
michael@0 | 34 | # define R10_sig(p) ((p)->R10) |
michael@0 | 35 | # define R11_sig(p) ((p)->R11) |
michael@0 | 36 | # define R12_sig(p) ((p)->R12) |
michael@0 | 37 | # define R13_sig(p) ((p)->R13) |
michael@0 | 38 | # define R14_sig(p) ((p)->R14) |
michael@0 | 39 | # define R15_sig(p) ((p)->R15) |
michael@0 | 40 | #elif defined(__OpenBSD__) |
michael@0 | 41 | # define XMM_sig(p,i) ((p)->sc_fpstate->fx_xmm[i]) |
michael@0 | 42 | # define EIP_sig(p) ((p)->sc_eip) |
michael@0 | 43 | # define RIP_sig(p) ((p)->sc_rip) |
michael@0 | 44 | # define RAX_sig(p) ((p)->sc_rax) |
michael@0 | 45 | # define RCX_sig(p) ((p)->sc_rcx) |
michael@0 | 46 | # define RDX_sig(p) ((p)->sc_rdx) |
michael@0 | 47 | # define RBX_sig(p) ((p)->sc_rbx) |
michael@0 | 48 | # define RSP_sig(p) ((p)->sc_rsp) |
michael@0 | 49 | # define RBP_sig(p) ((p)->sc_rbp) |
michael@0 | 50 | # define RSI_sig(p) ((p)->sc_rsi) |
michael@0 | 51 | # define RDI_sig(p) ((p)->sc_rdi) |
michael@0 | 52 | # define R8_sig(p) ((p)->sc_r8) |
michael@0 | 53 | # define R9_sig(p) ((p)->sc_r9) |
michael@0 | 54 | # define R10_sig(p) ((p)->sc_r10) |
michael@0 | 55 | # define R11_sig(p) ((p)->sc_r11) |
michael@0 | 56 | # define R12_sig(p) ((p)->sc_r12) |
michael@0 | 57 | # define R13_sig(p) ((p)->sc_r13) |
michael@0 | 58 | # define R14_sig(p) ((p)->sc_r14) |
michael@0 | 59 | # define R15_sig(p) ((p)->sc_r15) |
michael@0 | 60 | #elif defined(__linux__) || defined(SOLARIS) |
michael@0 | 61 | # if defined(__linux__) |
michael@0 | 62 | # define XMM_sig(p,i) ((p)->uc_mcontext.fpregs->_xmm[i]) |
michael@0 | 63 | # define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_EIP]) |
michael@0 | 64 | # else |
michael@0 | 65 | # define XMM_sig(p,i) ((p)->uc_mcontext.fpregs.fp_reg_set.fpchip_state.xmm[i]) |
michael@0 | 66 | # define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_PC]) |
michael@0 | 67 | # endif |
michael@0 | 68 | # define RIP_sig(p) ((p)->uc_mcontext.gregs[REG_RIP]) |
michael@0 | 69 | # define RAX_sig(p) ((p)->uc_mcontext.gregs[REG_RAX]) |
michael@0 | 70 | # define RCX_sig(p) ((p)->uc_mcontext.gregs[REG_RCX]) |
michael@0 | 71 | # define RDX_sig(p) ((p)->uc_mcontext.gregs[REG_RDX]) |
michael@0 | 72 | # define RBX_sig(p) ((p)->uc_mcontext.gregs[REG_RBX]) |
michael@0 | 73 | # define RSP_sig(p) ((p)->uc_mcontext.gregs[REG_RSP]) |
michael@0 | 74 | # define RBP_sig(p) ((p)->uc_mcontext.gregs[REG_RBP]) |
michael@0 | 75 | # define RSI_sig(p) ((p)->uc_mcontext.gregs[REG_RSI]) |
michael@0 | 76 | # define RDI_sig(p) ((p)->uc_mcontext.gregs[REG_RDI]) |
michael@0 | 77 | # define R8_sig(p) ((p)->uc_mcontext.gregs[REG_R8]) |
michael@0 | 78 | # define R9_sig(p) ((p)->uc_mcontext.gregs[REG_R9]) |
michael@0 | 79 | # define R10_sig(p) ((p)->uc_mcontext.gregs[REG_R10]) |
michael@0 | 80 | # define R11_sig(p) ((p)->uc_mcontext.gregs[REG_R11]) |
michael@0 | 81 | # define R12_sig(p) ((p)->uc_mcontext.gregs[REG_R12]) |
michael@0 | 82 | # define R13_sig(p) ((p)->uc_mcontext.gregs[REG_R13]) |
michael@0 | 83 | # define R14_sig(p) ((p)->uc_mcontext.gregs[REG_R14]) |
michael@0 | 84 | # if defined(__linux__) && defined(__arm__) |
michael@0 | 85 | # define R15_sig(p) ((p)->uc_mcontext.arm_pc) |
michael@0 | 86 | # else |
michael@0 | 87 | # define R15_sig(p) ((p)->uc_mcontext.gregs[REG_R15]) |
michael@0 | 88 | # endif |
michael@0 | 89 | #elif defined(__NetBSD__) |
michael@0 | 90 | # define XMM_sig(p,i) (((struct fxsave64 *)(p)->uc_mcontext.__fpregs)->fx_xmm[i]) |
michael@0 | 91 | # define EIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EIP]) |
michael@0 | 92 | # define RIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RIP]) |
michael@0 | 93 | # define RAX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RAX]) |
michael@0 | 94 | # define RCX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RCX]) |
michael@0 | 95 | # define RDX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RDX]) |
michael@0 | 96 | # define RBX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RBX]) |
michael@0 | 97 | # define RSP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSP]) |
michael@0 | 98 | # define RBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RBP]) |
michael@0 | 99 | # define RSI_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSI]) |
michael@0 | 100 | # define RDI_sig(p) ((p)->uc_mcontext.__gregs[_REG_RDI]) |
michael@0 | 101 | # define R8_sig(p) ((p)->uc_mcontext.__gregs[_REG_R8]) |
michael@0 | 102 | # define R9_sig(p) ((p)->uc_mcontext.__gregs[_REG_R9]) |
michael@0 | 103 | # define R10_sig(p) ((p)->uc_mcontext.__gregs[_REG_R10]) |
michael@0 | 104 | # define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11]) |
michael@0 | 105 | # define R12_sig(p) ((p)->uc_mcontext.__gregs[_REG_R12]) |
michael@0 | 106 | # define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13]) |
michael@0 | 107 | # define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14]) |
michael@0 | 108 | # define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15]) |
michael@0 | 109 | #elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) |
michael@0 | 110 | # if defined(__DragonFly__) |
michael@0 | 111 | # define XMM_sig(p,i) (((union savefpu *)(p)->uc_mcontext.mc_fpregs)->sv_xmm.sv_xmm[i]) |
michael@0 | 112 | # else |
michael@0 | 113 | # define XMM_sig(p,i) (((struct savefpu *)(p)->uc_mcontext.mc_fpstate)->sv_xmm[i]) |
michael@0 | 114 | # endif |
michael@0 | 115 | # define EIP_sig(p) ((p)->uc_mcontext.mc_eip) |
michael@0 | 116 | # define RIP_sig(p) ((p)->uc_mcontext.mc_rip) |
michael@0 | 117 | # define RAX_sig(p) ((p)->uc_mcontext.mc_rax) |
michael@0 | 118 | # define RCX_sig(p) ((p)->uc_mcontext.mc_rcx) |
michael@0 | 119 | # define RDX_sig(p) ((p)->uc_mcontext.mc_rdx) |
michael@0 | 120 | # define RBX_sig(p) ((p)->uc_mcontext.mc_rbx) |
michael@0 | 121 | # define RSP_sig(p) ((p)->uc_mcontext.mc_rsp) |
michael@0 | 122 | # define RBP_sig(p) ((p)->uc_mcontext.mc_rbp) |
michael@0 | 123 | # define RSI_sig(p) ((p)->uc_mcontext.mc_rsi) |
michael@0 | 124 | # define RDI_sig(p) ((p)->uc_mcontext.mc_rdi) |
michael@0 | 125 | # define R8_sig(p) ((p)->uc_mcontext.mc_r8) |
michael@0 | 126 | # define R9_sig(p) ((p)->uc_mcontext.mc_r9) |
michael@0 | 127 | # define R10_sig(p) ((p)->uc_mcontext.mc_r10) |
michael@0 | 128 | # define R11_sig(p) ((p)->uc_mcontext.mc_r11) |
michael@0 | 129 | # define R12_sig(p) ((p)->uc_mcontext.mc_r12) |
michael@0 | 130 | # define R13_sig(p) ((p)->uc_mcontext.mc_r13) |
michael@0 | 131 | # define R14_sig(p) ((p)->uc_mcontext.mc_r14) |
michael@0 | 132 | # if defined(__FreeBSD__) && defined(__arm__) |
michael@0 | 133 | # define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15]) |
michael@0 | 134 | # else |
michael@0 | 135 | # define R15_sig(p) ((p)->uc_mcontext.mc_r15) |
michael@0 | 136 | # endif |
michael@0 | 137 | #elif defined(XP_MACOSX) |
michael@0 | 138 | // Mach requires special treatment. |
michael@0 | 139 | #else |
michael@0 | 140 | # error "Don't know how to read/write to the thread state via the mcontext_t." |
michael@0 | 141 | #endif |
michael@0 | 142 | |
michael@0 | 143 | // For platforms where the signal/exception handler runs on the same |
michael@0 | 144 | // thread/stack as the victim (Unix and Windows), we can use TLS to find any |
michael@0 | 145 | // currently executing asm.js code. |
michael@0 | 146 | #if !defined(XP_MACOSX) |
michael@0 | 147 | static AsmJSActivation * |
michael@0 | 148 | InnermostAsmJSActivation() |
michael@0 | 149 | { |
michael@0 | 150 | PerThreadData *threadData = TlsPerThreadData.get(); |
michael@0 | 151 | if (!threadData) |
michael@0 | 152 | return nullptr; |
michael@0 | 153 | |
michael@0 | 154 | return threadData->asmJSActivationStackFromOwnerThread(); |
michael@0 | 155 | } |
michael@0 | 156 | |
michael@0 | 157 | static JSRuntime * |
michael@0 | 158 | RuntimeForCurrentThread() |
michael@0 | 159 | { |
michael@0 | 160 | PerThreadData *threadData = TlsPerThreadData.get(); |
michael@0 | 161 | if (!threadData) |
michael@0 | 162 | return nullptr; |
michael@0 | 163 | |
michael@0 | 164 | return threadData->runtimeIfOnOwnerThread(); |
michael@0 | 165 | } |
michael@0 | 166 | #endif // !defined(XP_MACOSX) |
michael@0 | 167 | |
michael@0 | 168 | // Crashing inside the signal handler can cause the handler to be recursively |
michael@0 | 169 | // invoked, eventually blowing the stack without actually showing a crash |
michael@0 | 170 | // report dialog via Breakpad. To guard against this we watch for such |
michael@0 | 171 | // recursion and fall through to the next handler immediately rather than |
michael@0 | 172 | // trying to handle it. |
michael@0 | 173 | class AutoSetHandlingSignal |
michael@0 | 174 | { |
michael@0 | 175 | JSRuntime *rt; |
michael@0 | 176 | |
michael@0 | 177 | public: |
michael@0 | 178 | AutoSetHandlingSignal(JSRuntime *rt) |
michael@0 | 179 | : rt(rt) |
michael@0 | 180 | { |
michael@0 | 181 | JS_ASSERT(!rt->handlingSignal); |
michael@0 | 182 | rt->handlingSignal = true; |
michael@0 | 183 | } |
michael@0 | 184 | |
michael@0 | 185 | ~AutoSetHandlingSignal() |
michael@0 | 186 | { |
michael@0 | 187 | JS_ASSERT(rt->handlingSignal); |
michael@0 | 188 | rt->handlingSignal = false; |
michael@0 | 189 | } |
michael@0 | 190 | }; |
michael@0 | 191 | |
michael@0 | 192 | #if defined(JS_CODEGEN_X64) |
michael@0 | 193 | template <class T> |
michael@0 | 194 | static void |
michael@0 | 195 | SetXMMRegToNaN(bool isFloat32, T *xmm_reg) |
michael@0 | 196 | { |
michael@0 | 197 | if (isFloat32) { |
michael@0 | 198 | JS_STATIC_ASSERT(sizeof(T) == 4 * sizeof(float)); |
michael@0 | 199 | float *floats = reinterpret_cast<float*>(xmm_reg); |
michael@0 | 200 | floats[0] = GenericNaN(); |
michael@0 | 201 | floats[1] = 0; |
michael@0 | 202 | floats[2] = 0; |
michael@0 | 203 | floats[3] = 0; |
michael@0 | 204 | } else { |
michael@0 | 205 | JS_STATIC_ASSERT(sizeof(T) == 2 * sizeof(double)); |
michael@0 | 206 | double *dbls = reinterpret_cast<double*>(xmm_reg); |
michael@0 | 207 | dbls[0] = GenericNaN(); |
michael@0 | 208 | dbls[1] = 0; |
michael@0 | 209 | } |
michael@0 | 210 | } |
michael@0 | 211 | |
// Adapter exposing the module's heap-access table through operator[], as
// required by mozilla::BinarySearch: element i is the code offset of the
// i'th recorded heap access. Holds a non-owning reference; the module must
// outlive the adapter.
struct GetHeapAccessOffset
{
    const AsmJSModule &module;
    explicit GetHeapAccessOffset(const AsmJSModule &module) : module(module) {}
    uintptr_t operator[](size_t index) const {
        return module.heapAccess(index).offset();
    }
};
michael@0 | 220 | |
michael@0 | 221 | // Perform a binary search on the projected offsets of the known heap accesses |
michael@0 | 222 | // in the module. |
michael@0 | 223 | static const AsmJSHeapAccess * |
michael@0 | 224 | LookupHeapAccess(const AsmJSModule &module, uint8_t *pc) |
michael@0 | 225 | { |
michael@0 | 226 | JS_ASSERT(module.containsPC(pc)); |
michael@0 | 227 | |
michael@0 | 228 | uintptr_t pcOff = pc - module.codeBase(); |
michael@0 | 229 | |
michael@0 | 230 | size_t match; |
michael@0 | 231 | if (!BinarySearch(GetHeapAccessOffset(module), 0, module.numHeapAccesses(), pcOff, &match)) |
michael@0 | 232 | return nullptr; |
michael@0 | 233 | |
michael@0 | 234 | return &module.heapAccess(match); |
michael@0 | 235 | } |
michael@0 | 236 | #endif |
michael@0 | 237 | |
michael@0 | 238 | #if defined(XP_WIN) |
michael@0 | 239 | # include "jswin.h" |
michael@0 | 240 | #else |
michael@0 | 241 | # include <signal.h> |
michael@0 | 242 | # include <sys/mman.h> |
michael@0 | 243 | #endif |
michael@0 | 244 | |
michael@0 | 245 | #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) |
michael@0 | 246 | # include <sys/ucontext.h> // for ucontext_t, mcontext_t |
michael@0 | 247 | #endif |
michael@0 | 248 | |
michael@0 | 249 | #if defined(JS_CODEGEN_X64) |
michael@0 | 250 | # if defined(__DragonFly__) |
michael@0 | 251 | # include <machine/npx.h> // for union savefpu |
michael@0 | 252 | # elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \ |
michael@0 | 253 | defined(__NetBSD__) || defined(__OpenBSD__) |
michael@0 | 254 | # include <machine/fpu.h> // for struct savefpu/fxsave64 |
michael@0 | 255 | # endif |
michael@0 | 256 | #endif |
michael@0 | 257 | |
#if defined(ANDROID)
// Not all versions of the Android NDK define ucontext_t or mcontext_t.
// Detect this and provide custom but compatible definitions. Note that these
// follow the GLibc naming convention to access register values from
// mcontext_t.
//
// See: https://chromiumcodereview.appspot.com/10829122/
// See: http://code.google.com/p/android/issues/detail?id=34784
# if !defined(__BIONIC_HAVE_UCONTEXT_T)
#  if defined(__arm__)

// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#   if !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#    include <asm/sigcontext.h>
#   endif

typedef struct sigcontext mcontext_t;

// Minimal ucontext layout: only the leading fields up to uc_mcontext are
// declared, which is all this file reads.
typedef struct ucontext {
    uint32_t uc_flags;
    struct ucontext* uc_link;
    stack_t uc_stack;
    mcontext_t uc_mcontext;
    // Other fields are not used so don't define them here.
} ucontext_t;

#  elif defined(__i386__)
// x86 version for Android.
typedef struct {
    uint32_t gregs[19];
    void* fpregs;
    uint32_t oldmask;
    uint32_t cr2;
} mcontext_t;

typedef uint32_t kernel_sigset_t[2];  // x86 kernel uses 64-bit signal masks
typedef struct ucontext {
    uint32_t uc_flags;
    struct ucontext* uc_link;
    stack_t uc_stack;
    mcontext_t uc_mcontext;
    // Other fields are not used here, so don't define them.
} ucontext_t;
// Index of EIP in gregs above, matching the GLibc REG_EIP constant.
enum { REG_EIP = 14 };
#  endif  // defined(__i386__)
# endif  // !defined(__BIONIC_HAVE_UCONTEXT_T)
#endif  // defined(ANDROID)
michael@0 | 306 | |
#if defined(ANDROID) && defined(MOZ_LINKER)
// Apparently, on some Android systems, the signal handler is always passed
// nullptr as the faulting address. This would cause the asm.js signal handler
// to think that a safe out-of-bounds access was a nullptr-deref. This
// brokenness is already detected by ElfLoader (enabled by MOZ_LINKER), so
// reuse that check to disable asm.js compilation on systems where the signal
// handler is broken.
extern "C" MFBT_API bool IsSignalHandlingBroken();
#else
// Everywhere else the faulting address reported to the handler is usable.
static bool IsSignalHandlingBroken() { return false; }
#endif // defined(ANDROID) && defined(MOZ_LINKER)
michael@0 | 318 | |
michael@0 | 319 | #if !defined(XP_WIN) |
michael@0 | 320 | # define CONTEXT ucontext_t |
michael@0 | 321 | #endif |
michael@0 | 322 | |
michael@0 | 323 | #if defined(JS_CPU_X64) |
michael@0 | 324 | # define PC_sig(p) RIP_sig(p) |
michael@0 | 325 | #elif defined(JS_CPU_X86) |
michael@0 | 326 | # define PC_sig(p) EIP_sig(p) |
michael@0 | 327 | #elif defined(JS_CPU_ARM) |
michael@0 | 328 | # define PC_sig(p) R15_sig(p) |
michael@0 | 329 | #endif |
michael@0 | 330 | |
michael@0 | 331 | static bool |
michael@0 | 332 | HandleSimulatorInterrupt(JSRuntime *rt, AsmJSActivation *activation, void *faultingAddress) |
michael@0 | 333 | { |
michael@0 | 334 | // If the ARM simulator is enabled, the pc is in the simulator C++ code and |
michael@0 | 335 | // not in the generated code, so we check the simulator's pc manually. Also |
michael@0 | 336 | // note that we can't simply use simulator->set_pc() here because the |
michael@0 | 337 | // simulator could be in the middle of an instruction. On ARM, the signal |
michael@0 | 338 | // handlers are currently only used for Odin code, see bug 964258. |
michael@0 | 339 | |
michael@0 | 340 | #ifdef JS_ARM_SIMULATOR |
michael@0 | 341 | const AsmJSModule &module = activation->module(); |
michael@0 | 342 | if (module.containsPC((void *)rt->mainThread.simulator()->get_pc()) && |
michael@0 | 343 | module.containsPC(faultingAddress)) |
michael@0 | 344 | { |
michael@0 | 345 | activation->setInterrupted(nullptr); |
michael@0 | 346 | int32_t nextpc = int32_t(module.interruptExit()); |
michael@0 | 347 | rt->mainThread.simulator()->set_resume_pc(nextpc); |
michael@0 | 348 | return true; |
michael@0 | 349 | } |
michael@0 | 350 | #endif |
michael@0 | 351 | return false; |
michael@0 | 352 | } |
michael@0 | 353 | |
michael@0 | 354 | #if !defined(XP_MACOSX) |
michael@0 | 355 | static uint8_t ** |
michael@0 | 356 | ContextToPC(CONTEXT *context) |
michael@0 | 357 | { |
michael@0 | 358 | JS_STATIC_ASSERT(sizeof(PC_sig(context)) == sizeof(void*)); |
michael@0 | 359 | return reinterpret_cast<uint8_t**>(&PC_sig(context)); |
michael@0 | 360 | } |
michael@0 | 361 | |
michael@0 | 362 | # if defined(JS_CODEGEN_X64) |
// Write the JS coercion of 'undefined' into 'reg' within the interrupted
// thread's context: NaN for a floating-point register (ToNumber/ToFloat32 of
// undefined), zero for a general-purpose register (ToInt32 of undefined).
// Crashes on any register this handler was never expected to see.
static void
SetRegisterToCoercedUndefined(CONTEXT *context, bool isFloat32, AnyRegister reg)
{
    if (reg.isFloat()) {
        switch (reg.fpu().code()) {
          case JSC::X86Registers::xmm0:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 0)); break;
          case JSC::X86Registers::xmm1:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 1)); break;
          case JSC::X86Registers::xmm2:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 2)); break;
          case JSC::X86Registers::xmm3:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 3)); break;
          case JSC::X86Registers::xmm4:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 4)); break;
          case JSC::X86Registers::xmm5:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 5)); break;
          case JSC::X86Registers::xmm6:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 6)); break;
          case JSC::X86Registers::xmm7:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 7)); break;
          case JSC::X86Registers::xmm8:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 8)); break;
          case JSC::X86Registers::xmm9:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 9)); break;
          case JSC::X86Registers::xmm10: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 10)); break;
          case JSC::X86Registers::xmm11: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 11)); break;
          case JSC::X86Registers::xmm12: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 12)); break;
          case JSC::X86Registers::xmm13: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 13)); break;
          case JSC::X86Registers::xmm14: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 14)); break;
          case JSC::X86Registers::xmm15: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 15)); break;
          default: MOZ_CRASH();
        }
    } else {
        // Note the x86 register names map to the 64-bit context slots here.
        switch (reg.gpr().code()) {
          case JSC::X86Registers::eax: RAX_sig(context) = 0; break;
          case JSC::X86Registers::ecx: RCX_sig(context) = 0; break;
          case JSC::X86Registers::edx: RDX_sig(context) = 0; break;
          case JSC::X86Registers::ebx: RBX_sig(context) = 0; break;
          case JSC::X86Registers::esp: RSP_sig(context) = 0; break;
          case JSC::X86Registers::ebp: RBP_sig(context) = 0; break;
          case JSC::X86Registers::esi: RSI_sig(context) = 0; break;
          case JSC::X86Registers::edi: RDI_sig(context) = 0; break;
          case JSC::X86Registers::r8:  R8_sig(context)  = 0; break;
          case JSC::X86Registers::r9:  R9_sig(context)  = 0; break;
          case JSC::X86Registers::r10: R10_sig(context) = 0; break;
          case JSC::X86Registers::r11: R11_sig(context) = 0; break;
          case JSC::X86Registers::r12: R12_sig(context) = 0; break;
          case JSC::X86Registers::r13: R13_sig(context) = 0; break;
          case JSC::X86Registers::r14: R14_sig(context) = 0; break;
          case JSC::X86Registers::r15: R15_sig(context) = 0; break;
          default: MOZ_CRASH();
        }
    }
}
michael@0 | 408 | # endif // JS_CODEGEN_X64 |
michael@0 | 409 | #endif // !XP_MACOSX |
michael@0 | 410 | |
michael@0 | 411 | #if defined(XP_WIN) |
michael@0 | 412 | |
// Windows exception filter body. Returns true when the access violation was
// caused by asm.js (an interrupt request or a safe out-of-bounds heap access)
// and the context has been fixed up so execution can resume; false to let the
// next handler run.
static bool
HandleException(PEXCEPTION_POINTERS exception)
{
    EXCEPTION_RECORD *record = exception->ExceptionRecord;
    CONTEXT *context = exception->ContextRecord;

    if (record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
        return false;

    uint8_t **ppc = ContextToPC(context);
    uint8_t *pc = *ppc;
    JS_ASSERT(pc == record->ExceptionAddress);

    // For an access violation, ExceptionInformation[0] is the read/write flag
    // and [1] is the faulting virtual address; bail if they are absent.
    if (record->NumberParameters < 2)
        return false;

    void *faultingAddress = (void*)record->ExceptionInformation[1];

    JSRuntime *rt = RuntimeForCurrentThread();

    // Don't allow recursive handling of signals, see AutoSetHandlingSignal.
    if (!rt || rt->handlingSignal)
        return false;
    AutoSetHandlingSignal handling(rt);

    // Give the JIT runtime first crack at the fault (e.g. its own guard pages).
    if (rt->jitRuntime() && rt->jitRuntime()->handleAccessViolation(rt, faultingAddress))
        return true;

    AsmJSActivation *activation = InnermostAsmJSActivation();
    if (!activation)
        return false;

    const AsmJSModule &module = activation->module();
    if (!module.containsPC(pc))
        return false;

    // If we faulted trying to execute code in 'module', this must be an
    // interrupt callback (see RequestInterruptForAsmJSCode). Redirect
    // execution to a trampoline which will call js::HandleExecutionInterrupt.
    // The trampoline will jump to activation->resumePC if execution isn't
    // interrupted.
    if (module.containsPC(faultingAddress)) {
        activation->setInterrupted(pc);
        *ppc = module.interruptExit();

        JSRuntime::AutoLockForInterrupt lock(rt);
        module.unprotectCode(rt);
        return true;
    }

# if defined(JS_CODEGEN_X64)
    // These checks aren't necessary, but, since we can, check anyway to make
    // sure we aren't covering up a real bug.
    if (!module.maybeHeap() ||
        faultingAddress < module.maybeHeap() ||
        faultingAddress >= module.maybeHeap() + AsmJSBufferProtectedSize)
    {
        return false;
    }

    const AsmJSHeapAccess *heapAccess = LookupHeapAccess(module, pc);
    if (!heapAccess)
        return false;

    // Also not necessary, but, since we can, do: ExceptionInformation[0] is 0
    // for a read, so it must agree with the recorded access kind.
    if (heapAccess->isLoad() != !record->ExceptionInformation[0])
        return false;

    // We now know that this is an out-of-bounds access made by an asm.js
    // load/store that we should handle. If this is a load, assign the
    // JS-defined result value to the destination register (ToInt32(undefined)
    // or ToNumber(undefined), determined by the type of the destination
    // register) and set the PC to the next op. Upon return from the handler,
    // execution will resume at this next PC.
    if (heapAccess->isLoad())
        SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
    *ppc += heapAccess->opLength();
    return true;
# else
    return false;
# endif
}
michael@0 | 495 | |
michael@0 | 496 | static LONG WINAPI |
michael@0 | 497 | AsmJSExceptionHandler(LPEXCEPTION_POINTERS exception) |
michael@0 | 498 | { |
michael@0 | 499 | if (HandleException(exception)) |
michael@0 | 500 | return EXCEPTION_CONTINUE_EXECUTION; |
michael@0 | 501 | |
michael@0 | 502 | // No need to worry about calling other handlers, the OS does this for us. |
michael@0 | 503 | return EXCEPTION_CONTINUE_SEARCH; |
michael@0 | 504 | } |
michael@0 | 505 | |
michael@0 | 506 | #elif defined(XP_MACOSX) |
michael@0 | 507 | # include <mach/exc.h> |
michael@0 | 508 | |
// Mac variant: return the address of the instruction-pointer slot inside a
// Mach thread state (64- or 32-bit, per build target).
static uint8_t **
ContextToPC(x86_thread_state_t &state)
{
# if defined(JS_CODEGEN_X64)
    JS_STATIC_ASSERT(sizeof(state.uts.ts64.__rip) == sizeof(void*));
    return reinterpret_cast<uint8_t**>(&state.uts.ts64.__rip);
# else
    JS_STATIC_ASSERT(sizeof(state.uts.ts32.__eip) == sizeof(void*));
    return reinterpret_cast<uint8_t**>(&state.uts.ts32.__eip);
# endif
}
michael@0 | 520 | |
michael@0 | 521 | # if defined(JS_CODEGEN_X64) |
// Mac variant of SetRegisterToCoercedUndefined. GPR writes go through the
// caller-provided 'state'; FP registers are not part of x86_thread_state64_t,
// so they are fetched and stored via thread_get_state/thread_set_state on the
// runtime's thread port. Returns false if either Mach call fails.
static bool
SetRegisterToCoercedUndefined(mach_port_t rtThread, x86_thread_state64_t &state,
                              const AsmJSHeapAccess &heapAccess)
{
    if (heapAccess.loadedReg().isFloat()) {
        kern_return_t kret;

        x86_float_state64_t fstate;
        unsigned int count = x86_FLOAT_STATE64_COUNT;
        kret = thread_get_state(rtThread, x86_FLOAT_STATE64, (thread_state_t) &fstate, &count);
        if (kret != KERN_SUCCESS)
            return false;

        bool f32 = heapAccess.isFloat32Load();
        switch (heapAccess.loadedReg().fpu().code()) {
          case JSC::X86Registers::xmm0:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm0); break;
          case JSC::X86Registers::xmm1:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm1); break;
          case JSC::X86Registers::xmm2:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm2); break;
          case JSC::X86Registers::xmm3:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm3); break;
          case JSC::X86Registers::xmm4:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm4); break;
          case JSC::X86Registers::xmm5:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm5); break;
          case JSC::X86Registers::xmm6:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm6); break;
          case JSC::X86Registers::xmm7:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm7); break;
          case JSC::X86Registers::xmm8:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm8); break;
          case JSC::X86Registers::xmm9:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm9); break;
          case JSC::X86Registers::xmm10: SetXMMRegToNaN(f32, &fstate.__fpu_xmm10); break;
          case JSC::X86Registers::xmm11: SetXMMRegToNaN(f32, &fstate.__fpu_xmm11); break;
          case JSC::X86Registers::xmm12: SetXMMRegToNaN(f32, &fstate.__fpu_xmm12); break;
          case JSC::X86Registers::xmm13: SetXMMRegToNaN(f32, &fstate.__fpu_xmm13); break;
          case JSC::X86Registers::xmm14: SetXMMRegToNaN(f32, &fstate.__fpu_xmm14); break;
          case JSC::X86Registers::xmm15: SetXMMRegToNaN(f32, &fstate.__fpu_xmm15); break;
          default: MOZ_CRASH();
        }

        kret = thread_set_state(rtThread, x86_FLOAT_STATE64, (thread_state_t)&fstate, x86_FLOAT_STATE64_COUNT);
        if (kret != KERN_SUCCESS)
            return false;
    } else {
        switch (heapAccess.loadedReg().gpr().code()) {
          case JSC::X86Registers::eax: state.__rax = 0; break;
          case JSC::X86Registers::ecx: state.__rcx = 0; break;
          case JSC::X86Registers::edx: state.__rdx = 0; break;
          case JSC::X86Registers::ebx: state.__rbx = 0; break;
          case JSC::X86Registers::esp: state.__rsp = 0; break;
          case JSC::X86Registers::ebp: state.__rbp = 0; break;
          case JSC::X86Registers::esi: state.__rsi = 0; break;
          case JSC::X86Registers::edi: state.__rdi = 0; break;
          case JSC::X86Registers::r8:  state.__r8  = 0; break;
          case JSC::X86Registers::r9:  state.__r9  = 0; break;
          case JSC::X86Registers::r10: state.__r10 = 0; break;
          case JSC::X86Registers::r11: state.__r11 = 0; break;
          case JSC::X86Registers::r12: state.__r12 = 0; break;
          case JSC::X86Registers::r13: state.__r13 = 0; break;
          case JSC::X86Registers::r14: state.__r14 = 0; break;
          case JSC::X86Registers::r15: state.__r15 = 0; break;
          default: MOZ_CRASH();
        }
    }
    return true;
}
michael@0 | 582 | # endif |
michael@0 | 583 | |
// This definition was generated by mig (the Mach Interface Generator) for the
// routine 'exception_raise' (exc.defs).
//
// The layout must match the kernel's wire format exactly, hence #pragma
// pack(4) and the fixed field order. The 'code' entries are int64_t because
// this file installs its handler with MACH_EXCEPTION_CODES (see install()),
// which requests 64-bit exception codes.
#pragma pack(4)
typedef struct {
    mach_msg_header_t Head;
    /* start of the kernel processed data */
    mach_msg_body_t msgh_body;
    mach_msg_port_descriptor_t thread;   // port naming the faulting thread
    mach_msg_port_descriptor_t task;     // port naming the faulting task
    /* end of the kernel processed data */
    NDR_record_t NDR;
    exception_type_t exception;          // e.g. EXC_BAD_ACCESS
    mach_msg_type_number_t codeCnt;      // number of valid entries in 'code'
    int64_t code[2];                     // code[1] carries the faulting address
} Request__mach_exception_raise_t;
#pragma pack()
michael@0 | 600 | |
// The full Mach message also includes a trailer.
//
// Receive buffer for one exception message: the mig-generated request body
// plus the trailer that mach_msg appends on the receive side.
struct ExceptionRequest
{
    Request__mach_exception_raise_t body;
    mach_msg_trailer_t trailer;
};
michael@0 | 607 | |
// Handle one EXC_BAD_ACCESS exception message on behalf of the JSRuntime's
// main thread. Runs on the dedicated handler thread while the faulting thread
// is suspended by the kernel, so that thread's register state can be read and
// rewritten here. Returns true iff the fault was recognized (Ion access
// violation, interrupt request, or — on x64 — an asm.js out-of-bounds heap
// access) and the thread state was patched so execution can safely resume;
// false makes the caller reply KERN_FAILURE so the kernel keeps searching for
// another handler (e.g. a crash reporter).
static bool
HandleMachException(JSRuntime *rt, const ExceptionRequest &request)
{
    // Don't allow recursive handling of signals, see AutoSetHandlingSignal.
    if (rt->handlingSignal)
        return false;
    AutoSetHandlingSignal handling(rt);

    // Get the port of the JSRuntime's thread from the message.
    mach_port_t rtThread = request.body.thread.name;

    // Read out the JSRuntime thread's register state.
    x86_thread_state_t state;
    unsigned int count = x86_THREAD_STATE_COUNT;
    kern_return_t kret;
    kret = thread_get_state(rtThread, x86_THREAD_STATE, (thread_state_t)&state, &count);
    if (kret != KERN_SUCCESS)
        return false;

    uint8_t **ppc = ContextToPC(state);
    uint8_t *pc = *ppc;

    // Only bad-access faults with both exception codes present are ours;
    // code[1] is used below as the faulting address.
    if (request.body.exception != EXC_BAD_ACCESS || request.body.codeCnt != 2)
        return false;

    void *faultingAddress = (void*)request.body.code[1];

    // Give Ion's access-violation handler first crack at the fault.
    if (rt->jitRuntime() && rt->jitRuntime()->handleAccessViolation(rt, faultingAddress))
        return true;

    // Everything below only applies while asm.js code is on the stack.
    AsmJSActivation *activation = rt->mainThread.asmJSActivationStackFromAnyThread();
    if (!activation)
        return false;

    const AsmJSModule &module = activation->module();
    if (HandleSimulatorInterrupt(rt, activation, faultingAddress)) {
        JSRuntime::AutoLockForInterrupt lock(rt);
        module.unprotectCode(rt);
        return true;
    }

    if (!module.containsPC(pc))
        return false;

    // If we faulted trying to execute code in 'module', this must be an
    // interrupt callback (see RequestInterruptForAsmJSCode). Redirect
    // execution to a trampoline which will call js::HandleExecutionInterrupt.
    // The trampoline will jump to activation->resumePC if execution isn't
    // interrupted.
    if (module.containsPC(faultingAddress)) {
        activation->setInterrupted(pc);
        *ppc = module.interruptExit();

        JSRuntime::AutoLockForInterrupt lock(rt);
        module.unprotectCode(rt);

        // Update the thread state with the new pc.
        kret = thread_set_state(rtThread, x86_THREAD_STATE, (thread_state_t)&state, x86_THREAD_STATE_COUNT);
        return kret == KERN_SUCCESS;
    }

# if defined(JS_CODEGEN_X64)
    // These checks aren't necessary, but, since we can, check anyway to make
    // sure we aren't covering up a real bug.
    if (!module.maybeHeap() ||
        faultingAddress < module.maybeHeap() ||
        faultingAddress >= module.maybeHeap() + AsmJSBufferProtectedSize)
    {
        return false;
    }

    const AsmJSHeapAccess *heapAccess = LookupHeapAccess(module, pc);
    if (!heapAccess)
        return false;

    // We now know that this is an out-of-bounds access made by an asm.js
    // load/store that we should handle. If this is a load, assign the
    // JS-defined result value to the destination register (ToInt32(undefined)
    // or ToNumber(undefined), determined by the type of the destination
    // register) and set the PC to the next op. Upon return from the handler,
    // execution will resume at this next PC.
    if (heapAccess->isLoad()) {
        if (!SetRegisterToCoercedUndefined(rtThread, state.uts.ts64, *heapAccess))
            return false;
    }
    *ppc += heapAccess->opLength();

    // Update the thread state with the new pc.
    kret = thread_set_state(rtThread, x86_THREAD_STATE, (thread_state_t)&state, x86_THREAD_STATE_COUNT);
    if (kret != KERN_SUCCESS)
        return false;

    return true;
# else
    // Out-of-bounds heap accesses are only resolved via signals on x64.
    return false;
# endif
}
michael@0 | 705 | |
// Message id of an incoming 'exception_raise' request on the handler port.
// Taken from mach_exc in /usr/include/mach/mach_exc.defs.
static const mach_msg_id_t sExceptionId = 2405;

// Message id used by uninstall() to ask the handler thread to exit its loop.
// The choice of id here is arbitrary, the only constraint is that sQuitId != sExceptionId.
static const mach_msg_id_t sQuitId = 42;
michael@0 | 711 | |
michael@0 | 712 | void |
michael@0 | 713 | AsmJSMachExceptionHandlerThread(void *threadArg) |
michael@0 | 714 | { |
michael@0 | 715 | JSRuntime *rt = reinterpret_cast<JSRuntime*>(threadArg); |
michael@0 | 716 | mach_port_t port = rt->asmJSMachExceptionHandler.port(); |
michael@0 | 717 | kern_return_t kret; |
michael@0 | 718 | |
michael@0 | 719 | while(true) { |
michael@0 | 720 | ExceptionRequest request; |
michael@0 | 721 | kret = mach_msg(&request.body.Head, MACH_RCV_MSG, 0, sizeof(request), |
michael@0 | 722 | port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); |
michael@0 | 723 | |
michael@0 | 724 | // If we fail even receiving the message, we can't even send a reply! |
michael@0 | 725 | // Rather than hanging the faulting thread (hanging the browser), crash. |
michael@0 | 726 | if (kret != KERN_SUCCESS) { |
michael@0 | 727 | fprintf(stderr, "AsmJSMachExceptionHandlerThread: mach_msg failed with %d\n", (int)kret); |
michael@0 | 728 | MOZ_CRASH(); |
michael@0 | 729 | } |
michael@0 | 730 | |
michael@0 | 731 | // There are only two messages we should be receiving: an exception |
michael@0 | 732 | // message that occurs when the runtime's thread faults and the quit |
michael@0 | 733 | // message sent when the runtime is shutting down. |
michael@0 | 734 | if (request.body.Head.msgh_id == sQuitId) |
michael@0 | 735 | break; |
michael@0 | 736 | if (request.body.Head.msgh_id != sExceptionId) { |
michael@0 | 737 | fprintf(stderr, "Unexpected msg header id %d\n", (int)request.body.Head.msgh_bits); |
michael@0 | 738 | MOZ_CRASH(); |
michael@0 | 739 | } |
michael@0 | 740 | |
michael@0 | 741 | // Some thread just commited an EXC_BAD_ACCESS and has been suspended by |
michael@0 | 742 | // the kernel. The kernel is waiting for us to reply with instructions. |
michael@0 | 743 | // Our default is the "not handled" reply (by setting the RetCode field |
michael@0 | 744 | // of the reply to KERN_FAILURE) which tells the kernel to continue |
michael@0 | 745 | // searching at the process and system level. If this is an asm.js |
michael@0 | 746 | // expected exception, we handle it and return KERN_SUCCESS. |
michael@0 | 747 | bool handled = HandleMachException(rt, request); |
michael@0 | 748 | kern_return_t replyCode = handled ? KERN_SUCCESS : KERN_FAILURE; |
michael@0 | 749 | |
michael@0 | 750 | // This magic incantation to send a reply back to the kernel was derived |
michael@0 | 751 | // from the exc_server generated by 'mig -v /usr/include/mach/mach_exc.defs'. |
michael@0 | 752 | __Reply__exception_raise_t reply; |
michael@0 | 753 | reply.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.body.Head.msgh_bits), 0); |
michael@0 | 754 | reply.Head.msgh_size = sizeof(reply); |
michael@0 | 755 | reply.Head.msgh_remote_port = request.body.Head.msgh_remote_port; |
michael@0 | 756 | reply.Head.msgh_local_port = MACH_PORT_NULL; |
michael@0 | 757 | reply.Head.msgh_id = request.body.Head.msgh_id + 100; |
michael@0 | 758 | reply.NDR = NDR_record; |
michael@0 | 759 | reply.RetCode = replyCode; |
michael@0 | 760 | mach_msg(&reply.Head, MACH_SEND_MSG, sizeof(reply), 0, MACH_PORT_NULL, |
michael@0 | 761 | MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); |
michael@0 | 762 | } |
michael@0 | 763 | } |
michael@0 | 764 | |
michael@0 | 765 | AsmJSMachExceptionHandler::AsmJSMachExceptionHandler() |
michael@0 | 766 | : installed_(false), |
michael@0 | 767 | thread_(nullptr), |
michael@0 | 768 | port_(MACH_PORT_NULL) |
michael@0 | 769 | {} |
michael@0 | 770 | |
// Tear down whatever install() managed to set up, in reverse order: restore
// the thread's EXC_BAD_ACCESS exception port to MACH_PORT_NULL, ask the
// handler thread to exit and join it, then destroy the port. Each step is
// guarded so this is also safe as install()'s error-path cleanup when only
// some pieces exist.
//
// NOTE(review): the mach_thread_self() call assumes uninstall() runs on the
// same thread that called install() — confirm against callers.
void
AsmJSMachExceptionHandler::uninstall()
{
#ifdef JS_THREADSAFE
    if (installed_) {
        // Point this thread's bad-access exceptions back at "no handler".
        thread_port_t thread = mach_thread_self();
        kern_return_t kret = thread_set_exception_ports(thread,
                                                        EXC_MASK_BAD_ACCESS,
                                                        MACH_PORT_NULL,
                                                        EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
                                                        THREAD_STATE_NONE);
        // Balance the port right handed out by mach_thread_self().
        mach_port_deallocate(mach_task_self(), thread);
        if (kret != KERN_SUCCESS)
            MOZ_CRASH();
        installed_ = false;
    }
    if (thread_ != nullptr) {
        // Break the handler thread out of the mach_msg loop.
        mach_msg_header_t msg;
        msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
        msg.msgh_size = sizeof(msg);
        msg.msgh_remote_port = port_;
        msg.msgh_local_port = MACH_PORT_NULL;
        msg.msgh_reserved = 0;
        msg.msgh_id = sQuitId;
        kern_return_t kret = mach_msg(&msg, MACH_SEND_MSG, sizeof(msg), 0, MACH_PORT_NULL,
                                      MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        if (kret != KERN_SUCCESS) {
            // If the quit message can't be delivered the thread would hang
            // forever in mach_msg; crash rather than deadlock shutdown.
            fprintf(stderr, "AsmJSMachExceptionHandler: failed to send quit message: %d\n", (int)kret);
            MOZ_CRASH();
        }

        // Wait for the handler thread to complete before deallocating the port.
        PR_JoinThread(thread_);
        thread_ = nullptr;
    }
    if (port_ != MACH_PORT_NULL) {
        DebugOnly<kern_return_t> kret = mach_port_destroy(mach_task_self(), port_);
        JS_ASSERT(kret == KERN_SUCCESS);
        port_ = MACH_PORT_NULL;
    }
#else
    // Without threads, install() never succeeds, so there is nothing to undo.
    JS_ASSERT(!installed_);
#endif
}
michael@0 | 816 | |
// Set up per-runtime Mach exception handling: allocate a send/receive port,
// spawn the handler thread that services it (AsmJSMachExceptionHandlerThread),
// and redirect this thread's EXC_BAD_ACCESS exceptions to that port. Returns
// true on success; on any failure, rolls back partial state via uninstall()
// and returns false. Always fails when built without JS_THREADSAFE.
bool
AsmJSMachExceptionHandler::install(JSRuntime *rt)
{
#ifdef JS_THREADSAFE
    JS_ASSERT(!installed());
    kern_return_t kret;
    mach_port_t thread;

    // Get a port which can send and receive data.
    kret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port_);
    if (kret != KERN_SUCCESS)
        goto error;
    kret = mach_port_insert_right(mach_task_self(), port_, port_, MACH_MSG_TYPE_MAKE_SEND);
    if (kret != KERN_SUCCESS)
        goto error;

    // Create a thread to block on reading port_.
    thread_ = PR_CreateThread(PR_USER_THREAD, AsmJSMachExceptionHandlerThread, rt,
                              PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, 0);
    if (!thread_)
        goto error;

    // Direct exceptions on this thread to port_ (and thus our handler thread).
    // Note: we are totally clobbering any existing *thread* exception ports and
    // not even attempting to forward. Breakpad and gdb both use the *process*
    // exception ports which are only called if the thread doesn't handle the
    // exception, so we should be fine.
    thread = mach_thread_self();
    kret = thread_set_exception_ports(thread,
                                      EXC_MASK_BAD_ACCESS,
                                      port_,
                                      EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
                                      THREAD_STATE_NONE);
    // Balance the port right handed out by mach_thread_self().
    mach_port_deallocate(mach_task_self(), thread);
    if (kret != KERN_SUCCESS)
        goto error;

    installed_ = true;
    return true;

  error:
    uninstall();
    return false;
#else
    return false;
#endif
}
michael@0 | 864 | |
michael@0 | 865 | #else // If not Windows or Mac, assume Unix |
michael@0 | 866 | |
// Be very cautious and default to not handling; we don't want to accidentally
// silence real crashes from real bugs.
//
// Unix counterpart of the Mac exception path: runs inside the SIGSEGV handler
// on the faulting thread itself, patching the signal context rather than a
// remote thread's state. Returns true iff the fault was recognized (Ion
// access violation, interrupt request, or — on x64 — an asm.js out-of-bounds
// heap access) and the context was fixed up so execution can resume; false
// tells AsmJSFaultHandler to forward to the previous handler.
static bool
HandleSignal(int signum, siginfo_t *info, void *ctx)
{
    CONTEXT *context = (CONTEXT *)ctx;
    uint8_t **ppc = ContextToPC(context);
    uint8_t *pc = *ppc;

    void *faultingAddress = info->si_addr;

    JSRuntime *rt = RuntimeForCurrentThread();

    // Don't allow recursive handling of signals, see AutoSetHandlingSignal.
    if (!rt || rt->handlingSignal)
        return false;
    AutoSetHandlingSignal handling(rt);

    // Give Ion's access-violation handler first crack at the fault.
    if (rt->jitRuntime() && rt->jitRuntime()->handleAccessViolation(rt, faultingAddress))
        return true;

    // Everything below only applies while asm.js code is on the stack.
    AsmJSActivation *activation = InnermostAsmJSActivation();
    if (!activation)
        return false;

    const AsmJSModule &module = activation->module();
    if (HandleSimulatorInterrupt(rt, activation, faultingAddress)) {
        JSRuntime::AutoLockForInterrupt lock(rt);
        module.unprotectCode(rt);
        return true;
    }

    if (!module.containsPC(pc))
        return false;

    // If we faulted trying to execute code in 'module', this must be an
    // interrupt callback (see RequestInterruptForAsmJSCode). Redirect
    // execution to a trampoline which will call js::HandleExecutionInterrupt.
    // The trampoline will jump to activation->resumePC if execution isn't
    // interrupted.
    if (module.containsPC(faultingAddress)) {
        activation->setInterrupted(pc);
        *ppc = module.interruptExit();

        JSRuntime::AutoLockForInterrupt lock(rt);
        module.unprotectCode(rt);
        return true;
    }

# if defined(JS_CODEGEN_X64)
    // These checks aren't necessary, but, since we can, check anyway to make
    // sure we aren't covering up a real bug.
    if (!module.maybeHeap() ||
        faultingAddress < module.maybeHeap() ||
        faultingAddress >= module.maybeHeap() + AsmJSBufferProtectedSize)
    {
        return false;
    }

    const AsmJSHeapAccess *heapAccess = LookupHeapAccess(module, pc);
    if (!heapAccess)
        return false;

    // We now know that this is an out-of-bounds access made by an asm.js
    // load/store that we should handle. If this is a load, assign the
    // JS-defined result value to the destination register (ToInt32(undefined)
    // or ToNumber(undefined), determined by the type of the destination
    // register) and set the PC to the next op. Upon return from the handler,
    // execution will resume at this next PC.
    if (heapAccess->isLoad())
        SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
    *ppc += heapAccess->opLength();
    return true;
# else
    // Out-of-bounds heap accesses are only resolved via signals on x64.
    return false;
# endif
}
michael@0 | 944 | |
// The SIGSEGV disposition that was in effect before ours was installed (saved
// by sigaction() in EnsureAsmJSSignalHandlersInstalled); unrecognized faults
// are forwarded to it below.
static struct sigaction sPrevHandler;

// Process-wide SIGSEGV handler installed by EnsureAsmJSSignalHandlersInstalled.
static void
AsmJSFaultHandler(int signum, siginfo_t *info, void *context)
{
    if (HandleSignal(signum, info, context))
        return;

    // This signal is not for any asm.js code we expect, so we need to forward
    // the signal to the next handler. If there is no next handler (SIG_IGN or
    // SIG_DFL), then it's time to crash. To do this, we set the signal back to
    // its original disposition and return. This will cause the faulting op to
    // be re-executed which will crash in the normal way. The advantage of
    // doing this over calling _exit() is that we remove ourselves from the
    // crash stack which improves crash reports. If there is a next handler,
    // call it. It will either crash synchronously, fix up the instruction so
    // that execution can continue and return, or trigger a crash by returning
    // the signal to its original disposition and returning.
    //
    // Note: the order of these tests matter.
    if (sPrevHandler.sa_flags & SA_SIGINFO)
        sPrevHandler.sa_sigaction(signum, info, context);
    else if (sPrevHandler.sa_handler == SIG_DFL || sPrevHandler.sa_handler == SIG_IGN)
        sigaction(signum, &sPrevHandler, nullptr);
    else
        sPrevHandler.sa_handler(signum);
}
michael@0 | 972 | #endif |
michael@0 | 973 | |
michael@0 | 974 | #if !defined(XP_MACOSX) |
michael@0 | 975 | static bool sHandlersInstalled = false; |
michael@0 | 976 | #endif |
michael@0 | 977 | |
michael@0 | 978 | bool |
michael@0 | 979 | js::EnsureAsmJSSignalHandlersInstalled(JSRuntime *rt) |
michael@0 | 980 | { |
michael@0 | 981 | if (IsSignalHandlingBroken()) |
michael@0 | 982 | return false; |
michael@0 | 983 | |
michael@0 | 984 | #if defined(XP_MACOSX) |
michael@0 | 985 | // On OSX, each JSRuntime gets its own handler. |
michael@0 | 986 | return rt->asmJSMachExceptionHandler.installed() || rt->asmJSMachExceptionHandler.install(rt); |
michael@0 | 987 | #else |
michael@0 | 988 | // Assume Windows or Unix. For these platforms, there is a single, |
michael@0 | 989 | // process-wide signal handler installed. Take care to only install it once. |
michael@0 | 990 | if (sHandlersInstalled) |
michael@0 | 991 | return true; |
michael@0 | 992 | |
michael@0 | 993 | # if defined(XP_WIN) |
michael@0 | 994 | if (!AddVectoredExceptionHandler(/* FirstHandler = */true, AsmJSExceptionHandler)) |
michael@0 | 995 | return false; |
michael@0 | 996 | # else |
michael@0 | 997 | // Assume Unix. SA_NODEFER allows us to reenter the signal handler if we |
michael@0 | 998 | // crash while handling the signal, and fall through to the Breakpad |
michael@0 | 999 | // handler by testing handlingSignal. |
michael@0 | 1000 | struct sigaction sigAction; |
michael@0 | 1001 | sigAction.sa_flags = SA_SIGINFO | SA_NODEFER; |
michael@0 | 1002 | sigAction.sa_sigaction = &AsmJSFaultHandler; |
michael@0 | 1003 | sigemptyset(&sigAction.sa_mask); |
michael@0 | 1004 | if (sigaction(SIGSEGV, &sigAction, &sPrevHandler)) |
michael@0 | 1005 | return false; |
michael@0 | 1006 | # endif |
michael@0 | 1007 | |
michael@0 | 1008 | sHandlersInstalled = true; |
michael@0 | 1009 | #endif |
michael@0 | 1010 | return true; |
michael@0 | 1011 | } |
michael@0 | 1012 | |
michael@0 | 1013 | // To interrupt execution of a JSRuntime, any thread may call |
michael@0 | 1014 | // JS_RequestInterruptCallback (JSRuntime::requestInterruptCallback from inside |
michael@0 | 1015 | // the engine). In the simplest case, this sets some state that is polled at |
michael@0 | 1016 | // regular intervals (function prologues, loop headers). For tight loops, this |
michael@0 | 1017 | // poses non-trivial overhead. For asm.js, we can do better: when another |
michael@0 | 1018 | // thread requests an interrupt, we simply mprotect all of the innermost asm.js |
michael@0 | 1019 | // module activation's code. This will trigger a SIGSEGV, taking us into |
michael@0 | 1020 | // AsmJSFaultHandler. From there, we can manually redirect execution to call |
michael@0 | 1021 | // js::HandleExecutionInterrupt. The memory is un-protected from the signal |
michael@0 | 1022 | // handler after control flow is redirected. |
michael@0 | 1023 | void |
michael@0 | 1024 | js::RequestInterruptForAsmJSCode(JSRuntime *rt) |
michael@0 | 1025 | { |
michael@0 | 1026 | JS_ASSERT(rt->currentThreadOwnsInterruptLock()); |
michael@0 | 1027 | |
michael@0 | 1028 | AsmJSActivation *activation = rt->mainThread.asmJSActivationStackFromAnyThread(); |
michael@0 | 1029 | if (!activation) |
michael@0 | 1030 | return; |
michael@0 | 1031 | |
michael@0 | 1032 | activation->module().protectCode(rt); |
michael@0 | 1033 | } |
michael@0 | 1034 | |
#if defined(MOZ_ASAN) && defined(JS_STANDALONE)
// Usually, this definition is found in mozglue (see mozglue/build/AsanOptions.cpp).
// However, when doing standalone JS builds, mozglue is not used and we must ensure
// that we still allow custom SIGSEGV handlers for asm.js and ion to work correctly.
extern "C" MOZ_ASAN_BLACKLIST
const char* __asan_default_options()
{
    // ASan consults this hook at startup for extra option flags.
    static const char options[] = "allow_user_segv_handler=1";
    return options;
}
#endif