Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double-key logic according to private browsing mode or
the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts...
With some reservations about how to convince FindCookie callers to test the
condition and pass a nullptr when double-key logic is disabled.
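
As a rough illustration of the gating described above (a minimal sketch only:
the helper name is hypothetical and the boolean reading of the pref is an
assumption, not the actual nsCookieService code):

// Hypothetical sketch; requires "mozilla/Preferences.h". Cookie lookups would
// consult this and pass nullptr as the first-party key when it returns false.
static bool
ThirdPartyIsolationActive(bool aIsPrivateBrowsing)
{
    // Double-key either in a private browsing context or when the user has
    // opted into third-party isolation via the preference.
    return aIsPrivateBrowsing ||
           mozilla::Preferences::GetBool("privacy.thirdparty.isolate", false);
}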
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/AsmJSSignalHandlers.h"

#include "mozilla/BinarySearch.h"

#include "assembler/assembler/MacroAssembler.h"
#include "jit/AsmJSModule.h"

using namespace js;
using namespace js::jit;
using namespace mozilla;

using JS::GenericNaN;
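
// Each supported OS exposes the interrupted thread's register state through a
// different context structure. The macros below give the rest of this file a
// uniform way to read and write the program counter, the general-purpose
// registers and the XMM registers on every platform handled here.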
#if defined(XP_WIN)
# define XMM_sig(p,i) ((p)->Xmm##i)
# define EIP_sig(p) ((p)->Eip)
# define RIP_sig(p) ((p)->Rip)
# define RAX_sig(p) ((p)->Rax)
# define RCX_sig(p) ((p)->Rcx)
# define RDX_sig(p) ((p)->Rdx)
# define RBX_sig(p) ((p)->Rbx)
# define RSP_sig(p) ((p)->Rsp)
# define RBP_sig(p) ((p)->Rbp)
# define RSI_sig(p) ((p)->Rsi)
# define RDI_sig(p) ((p)->Rdi)
# define R8_sig(p) ((p)->R8)
# define R9_sig(p) ((p)->R9)
# define R10_sig(p) ((p)->R10)
# define R11_sig(p) ((p)->R11)
# define R12_sig(p) ((p)->R12)
# define R13_sig(p) ((p)->R13)
# define R14_sig(p) ((p)->R14)
# define R15_sig(p) ((p)->R15)
#elif defined(__OpenBSD__)
# define XMM_sig(p,i) ((p)->sc_fpstate->fx_xmm[i])
# define EIP_sig(p) ((p)->sc_eip)
# define RIP_sig(p) ((p)->sc_rip)
# define RAX_sig(p) ((p)->sc_rax)
# define RCX_sig(p) ((p)->sc_rcx)
# define RDX_sig(p) ((p)->sc_rdx)
# define RBX_sig(p) ((p)->sc_rbx)
# define RSP_sig(p) ((p)->sc_rsp)
# define RBP_sig(p) ((p)->sc_rbp)
# define RSI_sig(p) ((p)->sc_rsi)
# define RDI_sig(p) ((p)->sc_rdi)
# define R8_sig(p) ((p)->sc_r8)
# define R9_sig(p) ((p)->sc_r9)
# define R10_sig(p) ((p)->sc_r10)
# define R11_sig(p) ((p)->sc_r11)
# define R12_sig(p) ((p)->sc_r12)
# define R13_sig(p) ((p)->sc_r13)
# define R14_sig(p) ((p)->sc_r14)
# define R15_sig(p) ((p)->sc_r15)
#elif defined(__linux__) || defined(SOLARIS)
# if defined(__linux__)
#  define XMM_sig(p,i) ((p)->uc_mcontext.fpregs->_xmm[i])
#  define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_EIP])
# else
#  define XMM_sig(p,i) ((p)->uc_mcontext.fpregs.fp_reg_set.fpchip_state.xmm[i])
#  define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_PC])
# endif
# define RIP_sig(p) ((p)->uc_mcontext.gregs[REG_RIP])
# define RAX_sig(p) ((p)->uc_mcontext.gregs[REG_RAX])
# define RCX_sig(p) ((p)->uc_mcontext.gregs[REG_RCX])
# define RDX_sig(p) ((p)->uc_mcontext.gregs[REG_RDX])
# define RBX_sig(p) ((p)->uc_mcontext.gregs[REG_RBX])
# define RSP_sig(p) ((p)->uc_mcontext.gregs[REG_RSP])
# define RBP_sig(p) ((p)->uc_mcontext.gregs[REG_RBP])
# define RSI_sig(p) ((p)->uc_mcontext.gregs[REG_RSI])
# define RDI_sig(p) ((p)->uc_mcontext.gregs[REG_RDI])
# define R8_sig(p) ((p)->uc_mcontext.gregs[REG_R8])
# define R9_sig(p) ((p)->uc_mcontext.gregs[REG_R9])
# define R10_sig(p) ((p)->uc_mcontext.gregs[REG_R10])
# define R11_sig(p) ((p)->uc_mcontext.gregs[REG_R11])
# define R12_sig(p) ((p)->uc_mcontext.gregs[REG_R12])
# define R13_sig(p) ((p)->uc_mcontext.gregs[REG_R13])
# define R14_sig(p) ((p)->uc_mcontext.gregs[REG_R14])
# if defined(__linux__) && defined(__arm__)
#  define R15_sig(p) ((p)->uc_mcontext.arm_pc)
# else
#  define R15_sig(p) ((p)->uc_mcontext.gregs[REG_R15])
# endif
#elif defined(__NetBSD__)
# define XMM_sig(p,i) (((struct fxsave64 *)(p)->uc_mcontext.__fpregs)->fx_xmm[i])
# define EIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EIP])
# define RIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RIP])
# define RAX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RAX])
# define RCX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RCX])
# define RDX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RDX])
# define RBX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RBX])
# define RSP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSP])
# define RBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RBP])
# define RSI_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSI])
# define RDI_sig(p) ((p)->uc_mcontext.__gregs[_REG_RDI])
# define R8_sig(p) ((p)->uc_mcontext.__gregs[_REG_R8])
# define R9_sig(p) ((p)->uc_mcontext.__gregs[_REG_R9])
# define R10_sig(p) ((p)->uc_mcontext.__gregs[_REG_R10])
# define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11])
# define R12_sig(p) ((p)->uc_mcontext.__gregs[_REG_R12])
# define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13])
# define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14])
# define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
#elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
# if defined(__DragonFly__)
#  define XMM_sig(p,i) (((union savefpu *)(p)->uc_mcontext.mc_fpregs)->sv_xmm.sv_xmm[i])
# else
#  define XMM_sig(p,i) (((struct savefpu *)(p)->uc_mcontext.mc_fpstate)->sv_xmm[i])
# endif
# define EIP_sig(p) ((p)->uc_mcontext.mc_eip)
# define RIP_sig(p) ((p)->uc_mcontext.mc_rip)
# define RAX_sig(p) ((p)->uc_mcontext.mc_rax)
# define RCX_sig(p) ((p)->uc_mcontext.mc_rcx)
# define RDX_sig(p) ((p)->uc_mcontext.mc_rdx)
# define RBX_sig(p) ((p)->uc_mcontext.mc_rbx)
# define RSP_sig(p) ((p)->uc_mcontext.mc_rsp)
# define RBP_sig(p) ((p)->uc_mcontext.mc_rbp)
# define RSI_sig(p) ((p)->uc_mcontext.mc_rsi)
# define RDI_sig(p) ((p)->uc_mcontext.mc_rdi)
# define R8_sig(p) ((p)->uc_mcontext.mc_r8)
# define R9_sig(p) ((p)->uc_mcontext.mc_r9)
# define R10_sig(p) ((p)->uc_mcontext.mc_r10)
# define R11_sig(p) ((p)->uc_mcontext.mc_r11)
# define R12_sig(p) ((p)->uc_mcontext.mc_r12)
# define R13_sig(p) ((p)->uc_mcontext.mc_r13)
# define R14_sig(p) ((p)->uc_mcontext.mc_r14)
# if defined(__FreeBSD__) && defined(__arm__)
#  define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
# else
#  define R15_sig(p) ((p)->uc_mcontext.mc_r15)
# endif
#elif defined(XP_MACOSX)
// Mach requires special treatment.
#else
# error "Don't know how to read/write to the thread state via the mcontext_t."
#endif

// For platforms where the signal/exception handler runs on the same
// thread/stack as the victim (Unix and Windows), we can use TLS to find any
// currently executing asm.js code.
#if !defined(XP_MACOSX)
static AsmJSActivation *
InnermostAsmJSActivation()
{
    PerThreadData *threadData = TlsPerThreadData.get();
    if (!threadData)
        return nullptr;

    return threadData->asmJSActivationStackFromOwnerThread();
}

static JSRuntime *
RuntimeForCurrentThread()
{
    PerThreadData *threadData = TlsPerThreadData.get();
    if (!threadData)
        return nullptr;

    return threadData->runtimeIfOnOwnerThread();
}
#endif // !defined(XP_MACOSX)

// Crashing inside the signal handler can cause the handler to be recursively
// invoked, eventually blowing the stack without actually showing a crash
// report dialog via Breakpad. To guard against this we watch for such
// recursion and fall through to the next handler immediately rather than
// trying to handle it.
class AutoSetHandlingSignal
{
    JSRuntime *rt;

  public:
    AutoSetHandlingSignal(JSRuntime *rt)
      : rt(rt)
    {
        JS_ASSERT(!rt->handlingSignal);
        rt->handlingSignal = true;
    }

    ~AutoSetHandlingSignal()
    {
        JS_ASSERT(rt->handlingSignal);
        rt->handlingSignal = false;
    }
};

#if defined(JS_CODEGEN_X64)
template <class T>
static void
SetXMMRegToNaN(bool isFloat32, T *xmm_reg)
{
    if (isFloat32) {
        JS_STATIC_ASSERT(sizeof(T) == 4 * sizeof(float));
        float *floats = reinterpret_cast<float*>(xmm_reg);
        floats[0] = GenericNaN();
        floats[1] = 0;
        floats[2] = 0;
        floats[3] = 0;
    } else {
        JS_STATIC_ASSERT(sizeof(T) == 2 * sizeof(double));
        double *dbls = reinterpret_cast<double*>(xmm_reg);
        dbls[0] = GenericNaN();
        dbls[1] = 0;
    }
}
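
// Adapter that lets mozilla::BinarySearch treat the module's list of heap
// accesses as a random-access sequence of code offsets.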
struct GetHeapAccessOffset
{
    const AsmJSModule &module;
    explicit GetHeapAccessOffset(const AsmJSModule &module) : module(module) {}
    uintptr_t operator[](size_t index) const {
        return module.heapAccess(index).offset();
    }
};

// Perform a binary search on the projected offsets of the known heap accesses
// in the module.
static const AsmJSHeapAccess *
LookupHeapAccess(const AsmJSModule &module, uint8_t *pc)
{
    JS_ASSERT(module.containsPC(pc));

    uintptr_t pcOff = pc - module.codeBase();

    size_t match;
    if (!BinarySearch(GetHeapAccessOffset(module), 0, module.numHeapAccesses(), pcOff, &match))
        return nullptr;

    return &module.heapAccess(match);
}
#endif

#if defined(XP_WIN)
# include "jswin.h"
#else
# include <signal.h>
# include <sys/mman.h>
#endif

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
# include <sys/ucontext.h> // for ucontext_t, mcontext_t
#endif

#if defined(JS_CODEGEN_X64)
# if defined(__DragonFly__)
#  include <machine/npx.h> // for union savefpu
# elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
       defined(__NetBSD__) || defined(__OpenBSD__)
#  include <machine/fpu.h> // for struct savefpu/fxsave64
# endif
#endif

#if defined(ANDROID)
// Not all versions of the Android NDK define ucontext_t or mcontext_t.
// Detect this and provide custom but compatible definitions. Note that these
// follow the GLibc naming convention to access register values from
// mcontext_t.
//
// See: https://chromiumcodereview.appspot.com/10829122/
// See: http://code.google.com/p/android/issues/detail?id=34784
# if !defined(__BIONIC_HAVE_UCONTEXT_T)
#  if defined(__arm__)

// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#   if !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#    include <asm/sigcontext.h>
#   endif

typedef struct sigcontext mcontext_t;

typedef struct ucontext {
    uint32_t uc_flags;
    struct ucontext* uc_link;
    stack_t uc_stack;
    mcontext_t uc_mcontext;
    // Other fields are not used so don't define them here.
} ucontext_t;

#  elif defined(__i386__)
// x86 version for Android.
typedef struct {
    uint32_t gregs[19];
    void* fpregs;
    uint32_t oldmask;
    uint32_t cr2;
} mcontext_t;

typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
typedef struct ucontext {
    uint32_t uc_flags;
    struct ucontext* uc_link;
    stack_t uc_stack;
    mcontext_t uc_mcontext;
    // Other fields are not used so don't define them here.
} ucontext_t;
enum { REG_EIP = 14 };
#  endif // defined(__i386__)
# endif // !defined(__BIONIC_HAVE_UCONTEXT_T)
#endif // defined(ANDROID)

#if defined(ANDROID) && defined(MOZ_LINKER)
// Apparently, on some Android systems, the signal handler is always passed
// nullptr as the faulting address. This would cause the asm.js signal handler
// to think that a safe out-of-bounds access was a nullptr-deref. This
// brokenness is already detected by ElfLoader (enabled by MOZ_LINKER), so
// reuse that check to disable asm.js compilation on systems where the signal
// handler is broken.
extern "C" MFBT_API bool IsSignalHandlingBroken();
#else
static bool IsSignalHandlingBroken() { return false; }
#endif // defined(MOZ_LINKER)

#if !defined(XP_WIN)
# define CONTEXT ucontext_t
#endif
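
// Collapse the architecture-specific program-counter accessors into a single
// PC_sig macro so the handlers below can read and redirect the interrupted pc
// without caring which register holds it.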
#if defined(JS_CPU_X64)
# define PC_sig(p) RIP_sig(p)
#elif defined(JS_CPU_X86)
# define PC_sig(p) EIP_sig(p)
#elif defined(JS_CPU_ARM)
# define PC_sig(p) R15_sig(p)
#endif

static bool
HandleSimulatorInterrupt(JSRuntime *rt, AsmJSActivation *activation, void *faultingAddress)
{
    // If the ARM simulator is enabled, the pc is in the simulator C++ code and
    // not in the generated code, so we check the simulator's pc manually. Also
    // note that we can't simply use simulator->set_pc() here because the
    // simulator could be in the middle of an instruction. On ARM, the signal
    // handlers are currently only used for Odin code, see bug 964258.

#ifdef JS_ARM_SIMULATOR
    const AsmJSModule &module = activation->module();
    if (module.containsPC((void *)rt->mainThread.simulator()->get_pc()) &&
        module.containsPC(faultingAddress))
    {
        activation->setInterrupted(nullptr);
        int32_t nextpc = int32_t(module.interruptExit());
        rt->mainThread.simulator()->set_resume_pc(nextpc);
        return true;
    }
#endif
    return false;
}

#if !defined(XP_MACOSX)
static uint8_t **
ContextToPC(CONTEXT *context)
{
    JS_STATIC_ASSERT(sizeof(PC_sig(context)) == sizeof(void*));
    return reinterpret_cast<uint8_t**>(&PC_sig(context));
}

# if defined(JS_CODEGEN_X64)
static void
SetRegisterToCoercedUndefined(CONTEXT *context, bool isFloat32, AnyRegister reg)
{
    if (reg.isFloat()) {
        switch (reg.fpu().code()) {
          case JSC::X86Registers::xmm0:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 0)); break;
          case JSC::X86Registers::xmm1:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 1)); break;
          case JSC::X86Registers::xmm2:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 2)); break;
          case JSC::X86Registers::xmm3:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 3)); break;
          case JSC::X86Registers::xmm4:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 4)); break;
          case JSC::X86Registers::xmm5:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 5)); break;
          case JSC::X86Registers::xmm6:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 6)); break;
          case JSC::X86Registers::xmm7:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 7)); break;
          case JSC::X86Registers::xmm8:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 8)); break;
          case JSC::X86Registers::xmm9:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 9)); break;
          case JSC::X86Registers::xmm10: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 10)); break;
          case JSC::X86Registers::xmm11: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 11)); break;
          case JSC::X86Registers::xmm12: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 12)); break;
          case JSC::X86Registers::xmm13: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 13)); break;
          case JSC::X86Registers::xmm14: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 14)); break;
          case JSC::X86Registers::xmm15: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 15)); break;
          default: MOZ_CRASH();
        }
    } else {
        switch (reg.gpr().code()) {
          case JSC::X86Registers::eax: RAX_sig(context) = 0; break;
          case JSC::X86Registers::ecx: RCX_sig(context) = 0; break;
          case JSC::X86Registers::edx: RDX_sig(context) = 0; break;
          case JSC::X86Registers::ebx: RBX_sig(context) = 0; break;
          case JSC::X86Registers::esp: RSP_sig(context) = 0; break;
          case JSC::X86Registers::ebp: RBP_sig(context) = 0; break;
          case JSC::X86Registers::esi: RSI_sig(context) = 0; break;
          case JSC::X86Registers::edi: RDI_sig(context) = 0; break;
          case JSC::X86Registers::r8:  R8_sig(context)  = 0; break;
          case JSC::X86Registers::r9:  R9_sig(context)  = 0; break;
          case JSC::X86Registers::r10: R10_sig(context) = 0; break;
          case JSC::X86Registers::r11: R11_sig(context) = 0; break;
          case JSC::X86Registers::r12: R12_sig(context) = 0; break;
          case JSC::X86Registers::r13: R13_sig(context) = 0; break;
          case JSC::X86Registers::r14: R14_sig(context) = 0; break;
          case JSC::X86Registers::r15: R15_sig(context) = 0; break;
          default: MOZ_CRASH();
        }
    }
}
# endif // JS_CODEGEN_X64
#endif // !XP_MACOSX

#if defined(XP_WIN)

static bool
HandleException(PEXCEPTION_POINTERS exception)
{
    EXCEPTION_RECORD *record = exception->ExceptionRecord;
    CONTEXT *context = exception->ContextRecord;

    if (record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
        return false;

    uint8_t **ppc = ContextToPC(context);
    uint8_t *pc = *ppc;
    JS_ASSERT(pc == record->ExceptionAddress);

    if (record->NumberParameters < 2)
        return false;

    void *faultingAddress = (void*)record->ExceptionInformation[1];

    JSRuntime *rt = RuntimeForCurrentThread();

    // Don't allow recursive handling of signals, see AutoSetHandlingSignal.
    if (!rt || rt->handlingSignal)
        return false;
    AutoSetHandlingSignal handling(rt);

    if (rt->jitRuntime() && rt->jitRuntime()->handleAccessViolation(rt, faultingAddress))
        return true;

    AsmJSActivation *activation = InnermostAsmJSActivation();
    if (!activation)
        return false;

    const AsmJSModule &module = activation->module();
    if (!module.containsPC(pc))
        return false;

    // If we faulted trying to execute code in 'module', this must be an
    // interrupt callback (see RequestInterruptForAsmJSCode). Redirect
    // execution to a trampoline which will call js::HandleExecutionInterrupt.
    // The trampoline will jump to activation->resumePC if execution isn't
    // interrupted.
    if (module.containsPC(faultingAddress)) {
        activation->setInterrupted(pc);
        *ppc = module.interruptExit();

        JSRuntime::AutoLockForInterrupt lock(rt);
        module.unprotectCode(rt);
        return true;
    }

# if defined(JS_CODEGEN_X64)
    // These checks aren't necessary, but, since we can, check anyway to make
    // sure we aren't covering up a real bug.
    if (!module.maybeHeap() ||
        faultingAddress < module.maybeHeap() ||
        faultingAddress >= module.maybeHeap() + AsmJSBufferProtectedSize)
    {
        return false;
    }

    const AsmJSHeapAccess *heapAccess = LookupHeapAccess(module, pc);
    if (!heapAccess)
        return false;

    // Also not necessary, but, since we can, do.
    if (heapAccess->isLoad() != !record->ExceptionInformation[0])
        return false;

    // We now know that this is an out-of-bounds access made by an asm.js
    // load/store that we should handle. If this is a load, assign the
    // JS-defined result value to the destination register (ToInt32(undefined)
    // or ToNumber(undefined), determined by the type of the destination
    // register) and set the PC to the next op. Upon return from the handler,
    // execution will resume at this next PC.
    if (heapAccess->isLoad())
        SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
    *ppc += heapAccess->opLength();
    return true;
# else
    return false;
# endif
}

static LONG WINAPI
AsmJSExceptionHandler(LPEXCEPTION_POINTERS exception)
{
    if (HandleException(exception))
        return EXCEPTION_CONTINUE_EXECUTION;

    // No need to worry about calling other handlers, the OS does this for us.
    return EXCEPTION_CONTINUE_SEARCH;
}

#elif defined(XP_MACOSX)
# include <mach/exc.h>

static uint8_t **
ContextToPC(x86_thread_state_t &state)
{
# if defined(JS_CODEGEN_X64)
    JS_STATIC_ASSERT(sizeof(state.uts.ts64.__rip) == sizeof(void*));
    return reinterpret_cast<uint8_t**>(&state.uts.ts64.__rip);
# else
    JS_STATIC_ASSERT(sizeof(state.uts.ts32.__eip) == sizeof(void*));
    return reinterpret_cast<uint8_t**>(&state.uts.ts32.__eip);
# endif
}

# if defined(JS_CODEGEN_X64)
static bool
SetRegisterToCoercedUndefined(mach_port_t rtThread, x86_thread_state64_t &state,
                              const AsmJSHeapAccess &heapAccess)
{
    if (heapAccess.loadedReg().isFloat()) {
        kern_return_t kret;

        x86_float_state64_t fstate;
        unsigned int count = x86_FLOAT_STATE64_COUNT;
        kret = thread_get_state(rtThread, x86_FLOAT_STATE64, (thread_state_t) &fstate, &count);
        if (kret != KERN_SUCCESS)
            return false;

        bool f32 = heapAccess.isFloat32Load();
        switch (heapAccess.loadedReg().fpu().code()) {
          case JSC::X86Registers::xmm0:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm0); break;
          case JSC::X86Registers::xmm1:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm1); break;
          case JSC::X86Registers::xmm2:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm2); break;
          case JSC::X86Registers::xmm3:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm3); break;
          case JSC::X86Registers::xmm4:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm4); break;
          case JSC::X86Registers::xmm5:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm5); break;
          case JSC::X86Registers::xmm6:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm6); break;
          case JSC::X86Registers::xmm7:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm7); break;
          case JSC::X86Registers::xmm8:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm8); break;
          case JSC::X86Registers::xmm9:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm9); break;
          case JSC::X86Registers::xmm10: SetXMMRegToNaN(f32, &fstate.__fpu_xmm10); break;
          case JSC::X86Registers::xmm11: SetXMMRegToNaN(f32, &fstate.__fpu_xmm11); break;
          case JSC::X86Registers::xmm12: SetXMMRegToNaN(f32, &fstate.__fpu_xmm12); break;
          case JSC::X86Registers::xmm13: SetXMMRegToNaN(f32, &fstate.__fpu_xmm13); break;
          case JSC::X86Registers::xmm14: SetXMMRegToNaN(f32, &fstate.__fpu_xmm14); break;
          case JSC::X86Registers::xmm15: SetXMMRegToNaN(f32, &fstate.__fpu_xmm15); break;
          default: MOZ_CRASH();
        }

        kret = thread_set_state(rtThread, x86_FLOAT_STATE64, (thread_state_t)&fstate, x86_FLOAT_STATE64_COUNT);
        if (kret != KERN_SUCCESS)
            return false;
    } else {
        switch (heapAccess.loadedReg().gpr().code()) {
          case JSC::X86Registers::eax: state.__rax = 0; break;
          case JSC::X86Registers::ecx: state.__rcx = 0; break;
          case JSC::X86Registers::edx: state.__rdx = 0; break;
          case JSC::X86Registers::ebx: state.__rbx = 0; break;
          case JSC::X86Registers::esp: state.__rsp = 0; break;
          case JSC::X86Registers::ebp: state.__rbp = 0; break;
          case JSC::X86Registers::esi: state.__rsi = 0; break;
          case JSC::X86Registers::edi: state.__rdi = 0; break;
          case JSC::X86Registers::r8:  state.__r8  = 0; break;
          case JSC::X86Registers::r9:  state.__r9  = 0; break;
          case JSC::X86Registers::r10: state.__r10 = 0; break;
          case JSC::X86Registers::r11: state.__r11 = 0; break;
          case JSC::X86Registers::r12: state.__r12 = 0; break;
          case JSC::X86Registers::r13: state.__r13 = 0; break;
          case JSC::X86Registers::r14: state.__r14 = 0; break;
          case JSC::X86Registers::r15: state.__r15 = 0; break;
          default: MOZ_CRASH();
        }
    }
    return true;
}
# endif

// This definition was generated by mig (the Mach Interface Generator) for the
// routine 'exception_raise' (exc.defs).
#pragma pack(4)
typedef struct {
    mach_msg_header_t Head;
    /* start of the kernel processed data */
    mach_msg_body_t msgh_body;
    mach_msg_port_descriptor_t thread;
    mach_msg_port_descriptor_t task;
    /* end of the kernel processed data */
    NDR_record_t NDR;
    exception_type_t exception;
    mach_msg_type_number_t codeCnt;
    int64_t code[2];
} Request__mach_exception_raise_t;
#pragma pack()

// The full Mach message also includes a trailer.
struct ExceptionRequest
{
    Request__mach_exception_raise_t body;
    mach_msg_trailer_t trailer;
};

static bool
HandleMachException(JSRuntime *rt, const ExceptionRequest &request)
{
    // Don't allow recursive handling of signals, see AutoSetHandlingSignal.
    if (rt->handlingSignal)
        return false;
    AutoSetHandlingSignal handling(rt);

    // Get the port of the JSRuntime's thread from the message.
    mach_port_t rtThread = request.body.thread.name;

    // Read out the JSRuntime thread's register state.
    x86_thread_state_t state;
    unsigned int count = x86_THREAD_STATE_COUNT;
    kern_return_t kret;
    kret = thread_get_state(rtThread, x86_THREAD_STATE, (thread_state_t)&state, &count);
    if (kret != KERN_SUCCESS)
        return false;

    uint8_t **ppc = ContextToPC(state);
    uint8_t *pc = *ppc;

    if (request.body.exception != EXC_BAD_ACCESS || request.body.codeCnt != 2)
        return false;

    void *faultingAddress = (void*)request.body.code[1];

    if (rt->jitRuntime() && rt->jitRuntime()->handleAccessViolation(rt, faultingAddress))
        return true;

    AsmJSActivation *activation = rt->mainThread.asmJSActivationStackFromAnyThread();
    if (!activation)
        return false;

    const AsmJSModule &module = activation->module();
    if (HandleSimulatorInterrupt(rt, activation, faultingAddress)) {
        JSRuntime::AutoLockForInterrupt lock(rt);
        module.unprotectCode(rt);
        return true;
    }

    if (!module.containsPC(pc))
        return false;

    // If we faulted trying to execute code in 'module', this must be an
    // interrupt callback (see RequestInterruptForAsmJSCode). Redirect
    // execution to a trampoline which will call js::HandleExecutionInterrupt.
    // The trampoline will jump to activation->resumePC if execution isn't
    // interrupted.
    if (module.containsPC(faultingAddress)) {
        activation->setInterrupted(pc);
        *ppc = module.interruptExit();

        JSRuntime::AutoLockForInterrupt lock(rt);
        module.unprotectCode(rt);

        // Update the thread state with the new pc.
        kret = thread_set_state(rtThread, x86_THREAD_STATE, (thread_state_t)&state, x86_THREAD_STATE_COUNT);
        return kret == KERN_SUCCESS;
    }

# if defined(JS_CODEGEN_X64)
    // These checks aren't necessary, but, since we can, check anyway to make
    // sure we aren't covering up a real bug.
    if (!module.maybeHeap() ||
        faultingAddress < module.maybeHeap() ||
        faultingAddress >= module.maybeHeap() + AsmJSBufferProtectedSize)
    {
        return false;
    }

    const AsmJSHeapAccess *heapAccess = LookupHeapAccess(module, pc);
    if (!heapAccess)
        return false;

    // We now know that this is an out-of-bounds access made by an asm.js
    // load/store that we should handle. If this is a load, assign the
    // JS-defined result value to the destination register (ToInt32(undefined)
    // or ToNumber(undefined), determined by the type of the destination
    // register) and set the PC to the next op. Upon return from the handler,
    // execution will resume at this next PC.
    if (heapAccess->isLoad()) {
        if (!SetRegisterToCoercedUndefined(rtThread, state.uts.ts64, *heapAccess))
            return false;
    }
    *ppc += heapAccess->opLength();

    // Update the thread state with the new pc.
    kret = thread_set_state(rtThread, x86_THREAD_STATE, (thread_state_t)&state, x86_THREAD_STATE_COUNT);
    if (kret != KERN_SUCCESS)
        return false;

    return true;
# else
    return false;
# endif
}

// Taken from mach_exc in /usr/include/mach/mach_exc.defs.
static const mach_msg_id_t sExceptionId = 2405;

// The choice of id here is arbitrary, the only constraint is that sQuitId != sExceptionId.
static const mach_msg_id_t sQuitId = 42;

void
AsmJSMachExceptionHandlerThread(void *threadArg)
{
    JSRuntime *rt = reinterpret_cast<JSRuntime*>(threadArg);
    mach_port_t port = rt->asmJSMachExceptionHandler.port();
    kern_return_t kret;

    while(true) {
        ExceptionRequest request;
        kret = mach_msg(&request.body.Head, MACH_RCV_MSG, 0, sizeof(request),
                        port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);

        // If we fail even receiving the message, we can't even send a reply!
        // Rather than hanging the faulting thread (hanging the browser), crash.
        if (kret != KERN_SUCCESS) {
            fprintf(stderr, "AsmJSMachExceptionHandlerThread: mach_msg failed with %d\n", (int)kret);
            MOZ_CRASH();
        }

        // There are only two messages we should be receiving: an exception
        // message that occurs when the runtime's thread faults and the quit
        // message sent when the runtime is shutting down.
        if (request.body.Head.msgh_id == sQuitId)
            break;
        if (request.body.Head.msgh_id != sExceptionId) {
            fprintf(stderr, "Unexpected msg header id %d\n", (int)request.body.Head.msgh_id);
            MOZ_CRASH();
        }

        // Some thread just committed an EXC_BAD_ACCESS and has been suspended by
        // the kernel. The kernel is waiting for us to reply with instructions.
        // Our default is the "not handled" reply (by setting the RetCode field
        // of the reply to KERN_FAILURE) which tells the kernel to continue
        // searching at the process and system level. If this is an asm.js
        // expected exception, we handle it and return KERN_SUCCESS.
        bool handled = HandleMachException(rt, request);
        kern_return_t replyCode = handled ? KERN_SUCCESS : KERN_FAILURE;

        // This magic incantation to send a reply back to the kernel was derived
        // from the exc_server generated by 'mig -v /usr/include/mach/mach_exc.defs'.
        __Reply__exception_raise_t reply;
        reply.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.body.Head.msgh_bits), 0);
        reply.Head.msgh_size = sizeof(reply);
        reply.Head.msgh_remote_port = request.body.Head.msgh_remote_port;
        reply.Head.msgh_local_port = MACH_PORT_NULL;
        reply.Head.msgh_id = request.body.Head.msgh_id + 100;
        reply.NDR = NDR_record;
        reply.RetCode = replyCode;
        mach_msg(&reply.Head, MACH_SEND_MSG, sizeof(reply), 0, MACH_PORT_NULL,
                 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    }
}

AsmJSMachExceptionHandler::AsmJSMachExceptionHandler()
  : installed_(false),
    thread_(nullptr),
    port_(MACH_PORT_NULL)
{}

void
AsmJSMachExceptionHandler::uninstall()
{
#ifdef JS_THREADSAFE
    if (installed_) {
        thread_port_t thread = mach_thread_self();
        kern_return_t kret = thread_set_exception_ports(thread,
                                                        EXC_MASK_BAD_ACCESS,
                                                        MACH_PORT_NULL,
                                                        EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
                                                        THREAD_STATE_NONE);
        mach_port_deallocate(mach_task_self(), thread);
        if (kret != KERN_SUCCESS)
            MOZ_CRASH();
        installed_ = false;
    }
    if (thread_ != nullptr) {
        // Break the handler thread out of the mach_msg loop.
        mach_msg_header_t msg;
        msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
        msg.msgh_size = sizeof(msg);
        msg.msgh_remote_port = port_;
        msg.msgh_local_port = MACH_PORT_NULL;
        msg.msgh_reserved = 0;
        msg.msgh_id = sQuitId;
        kern_return_t kret = mach_msg(&msg, MACH_SEND_MSG, sizeof(msg), 0, MACH_PORT_NULL,
                                      MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        if (kret != KERN_SUCCESS) {
            fprintf(stderr, "AsmJSMachExceptionHandler: failed to send quit message: %d\n", (int)kret);
            MOZ_CRASH();
        }

        // Wait for the handler thread to complete before deallocating the port.
        PR_JoinThread(thread_);
        thread_ = nullptr;
    }
    if (port_ != MACH_PORT_NULL) {
        DebugOnly<kern_return_t> kret = mach_port_destroy(mach_task_self(), port_);
        JS_ASSERT(kret == KERN_SUCCESS);
        port_ = MACH_PORT_NULL;
    }
#else
    JS_ASSERT(!installed_);
#endif
}

bool
AsmJSMachExceptionHandler::install(JSRuntime *rt)
{
#ifdef JS_THREADSAFE
    JS_ASSERT(!installed());
    kern_return_t kret;
    mach_port_t thread;

    // Get a port which can send and receive data.
    kret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port_);
    if (kret != KERN_SUCCESS)
        goto error;
    kret = mach_port_insert_right(mach_task_self(), port_, port_, MACH_MSG_TYPE_MAKE_SEND);
    if (kret != KERN_SUCCESS)
        goto error;

    // Create a thread to block on reading port_.
    thread_ = PR_CreateThread(PR_USER_THREAD, AsmJSMachExceptionHandlerThread, rt,
                              PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, 0);
    if (!thread_)
        goto error;

    // Direct exceptions on this thread to port_ (and thus our handler thread).
    // Note: we are totally clobbering any existing *thread* exception ports and
    // not even attempting to forward. Breakpad and gdb both use the *process*
    // exception ports which are only called if the thread doesn't handle the
    // exception, so we should be fine.
    thread = mach_thread_self();
    kret = thread_set_exception_ports(thread,
                                      EXC_MASK_BAD_ACCESS,
                                      port_,
                                      EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
                                      THREAD_STATE_NONE);
    mach_port_deallocate(mach_task_self(), thread);
    if (kret != KERN_SUCCESS)
        goto error;

    installed_ = true;
    return true;

  error:
    uninstall();
    return false;
#else
    return false;
#endif
}

#else // If not Windows or Mac, assume Unix

// Be very cautious and default to not handling; we don't want to accidentally
// silence real crashes from real bugs.
static bool
HandleSignal(int signum, siginfo_t *info, void *ctx)
{
    CONTEXT *context = (CONTEXT *)ctx;
    uint8_t **ppc = ContextToPC(context);
    uint8_t *pc = *ppc;

    void *faultingAddress = info->si_addr;

    JSRuntime *rt = RuntimeForCurrentThread();

    // Don't allow recursive handling of signals, see AutoSetHandlingSignal.
    if (!rt || rt->handlingSignal)
        return false;
    AutoSetHandlingSignal handling(rt);

    if (rt->jitRuntime() && rt->jitRuntime()->handleAccessViolation(rt, faultingAddress))
        return true;

    AsmJSActivation *activation = InnermostAsmJSActivation();
    if (!activation)
        return false;

    const AsmJSModule &module = activation->module();
    if (HandleSimulatorInterrupt(rt, activation, faultingAddress)) {
        JSRuntime::AutoLockForInterrupt lock(rt);
        module.unprotectCode(rt);
        return true;
    }

    if (!module.containsPC(pc))
        return false;

    // If we faulted trying to execute code in 'module', this must be an
    // interrupt callback (see RequestInterruptForAsmJSCode). Redirect
    // execution to a trampoline which will call js::HandleExecutionInterrupt.
    // The trampoline will jump to activation->resumePC if execution isn't
    // interrupted.
    if (module.containsPC(faultingAddress)) {
        activation->setInterrupted(pc);
        *ppc = module.interruptExit();

        JSRuntime::AutoLockForInterrupt lock(rt);
        module.unprotectCode(rt);
        return true;
    }

# if defined(JS_CODEGEN_X64)
    // These checks aren't necessary, but, since we can, check anyway to make
    // sure we aren't covering up a real bug.
    if (!module.maybeHeap() ||
        faultingAddress < module.maybeHeap() ||
        faultingAddress >= module.maybeHeap() + AsmJSBufferProtectedSize)
    {
        return false;
    }

    const AsmJSHeapAccess *heapAccess = LookupHeapAccess(module, pc);
    if (!heapAccess)
        return false;

    // We now know that this is an out-of-bounds access made by an asm.js
    // load/store that we should handle. If this is a load, assign the
    // JS-defined result value to the destination register (ToInt32(undefined)
    // or ToNumber(undefined), determined by the type of the destination
    // register) and set the PC to the next op. Upon return from the handler,
    // execution will resume at this next PC.
    if (heapAccess->isLoad())
        SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
    *ppc += heapAccess->opLength();
    return true;
# else
    return false;
# endif
}

static struct sigaction sPrevHandler;

static void
AsmJSFaultHandler(int signum, siginfo_t *info, void *context)
{
    if (HandleSignal(signum, info, context))
        return;

    // This signal is not for any asm.js code we expect, so we need to forward
    // the signal to the next handler. If there is no next handler (SIG_IGN or
    // SIG_DFL), then it's time to crash. To do this, we set the signal back to
    // its original disposition and return. This will cause the faulting op to
    // be re-executed which will crash in the normal way. The advantage of
    // doing this over calling _exit() is that we remove ourselves from the
    // crash stack which improves crash reports. If there is a next handler,
    // call it. It will either crash synchronously, fix up the instruction so
    // that execution can continue and return, or trigger a crash by returning
    // the signal to its original disposition and returning.
    //
    // Note: the order of these tests matters.
    if (sPrevHandler.sa_flags & SA_SIGINFO)
        sPrevHandler.sa_sigaction(signum, info, context);
    else if (sPrevHandler.sa_handler == SIG_DFL || sPrevHandler.sa_handler == SIG_IGN)
        sigaction(signum, &sPrevHandler, nullptr);
    else
        sPrevHandler.sa_handler(signum);
}
#endif

#if !defined(XP_MACOSX)
static bool sHandlersInstalled = false;
#endif

bool
js::EnsureAsmJSSignalHandlersInstalled(JSRuntime *rt)
{
    if (IsSignalHandlingBroken())
        return false;

#if defined(XP_MACOSX)
    // On OSX, each JSRuntime gets its own handler.
    return rt->asmJSMachExceptionHandler.installed() || rt->asmJSMachExceptionHandler.install(rt);
#else
    // Assume Windows or Unix. For these platforms, there is a single,
    // process-wide signal handler installed. Take care to only install it once.
    if (sHandlersInstalled)
        return true;

# if defined(XP_WIN)
    if (!AddVectoredExceptionHandler(/* FirstHandler = */true, AsmJSExceptionHandler))
        return false;
# else
    // Assume Unix. SA_NODEFER allows us to reenter the signal handler if we
    // crash while handling the signal, and fall through to the Breakpad
    // handler by testing handlingSignal.
    struct sigaction sigAction;
    sigAction.sa_flags = SA_SIGINFO | SA_NODEFER;
    sigAction.sa_sigaction = &AsmJSFaultHandler;
    sigemptyset(&sigAction.sa_mask);
    if (sigaction(SIGSEGV, &sigAction, &sPrevHandler))
        return false;
# endif

    sHandlersInstalled = true;
#endif
    return true;
}

// To interrupt execution of a JSRuntime, any thread may call
// JS_RequestInterruptCallback (JSRuntime::requestInterruptCallback from inside
// the engine). In the simplest case, this sets some state that is polled at
// regular intervals (function prologues, loop headers). For tight loops, this
// poses non-trivial overhead. For asm.js, we can do better: when another
// thread requests an interrupt, we simply mprotect all of the innermost asm.js
// module activation's code. This will trigger a SIGSEGV, taking us into
// AsmJSFaultHandler. From there, we can manually redirect execution to call
// js::HandleExecutionInterrupt. The memory is un-protected from the signal
// handler after control flow is redirected.
void
js::RequestInterruptForAsmJSCode(JSRuntime *rt)
{
    JS_ASSERT(rt->currentThreadOwnsInterruptLock());

    AsmJSActivation *activation = rt->mainThread.asmJSActivationStackFromAnyThread();
    if (!activation)
        return;

    activation->module().protectCode(rt);
}

#if defined(MOZ_ASAN) && defined(JS_STANDALONE)
// Usually, this definition is found in mozglue (see mozglue/build/AsanOptions.cpp).
// However, when doing standalone JS builds, mozglue is not used and we must ensure
// that we still allow custom SIGSEGV handlers for asm.js and ion to work correctly.
extern "C" MOZ_ASAN_BLACKLIST
const char* __asan_default_options() {
    return "allow_user_segv_handler=1";
}
#endif