/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jscompartment.h"

#include "jit/Bailouts.h"
#include "jit/IonFrames.h"
#include "jit/IonLinker.h"
#include "jit/IonSpewer.h"
#include "jit/JitCompartment.h"
#include "jit/mips/Bailouts-mips.h"
#include "jit/mips/BaselineHelpers-mips.h"
#ifdef JS_ION_PERF
# include "jit/PerfSpewer.h"
#endif
#include "jit/VMFunctions.h"

#include "jit/ExecutionMode-inl.h"

using namespace js;
using namespace js::jit;

static_assert(sizeof(uintptr_t) == sizeof(uint32_t), "Not 64-bit clean.");

struct EnterJITRegs
{
    double f30;
    double f28;
    double f26;
    double f24;
    double f22;
    double f20;

    // empty slot for alignment
    uintptr_t align;

    // non-volatile registers.
    uintptr_t ra;
    uintptr_t s7;
    uintptr_t s6;
    uintptr_t s5;
    uintptr_t s4;
    uintptr_t s3;
    uintptr_t s2;
    uintptr_t s1;
    uintptr_t s0;
};

struct EnterJITArgs
{
    // First 4 argument placeholders
    void *jitcode; // <- sp points here when function is entered.
    int maxArgc;
    Value *maxArgv;
    InterpreterFrame *fp;

    // Arguments on stack
    CalleeToken calleeToken;
    JSObject *scopeChain;
    size_t numStackValues;
    Value *vp;
};

static void
GenerateReturn(MacroAssembler &masm, int returnCode)
{
    MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs));

    // Restore non-volatile registers
    masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s0)), s0);
    masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s1)), s1);
    masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s2)), s2);
    masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s3)), s3);
    masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s4)), s4);
    masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s5)), s5);
    masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s6)), s6);
    masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s7)), s7);
    masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, ra)), ra);

    // Restore non-volatile floating point registers
    masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f20)), f20);
    masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f22)), f22);
    masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f24)), f24);
    masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f26)), f26);
    masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f28)), f28);
    masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f30)), f30);

    masm.freeStack(sizeof(EnterJITRegs));

    masm.branch(ra);
}

static void
GeneratePrologue(MacroAssembler &masm)
{
    // Save non-volatile registers. These must be saved by the trampoline,
    // rather than the JIT'd code, because they are scanned by the
    // conservative scanner.
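    // The doubles below are stored with sdc1 (as_sd), which requires 8-byte
    // aligned addresses; the 'align' padding slot in EnterJITRegs keeps the
    // structure size a multiple of 8 so that, assuming the incoming sp is
    // 8-byte aligned (as the O32 ABI requires), every double slot stays
    // properly aligned.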
    masm.reserveStack(sizeof(EnterJITRegs));
    masm.storePtr(s0, Address(StackPointer, offsetof(EnterJITRegs, s0)));
    masm.storePtr(s1, Address(StackPointer, offsetof(EnterJITRegs, s1)));
    masm.storePtr(s2, Address(StackPointer, offsetof(EnterJITRegs, s2)));
    masm.storePtr(s3, Address(StackPointer, offsetof(EnterJITRegs, s3)));
    masm.storePtr(s4, Address(StackPointer, offsetof(EnterJITRegs, s4)));
    masm.storePtr(s5, Address(StackPointer, offsetof(EnterJITRegs, s5)));
    masm.storePtr(s6, Address(StackPointer, offsetof(EnterJITRegs, s6)));
    masm.storePtr(s7, Address(StackPointer, offsetof(EnterJITRegs, s7)));
    masm.storePtr(ra, Address(StackPointer, offsetof(EnterJITRegs, ra)));

    masm.as_sd(f20, StackPointer, offsetof(EnterJITRegs, f20));
    masm.as_sd(f22, StackPointer, offsetof(EnterJITRegs, f22));
    masm.as_sd(f24, StackPointer, offsetof(EnterJITRegs, f24));
    masm.as_sd(f26, StackPointer, offsetof(EnterJITRegs, f26));
    masm.as_sd(f28, StackPointer, offsetof(EnterJITRegs, f28));
    masm.as_sd(f30, StackPointer, offsetof(EnterJITRegs, f30));
}


/*
 * This method generates a trampoline for a C++ function with the following
 * signature:
 *   void enter(void *code, int argc, Value *argv, InterpreterFrame *fp,
 *              CalleeToken calleeToken, JSObject *scopeChain, Value *vp)
 *   ...using standard EABI calling convention
 */
JitCode *
JitRuntime::generateEnterJIT(JSContext *cx, EnterJitType type)
{
    const Register reg_code = a0;
    const Register reg_argc = a1;
    const Register reg_argv = a2;
    const Register reg_frame = a3;

    MOZ_ASSERT(OsrFrameReg == reg_frame);

    MacroAssembler masm(cx);
    GeneratePrologue(masm);

    const Address slotToken(sp, sizeof(EnterJITRegs) + offsetof(EnterJITArgs, calleeToken));
    const Address slotVp(sp, sizeof(EnterJITRegs) + offsetof(EnterJITArgs, vp));

    // Save stack pointer into s4
    masm.movePtr(StackPointer, s4);

    // Load calleeToken into s2.
    masm.loadPtr(slotToken, s2);

    // Save stack pointer as baseline frame.
    if (type == EnterJitBaseline)
        masm.movePtr(StackPointer, BaselineFrameReg);

    // Load the number of actual arguments into s3.
    masm.loadPtr(slotVp, s3);
    masm.unboxInt32(Address(s3, 0), s3);

    /***************************************************************
    Loop over argv vector, push arguments onto stack in reverse order
    ***************************************************************/

    masm.as_sll(s0, reg_argc, 3); // s0 = argc * 8
    masm.addPtr(reg_argv, s0);    // s0 = argv + argc * 8

    // Loop over arguments, copying them from an unknown buffer onto the Ion
    // stack so they can be accessed from JIT'ed code.
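    // Each iteration of this loop moves one boxed Value: under NUNBOX32 a
    // Value is two 32-bit words (type tag and payload), which is why s0 and
    // StackPointer both step by 2 * sizeof(uintptr_t) per copied argument.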
    Label header, footer;
    // If there aren't any arguments, don't do anything
    masm.ma_b(s0, reg_argv, &footer, Assembler::BelowOrEqual, ShortJump);
    {
        masm.bind(&header);

        masm.subPtr(Imm32(2 * sizeof(uintptr_t)), s0);
        masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);

        ValueOperand value = ValueOperand(s6, s7);
        masm.loadValue(Address(s0, 0), value);
        masm.storeValue(value, Address(StackPointer, 0));

        masm.ma_b(s0, reg_argv, &header, Assembler::Above, ShortJump);
    }
    masm.bind(&footer);

    masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
    masm.storePtr(s3, Address(StackPointer, sizeof(uintptr_t))); // actual arguments
    masm.storePtr(s2, Address(StackPointer, 0)); // callee token

    masm.subPtr(StackPointer, s4);
    masm.makeFrameDescriptor(s4, JitFrame_Entry);
    masm.push(s4); // descriptor

    CodeLabel returnLabel;
    if (type == EnterJitBaseline) {
        // Handle OSR.
        GeneralRegisterSet regs(GeneralRegisterSet::All());
        regs.take(JSReturnOperand);
        regs.take(OsrFrameReg);
        regs.take(BaselineFrameReg);
        regs.take(reg_code);

        const Address slotNumStackValues(BaselineFrameReg, sizeof(EnterJITRegs) +
                                         offsetof(EnterJITArgs, numStackValues));
        const Address slotScopeChain(BaselineFrameReg, sizeof(EnterJITRegs) +
                                     offsetof(EnterJITArgs, scopeChain));

        Label notOsr;
        masm.ma_b(OsrFrameReg, OsrFrameReg, &notOsr, Assembler::Zero, ShortJump);

        Register scratch = regs.takeAny();

        Register numStackValues = regs.takeAny();
        masm.load32(slotNumStackValues, numStackValues);

        // Push return address, previous frame pointer.
        masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
        masm.ma_li(scratch, returnLabel.dest());
        masm.storePtr(scratch, Address(StackPointer, sizeof(uintptr_t)));
        masm.storePtr(BaselineFrameReg, Address(StackPointer, 0));

        // Reserve frame.
        Register framePtr = BaselineFrameReg;
        masm.subPtr(Imm32(BaselineFrame::Size()), StackPointer);
        masm.movePtr(StackPointer, framePtr);

        // Reserve space for locals and stack values.
        masm.ma_sll(scratch, numStackValues, Imm32(3));
        masm.subPtr(scratch, StackPointer);

        // Enter exit frame.
        masm.addPtr(Imm32(BaselineFrame::Size() + BaselineFrame::FramePointerOffset), scratch);
        masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS);

        // Push frame descriptor and fake return address.
        masm.reserveStack(2 * sizeof(uintptr_t));
        masm.storePtr(scratch, Address(StackPointer, sizeof(uintptr_t))); // Frame descriptor
        masm.storePtr(zero, Address(StackPointer, 0)); // fake return address

        masm.enterFakeExitFrame();

        masm.reserveStack(2 * sizeof(uintptr_t));
        masm.storePtr(framePtr, Address(StackPointer, sizeof(uintptr_t))); // BaselineFrame
        masm.storePtr(reg_code, Address(StackPointer, 0)); // jitcode

        masm.setupUnalignedABICall(3, scratch);
        masm.passABIArg(BaselineFrameReg); // BaselineFrame
        masm.passABIArg(OsrFrameReg); // InterpreterFrame
        masm.passABIArg(numStackValues);
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, jit::InitBaselineFrameForOsr));

        Register jitcode = regs.takeAny();
        masm.loadPtr(Address(StackPointer, 0), jitcode);
        masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), framePtr);
        masm.freeStack(2 * sizeof(uintptr_t));

        MOZ_ASSERT(jitcode != ReturnReg);

        Label error;
        masm.freeStack(IonExitFrameLayout::SizeWithFooter());
        masm.addPtr(Imm32(BaselineFrame::Size()), framePtr);
        masm.branchIfFalseBool(ReturnReg, &error);

        masm.jump(jitcode);

        // OOM: load error value, discard return address and previous frame
        // pointer and return.
        masm.bind(&error);
        masm.movePtr(framePtr, StackPointer);
        masm.addPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
        masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
        masm.ma_li(scratch, returnLabel.dest());
        masm.jump(scratch);

        masm.bind(&notOsr);
        // Load the scope chain in R1.
        MOZ_ASSERT(R1.scratchReg() != reg_code);
        masm.loadPtr(slotScopeChain, R1.scratchReg());
    }

    // Call the function, pushing the return address onto the stack.
    masm.ma_callIonHalfPush(reg_code);

    if (type == EnterJitBaseline) {
        // Baseline OSR will return here.
        masm.bind(returnLabel.src());
        if (!masm.addCodeLabel(returnLabel))
            return nullptr;
    }

    // Pop arguments off the stack.
    // s0 <- 8*argc (size of all arguments we pushed on the stack)
    masm.pop(s0);
    masm.rshiftPtr(Imm32(4), s0);
    masm.addPtr(s0, StackPointer);

    // Store the returned value into the slotVp
    masm.loadPtr(slotVp, s1);
    masm.storeValue(JSReturnOperand, Address(s1, 0));

    // Restore non-volatile registers and return.
    GenerateReturn(masm, ShortJump);

    Linker linker(masm);
    AutoFlushICache afc("GenerateEnterJIT");
    JitCode *code = linker.newCode(cx, JSC::OTHER_CODE);

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(code, "EnterJIT");
#endif

    return code;
}

JitCode *
JitRuntime::generateInvalidator(JSContext *cx)
{
    MacroAssembler masm(cx);

    // NOTE: Members ionScript_ and osiPointReturnAddress_ of
    // InvalidationBailoutStack are already on the stack.
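    // Those two already-pushed words are why the reservation below subtracts
    // 2 * sizeof(uintptr_t): only the remaining InvalidationBailoutStack
    // fields (the saved register state) still need room on the stack.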
    static const uint32_t STACK_DATA_SIZE = sizeof(InvalidationBailoutStack) -
                                            2 * sizeof(uintptr_t);

    // Stack has to be aligned here. If not, we will have to fix it.
    masm.checkStackAlignment();

    // Make room for data on stack.
    masm.subPtr(Imm32(STACK_DATA_SIZE), StackPointer);

    // Save general purpose registers
    for (uint32_t i = 0; i < Registers::Total; i++) {
        Address address = Address(StackPointer, InvalidationBailoutStack::offsetOfRegs() +
                                                i * sizeof(uintptr_t));
        masm.storePtr(Register::FromCode(i), address);
    }

    // Save floating point registers
    // We can use as_sd because stack is aligned.
    for (uint32_t i = 0; i < FloatRegisters::Total; i++)
        masm.as_sd(FloatRegister::FromCode(i), StackPointer,
                   InvalidationBailoutStack::offsetOfFpRegs() + i * sizeof(double));

    // Pass pointer to InvalidationBailoutStack structure.
    masm.movePtr(StackPointer, a0);

    // Reserve place for return value and BailoutInfo pointer
    masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
    // Pass pointer to return value.
    masm.ma_addu(a1, StackPointer, Imm32(sizeof(uintptr_t)));
    // Pass pointer to BailoutInfo
    masm.movePtr(StackPointer, a2);

    masm.setupAlignedABICall(3);
    masm.passABIArg(a0);
    masm.passABIArg(a1);
    masm.passABIArg(a2);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, InvalidationBailout));

    masm.loadPtr(Address(StackPointer, 0), a2);
    masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), a1);
    // Remove the return address, the IonScript, the register state
    // (InvalidationBailoutStack) and the space that was allocated for the
    // return value.
    masm.addPtr(Imm32(sizeof(InvalidationBailoutStack) + 2 * sizeof(uintptr_t)), StackPointer);
    // Remove the space that this frame was using before the bailout
    // (computed by InvalidationBailout).
    masm.addPtr(a1, StackPointer);

    // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
    JitCode *bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
    masm.branch(bailoutTail);

    Linker linker(masm);
    AutoFlushICache afc("Invalidator");
    JitCode *code = linker.newCode(cx, JSC::OTHER_CODE);
    IonSpew(IonSpew_Invalidate, "   invalidation thunk created at %p", (void *) code->raw());

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(code, "Invalidator");
#endif

    return code;
}

JitCode *
JitRuntime::generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void **returnAddrOut)
{
    MacroAssembler masm(cx);

    // ArgumentsRectifierReg contains the |nargs| pushed onto the current
    // frame. Including |this|, there are (|nargs| + 1) arguments to copy.
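    // In outline, the code below (1) pushes |undefined| for each missing
    // formal argument, (2) copies the actual arguments and |this| over from
    // the caller's frame, and (3) pushes the number of actual arguments, the
    // callee token and a rectifier frame descriptor before calling the
    // target's compiled code.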
    MOZ_ASSERT(ArgumentsRectifierReg == s3);

    Register numActArgsReg = t6;
    Register calleeTokenReg = t7;
    Register numArgsReg = t5;

    // Copy number of actual arguments into numActArgsReg
    masm.loadPtr(Address(StackPointer, IonRectifierFrameLayout::offsetOfNumActualArgs()),
                 numActArgsReg);

    // Load the number of |undefined|s to push into t1.
    masm.loadPtr(Address(StackPointer, IonRectifierFrameLayout::offsetOfCalleeToken()),
                 calleeTokenReg);
    masm.load16ZeroExtend(Address(calleeTokenReg, JSFunction::offsetOfNargs()), numArgsReg);

    masm.ma_subu(t1, numArgsReg, s3);

    masm.moveValue(UndefinedValue(), ValueOperand(t3, t4));

    masm.movePtr(StackPointer, t2); // Save %sp.

    // Push undefined.
    {
        Label undefLoopTop;
        masm.bind(&undefLoopTop);

        masm.subPtr(Imm32(sizeof(Value)), StackPointer);
        masm.storeValue(ValueOperand(t3, t4), Address(StackPointer, 0));
        masm.sub32(Imm32(1), t1);

        masm.ma_b(t1, t1, &undefLoopTop, Assembler::NonZero, ShortJump);
    }

    // Get the topmost argument.
    masm.ma_sll(t0, s3, Imm32(3)); // t0 <- nargs * 8
    masm.addPtr(t0, t2);           // t2 <- t2(saved sp) + nargs * 8
    masm.addPtr(Imm32(sizeof(IonRectifierFrameLayout)), t2);

    // Push arguments, |nargs| + 1 times (to include |this|).
    {
        Label copyLoopTop, initialSkip;

        masm.ma_b(&initialSkip, ShortJump);

        masm.bind(&copyLoopTop);
        masm.subPtr(Imm32(sizeof(Value)), t2);
        masm.sub32(Imm32(1), s3);

        masm.bind(&initialSkip);

        MOZ_ASSERT(sizeof(Value) == 2 * sizeof(uint32_t));
        // Read argument and push to stack.
        masm.subPtr(Imm32(sizeof(Value)), StackPointer);
        masm.load32(Address(t2, NUNBOX32_TYPE_OFFSET), t0);
        masm.store32(t0, Address(StackPointer, NUNBOX32_TYPE_OFFSET));
        masm.load32(Address(t2, NUNBOX32_PAYLOAD_OFFSET), t0);
        masm.store32(t0, Address(StackPointer, NUNBOX32_PAYLOAD_OFFSET));

        masm.ma_b(s3, s3, &copyLoopTop, Assembler::NonZero, ShortJump);
    }

    // translate the framesize from values into bytes
    masm.ma_addu(t0, numArgsReg, Imm32(1));
    masm.lshiftPtr(Imm32(3), t0);

    // Construct sizeDescriptor.
    masm.makeFrameDescriptor(t0, JitFrame_Rectifier);

    // Construct IonJSFrameLayout.
    masm.subPtr(Imm32(3 * sizeof(uintptr_t)), StackPointer);
    // Push actual arguments.
    masm.storePtr(numActArgsReg, Address(StackPointer, 2 * sizeof(uintptr_t)));
    // Push callee token.
    masm.storePtr(calleeTokenReg, Address(StackPointer, sizeof(uintptr_t)));
    // Push frame descriptor.
    masm.storePtr(t0, Address(StackPointer, 0));

    // Call the target function.
    // Note that this code assumes the function is JITted.
    masm.loadPtr(Address(calleeTokenReg, JSFunction::offsetOfNativeOrScript()), t1);
    masm.loadBaselineOrIonRaw(t1, t1, mode, nullptr);
    masm.ma_callIonHalfPush(t1);

    uint32_t returnOffset = masm.currentOffset();

    // arg1
    //  ...
    // argN
    // num actual args
    // callee token
    // sizeDescriptor     <- sp now
    // return address

    // Remove the rectifier frame.
    // t0 <- descriptor with FrameType.
    masm.loadPtr(Address(StackPointer, 0), t0);
    masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), t0); // t0 <- descriptor.

    // Discard descriptor, calleeToken and number of actual arguments.
    masm.addPtr(Imm32(3 * sizeof(uintptr_t)), StackPointer);

    // arg1
    //  ...
    // argN               <- sp now; t0 <- frame descriptor
    // num actual args
    // callee token
    // sizeDescriptor
    // return address

    // Discard pushed arguments.
    masm.addPtr(t0, StackPointer);

    masm.ret();
    Linker linker(masm);
    AutoFlushICache afc("ArgumentsRectifier");
    JitCode *code = linker.newCode(cx, JSC::OTHER_CODE);

    CodeOffsetLabel returnLabel(returnOffset);
    returnLabel.fixup(&masm);
    if (returnAddrOut)
        *returnAddrOut = (void *) (code->raw() + returnLabel.offset());

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(code, "ArgumentsRectifier");
#endif

    return code;
}

/* There are two different stack layouts when doing a bailout. They are
 * represented via the class BailoutStack.
 *
 * - The first case is when the bailout is done through the bailout table. In
 *   this case the table offset is stored in $ra (see
 *   JitRuntime::generateBailoutTable()) and the thunk code should save it on
 *   the stack. In this case frameClassId_ cannot be NO_FRAME_SIZE_CLASS_ID.
 *   Members snapshotOffset_ and padding_ are not on the stack.
 *
 * - The other case is when the bailout is done via out-of-line code (lazy
 *   bailout). In this case the frame size is stored in $ra (see
 *   CodeGeneratorMIPS::generateOutOfLineCode()) and the thunk code should
 *   save it on the stack. The other difference is that members
 *   snapshotOffset_ and padding_ are pushed to the stack by
 *   CodeGeneratorMIPS::visitOutOfLineBailout(). Field frameClassId_ is forced
 *   to be NO_FRAME_SIZE_CLASS_ID (see JitRuntime::generateBailoutHandler).
 */
static void
GenerateBailoutThunk(JSContext *cx, MacroAssembler &masm, uint32_t frameClass)
{
    // NOTE: Members snapshotOffset_ and padding_ of BailoutStack
    // are not stored in this function.
    static const uint32_t bailoutDataSize = sizeof(BailoutStack) - 2 * sizeof(uintptr_t);
    static const uint32_t bailoutInfoOutParamSize = 2 * sizeof(uintptr_t);

    // Make sure that alignment is proper.
    masm.checkStackAlignment();

    // Make room for data.
    masm.subPtr(Imm32(bailoutDataSize), StackPointer);

    // Save general purpose registers.
    for (uint32_t i = 0; i < Registers::Total; i++) {
        uint32_t off = BailoutStack::offsetOfRegs() + i * sizeof(uintptr_t);
        masm.storePtr(Register::FromCode(i), Address(StackPointer, off));
    }

    // Save floating point registers
    // We can use as_sd because stack is aligned.
    for (uintptr_t i = 0; i < FloatRegisters::Total; i++)
        masm.as_sd(FloatRegister::FromCode(i), StackPointer,
                   BailoutStack::offsetOfFpRegs() + i * sizeof(double));

    // Store the frameSize_ or tableOffset_ stored in ra
    // See: JitRuntime::generateBailoutTable()
    // See: CodeGeneratorMIPS::generateOutOfLineCode()
    masm.storePtr(ra, Address(StackPointer, BailoutStack::offsetOfFrameSize()));

    // Put frame class to stack
    masm.storePtr(ImmWord(frameClass), Address(StackPointer, BailoutStack::offsetOfFrameClass()));

    // Put pointer to BailoutStack as first argument to the Bailout()
    masm.movePtr(StackPointer, a0);
    // Put pointer to BailoutInfo
    masm.subPtr(Imm32(bailoutInfoOutParamSize), StackPointer);
    masm.storePtr(ImmPtr(nullptr), Address(StackPointer, 0));
    masm.movePtr(StackPointer, a1);

    masm.setupAlignedABICall(2);
    masm.passABIArg(a0);
    masm.passABIArg(a1);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, Bailout));

    // Get BailoutInfo pointer
    masm.loadPtr(Address(StackPointer, 0), a2);

    // Remove both the bailout frame and the topmost Ion frame's stack.
    if (frameClass == NO_FRAME_SIZE_CLASS_ID) {
        // Load frameSize from stack
        masm.loadPtr(Address(StackPointer,
                             bailoutInfoOutParamSize + BailoutStack::offsetOfFrameSize()), a1);

        // Remove complete BailoutStack class and data after it
        masm.addPtr(Imm32(sizeof(BailoutStack) + bailoutInfoOutParamSize), StackPointer);
        // Remove frame size from stack
        masm.addPtr(a1, StackPointer);
    } else {
        uint32_t frameSize = FrameSizeClass::FromClass(frameClass).frameSize();
        // Remove the data this function added and the frame size.
        masm.addPtr(Imm32(bailoutDataSize + bailoutInfoOutParamSize + frameSize), StackPointer);
    }

    // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
    JitCode *bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
    masm.branch(bailoutTail);
}

JitCode *
JitRuntime::generateBailoutTable(JSContext *cx, uint32_t frameClass)
{
    MacroAssembler masm(cx);

    Label bailout;
    for (size_t i = 0; i < BAILOUT_TABLE_SIZE; i++) {
        // Calculate offset to the end of table
        int32_t offset = (BAILOUT_TABLE_SIZE - i) * BAILOUT_TABLE_ENTRY_SIZE;

        // We use the 'ra' as table offset later in GenerateBailoutThunk
        masm.as_bal(BOffImm16(offset));
        masm.nop();
    }
    masm.bind(&bailout);

    GenerateBailoutThunk(cx, masm, frameClass);

    Linker linker(masm);
    AutoFlushICache afc("BailoutTable");
    JitCode *code = linker.newCode(cx, JSC::OTHER_CODE);

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(code, "BailoutTable");
#endif

    return code;
}

JitCode *
JitRuntime::generateBailoutHandler(JSContext *cx)
{
    MacroAssembler masm(cx);
    GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID);

    Linker linker(masm);
    AutoFlushICache afc("BailoutHandler");
    JitCode *code = linker.newCode(cx, JSC::OTHER_CODE);

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(code, "BailoutHandler");
#endif

    return code;
}

JitCode *
JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
{
    MOZ_ASSERT(functionWrappers_);
    MOZ_ASSERT(functionWrappers_->initialized());
    VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
    if (p)
        return p->value();

    MacroAssembler masm(cx);

    GeneralRegisterSet regs = GeneralRegisterSet(Register::Codes::WrapperMask);

    static_assert((Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
                  "Wrapper register set should be a superset of Volatile register set.");

    // The context is the first argument; a0 is the first argument register.
    Register cxreg = a0;
    regs.take(cxreg);

    // We're aligned to an exit frame, so link it up.
    masm.enterExitFrameAndLoadContext(&f, cxreg, regs.getAny(), f.executionMode);

    // Save the base of the argument set stored on the stack.
    Register argsBase = InvalidReg;
    if (f.explicitArgs) {
        argsBase = t1; // Use temporary register.
        regs.take(argsBase);
        masm.ma_addu(argsBase, StackPointer, Imm32(IonExitFrameLayout::SizeWithFooter()));
    }

    // Reserve space for the outparameter.
    Register outReg = InvalidReg;
    switch (f.outParam) {
      case Type_Value:
        outReg = t0; // Use temporary register.
        regs.take(outReg);
        // Value outparam has to be 8 byte aligned because the called
        // function can use sdc1 or ldc1 instructions to access it.
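        // The extra (StackAlignment - sizeof(uintptr_t)) bytes reserved here
        // give alignPointerUp() room to round the outparam address up to
        // StackAlignment regardless of the current stack offset; the other
        // double-aligned outparam cases below use the same pattern.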
        masm.reserveStack((StackAlignment - sizeof(uintptr_t)) + sizeof(Value));
        masm.alignPointerUp(StackPointer, outReg, StackAlignment);
        break;

      case Type_Handle:
        outReg = t0;
        regs.take(outReg);
        if (f.outParamRootType == VMFunction::RootValue) {
            // Value outparam has to be 8 byte aligned because the called
            // function can use sdc1 or ldc1 instructions to access it.
            masm.reserveStack((StackAlignment - sizeof(uintptr_t)) + sizeof(Value));
            masm.alignPointerUp(StackPointer, outReg, StackAlignment);
            masm.storeValue(UndefinedValue(), Address(outReg, 0));
        } else {
            masm.PushEmptyRooted(f.outParamRootType);
            masm.movePtr(StackPointer, outReg);
        }
        break;

      case Type_Bool:
      case Type_Int32:
        MOZ_ASSERT(sizeof(uintptr_t) == sizeof(uint32_t));
      case Type_Pointer:
        outReg = t0;
        regs.take(outReg);
        masm.reserveStack(sizeof(uintptr_t));
        masm.movePtr(StackPointer, outReg);
        break;

      case Type_Double:
        outReg = t0;
        regs.take(outReg);
        // Double outparam has to be 8 byte aligned because the called
        // function can use sdc1 or ldc1 instructions to access it.
        masm.reserveStack((StackAlignment - sizeof(uintptr_t)) + sizeof(double));
        masm.alignPointerUp(StackPointer, outReg, StackAlignment);
        break;

      default:
        MOZ_ASSERT(f.outParam == Type_Void);
        break;
    }

    masm.setupUnalignedABICall(f.argc(), regs.getAny());
    masm.passABIArg(cxreg);

    size_t argDisp = 0;

    // Copy any arguments.
    for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
        MoveOperand from;
        switch (f.argProperties(explicitArg)) {
          case VMFunction::WordByValue:
            masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
            argDisp += sizeof(uint32_t);
            break;
          case VMFunction::DoubleByValue:
            // Values should be passed by reference, not by value, so we
            // assert that the argument is a double-precision float.
            MOZ_ASSERT(f.argPassedInFloatReg(explicitArg));
            masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
            argDisp += sizeof(double);
            break;
          case VMFunction::WordByRef:
            masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
                            MoveOp::GENERAL);
            argDisp += sizeof(uint32_t);
            break;
          case VMFunction::DoubleByRef:
            masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
                            MoveOp::GENERAL);
            argDisp += sizeof(double);
            break;
        }
    }

    // Copy the implicit outparam, if any.
    if (outReg != InvalidReg)
        masm.passABIArg(outReg);

    masm.callWithABI(f.wrapped);

    // Test for failure.
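    // Under the O32 ABI the wrapped function's integer or pointer return
    // value comes back in v0, which is the register the checks below test.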
    switch (f.failType()) {
      case Type_Object:
        masm.branchTestPtr(Assembler::Zero, v0, v0, masm.failureLabel(f.executionMode));
        break;
      case Type_Bool:
        // Called functions return bools, which are 0/false and non-zero/true
        masm.branchIfFalseBool(v0, masm.failureLabel(f.executionMode));
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("unknown failure kind");
    }

    // Load the outparam and free any allocated stack.
    switch (f.outParam) {
      case Type_Handle:
        if (f.outParamRootType == VMFunction::RootValue) {
            masm.alignPointerUp(StackPointer, SecondScratchReg, StackAlignment);
            masm.loadValue(Address(SecondScratchReg, 0), JSReturnOperand);
            masm.freeStack((StackAlignment - sizeof(uintptr_t)) + sizeof(Value));
        } else {
            masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
        }
        break;

      case Type_Value:
        masm.alignPointerUp(StackPointer, SecondScratchReg, StackAlignment);
        masm.loadValue(Address(SecondScratchReg, 0), JSReturnOperand);
        masm.freeStack((StackAlignment - sizeof(uintptr_t)) + sizeof(Value));
        break;

      case Type_Int32:
        MOZ_ASSERT(sizeof(uintptr_t) == sizeof(uint32_t));
      case Type_Pointer:
        masm.load32(Address(StackPointer, 0), ReturnReg);
        masm.freeStack(sizeof(uintptr_t));
        break;

      case Type_Bool:
        masm.load8ZeroExtend(Address(StackPointer, 0), ReturnReg);
        masm.freeStack(sizeof(uintptr_t));
        break;

      case Type_Double:
        if (cx->runtime()->jitSupportsFloatingPoint) {
            masm.alignPointerUp(StackPointer, SecondScratchReg, StackAlignment);
            // Address is aligned, so we can use as_ld.
            masm.as_ld(ReturnFloatReg, SecondScratchReg, 0);
        } else {
            masm.assumeUnreachable("Unable to load into float reg, with no FP support.");
        }
        masm.freeStack((StackAlignment - sizeof(uintptr_t)) + sizeof(double));
        break;

      default:
        MOZ_ASSERT(f.outParam == Type_Void);
        break;
    }
    masm.leaveExitFrame();
    masm.retn(Imm32(sizeof(IonExitFrameLayout) +
                    f.explicitStackSlots() * sizeof(uintptr_t) +
                    f.extraValuesToPop * sizeof(Value)));

    Linker linker(masm);
    AutoFlushICache afc("VMWrapper");
    JitCode *wrapper = linker.newCode(cx, JSC::OTHER_CODE);
    if (!wrapper)
        return nullptr;

    // linker.newCode may trigger a GC and sweep functionWrappers_ so we have
    // to use relookupOrAdd instead of add.
    if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
        return nullptr;

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(wrapper, "VMWrapper");
#endif

    return wrapper;
}

JitCode *
JitRuntime::generatePreBarrier(JSContext *cx, MIRType type)
{
    MacroAssembler masm(cx);

    RegisterSet save;
    if (cx->runtime()->jitSupportsFloatingPoint) {
        save = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
                           FloatRegisterSet(FloatRegisters::VolatileMask));
    } else {
        save = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
                           FloatRegisterSet());
    }
    masm.PushRegsInMask(save);

    MOZ_ASSERT(PreBarrierReg == a1);
    masm.movePtr(ImmPtr(cx->runtime()), a0);

    masm.setupUnalignedABICall(2, a2);
    masm.passABIArg(a0);
    masm.passABIArg(a1);

    if (type == MIRType_Value) {
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, MarkValueFromIon));
    } else {
        MOZ_ASSERT(type == MIRType_Shape);
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, MarkShapeFromIon));
    }

    masm.PopRegsInMask(save);
    masm.ret();

    Linker linker(masm);
    AutoFlushICache afc("PreBarrier");
    JitCode *code = linker.newCode(cx, JSC::OTHER_CODE);

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(code, "PreBarrier");
#endif

    return code;
}

typedef bool (*HandleDebugTrapFn)(JSContext *, BaselineFrame *, uint8_t *, bool *);
static const VMFunction HandleDebugTrapInfo = FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap);

JitCode *
JitRuntime::generateDebugTrapHandler(JSContext *cx)
{
    MacroAssembler masm(cx);

    Register scratch1 = t0;
    Register scratch2 = t1;

    // Load BaselineFrame pointer in scratch1.
    masm.movePtr(s5, scratch1);
    masm.subPtr(Imm32(BaselineFrame::Size()), scratch1);

    // Enter a stub frame and call the HandleDebugTrap VM function. Ensure
    // the stub frame has a nullptr ICStub pointer, since this pointer is
    // marked during GC.
    masm.movePtr(ImmPtr(nullptr), BaselineStubReg);
    EmitEnterStubFrame(masm, scratch2);

    JitCode *code = cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo);
    if (!code)
        return nullptr;

    masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
    masm.storePtr(ra, Address(StackPointer, sizeof(uintptr_t)));
    masm.storePtr(scratch1, Address(StackPointer, 0));

    EmitCallVM(code, masm);

    EmitLeaveStubFrame(masm);

    // If the stub returns |true|, we have to perform a forced return
    // (return from the JS frame). If the stub returns |false|, just return
    // from the trap stub so that execution continues at the current pc.
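    // ReturnReg holds the boolean outparam written by HandleDebugTrap: the
    // VM wrapper generated above loads a Type_Bool outparam into ReturnReg
    // before returning (see generateVMWrapper), so testing it here checks
    // whether a forced return was requested.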
    Label forcedReturn;
    masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &forcedReturn);

    // ra was restored by EmitLeaveStubFrame
    masm.branch(ra);

    masm.bind(&forcedReturn);
    masm.loadValue(Address(s5, BaselineFrame::reverseOffsetOfReturnValue()),
                   JSReturnOperand);
    masm.movePtr(s5, StackPointer);
    masm.pop(s5);
    masm.ret();

    Linker linker(masm);
    AutoFlushICache afc("DebugTrapHandler");
    JitCode *codeDbg = linker.newCode(cx, JSC::OTHER_CODE);

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(codeDbg, "DebugTrapHandler");
#endif

    return codeDbg;
}


JitCode *
JitRuntime::generateExceptionTailStub(JSContext *cx)
{
    MacroAssembler masm;

    masm.handleFailureWithHandlerTail();

    Linker linker(masm);
    AutoFlushICache afc("ExceptionTailStub");
    JitCode *code = linker.newCode(cx, JSC::OTHER_CODE);

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(code, "ExceptionTailStub");
#endif

    return code;
}

JitCode *
JitRuntime::generateBailoutTailStub(JSContext *cx)
{
    MacroAssembler masm;

    masm.generateBailoutTail(a1, a2);

    Linker linker(masm);
    AutoFlushICache afc("BailoutTailStub");
    JitCode *code = linker.newCode(cx, JSC::OTHER_CODE);

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(code, "BailoutTailStub");
#endif

    return code;
}