/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/shared/CodeGenerator-shared-inl.h"

#include "mozilla/DebugOnly.h"

#include "jit/IonCaches.h"
#include "jit/IonMacroAssembler.h"
#include "jit/IonSpewer.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
#include "jit/ParallelFunctions.h"
#include "vm/TraceLogging.h"

#include "jit/IonFrames-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::DebugOnly;

namespace js {
namespace jit {

MacroAssembler &
CodeGeneratorShared::ensureMasm(MacroAssembler *masmArg)
{
    if (masmArg)
        return *masmArg;
    maybeMasm_.construct();
    return maybeMasm_.ref();
}

CodeGeneratorShared::CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masmArg)
  : oolIns(nullptr),
    maybeMasm_(),
    masm(ensureMasm(masmArg)),
    gen(gen),
    graph(*graph),
    current(nullptr),
    snapshots_(),
    recovers_(),
    deoptTable_(nullptr),
#ifdef DEBUG
    pushedArgs_(0),
#endif
    lastOsiPointOffset_(0),
    sps_(&GetIonContext()->runtime->spsProfiler(), &lastPC_),
    osrEntryOffset_(0),
    skipArgCheckEntryOffset_(0),
    frameDepth_(graph->paddedLocalSlotsSize() + graph->argumentsSize())
{
    if (!gen->compilingAsmJS())
        masm.setInstrumentation(&sps_);

    // Since asm.js uses the system ABI which does not necessarily use a
    // regular array where all slots are sizeof(Value), it maintains the max
    // argument stack depth separately.
    if (gen->compilingAsmJS()) {
        JS_ASSERT(graph->argumentSlotCount() == 0);
        frameDepth_ += gen->maxAsmJSStackArgBytes();

        // An MAsmJSCall does not align the stack pointer at call sites but
        // instead relies on the a priori stack adjustment (in the prologue)
        // on platforms (like x64) which require the stack to be aligned.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
        bool forceAlign = true;
#else
        bool forceAlign = false;
#endif
        if (gen->performsAsmJSCall() || forceAlign) {
            unsigned alignmentAtCall = AlignmentMidPrologue + frameDepth_;
            if (unsigned rem = alignmentAtCall % StackAlignment)
                frameDepth_ += StackAlignment - rem;
        }
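
        // As a concrete illustration (the constants are platform-specific,
        // so these numbers are only assumed values): with AlignmentMidPrologue
        // == 8 and StackAlignment == 16, a frameDepth_ of 36 gives
        // alignmentAtCall == 44 and rem == 12, so frameDepth_ is padded by 4
        // and the prologue's single adjustment keeps every call site aligned.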

        // FrameSizeClass is only used for bailing, which cannot happen in
        // asm.js code.
        frameClass_ = FrameSizeClass::None();
    } else {
        frameClass_ = FrameSizeClass::FromDepth(frameDepth_);
    }
}

bool
CodeGeneratorShared::generateOutOfLineCode()
{
    for (size_t i = 0; i < outOfLineCode_.length(); i++) {
        if (!gen->alloc().ensureBallast())
            return false;
        masm.setFramePushed(outOfLineCode_[i]->framePushed());
        lastPC_ = outOfLineCode_[i]->pc();
        if (!sps_.prepareForOOL())
            return false;
        sps_.setPushed(outOfLineCode_[i]->script());
        outOfLineCode_[i]->bind(&masm);

        oolIns = outOfLineCode_[i];
        if (!outOfLineCode_[i]->generate(this))
            return false;
        sps_.finishOOL();
    }
    oolIns = nullptr;

    return true;
}

bool
CodeGeneratorShared::addOutOfLineCode(OutOfLineCode *code)
{
    code->setFramePushed(masm.framePushed());
    // If an OOL instruction adds another OOL instruction, then use the
    // original instruction's script/pc instead of the current basic block's,
    // because they're probably not relevant any more.
    if (oolIns)
        code->setSource(oolIns->script(), oolIns->pc());
    else
        code->setSource(current ? current->mir()->info().script() : nullptr, lastPC_);
    JS_ASSERT_IF(code->script(), code->script()->containsPC(code->pc()));
    return outOfLineCode_.append(code);
}

// see OffsetOfFrameSlot
static inline int32_t
ToStackIndex(LAllocation *a)
{
    if (a->isStackSlot()) {
        JS_ASSERT(a->toStackSlot()->slot() >= 1);
        return a->toStackSlot()->slot();
    }
    JS_ASSERT(-int32_t(sizeof(IonJSFrameLayout)) <= a->toArgument()->index());
    return -int32_t(sizeof(IonJSFrameLayout) + a->toArgument()->index());
}
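
// Illustrative reading of the encoding above (the exact frame layout is
// defined by IonJSFrameLayout): a value spilled to stack slot 12 encodes as
// the positive index 12, while the argument at index 8 encodes as
// -int32_t(sizeof(IonJSFrameLayout) + 8), i.e. arguments are addressed with
// negative indices on the far side of the frame header.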

bool
CodeGeneratorShared::encodeAllocations(LSnapshot *snapshot, MResumePoint *resumePoint,
                                       uint32_t *startIndex)
{
    IonSpew(IonSpew_Codegen, "Encoding %u of resume point %p's operands starting from %u",
            resumePoint->numOperands(), (void *) resumePoint, *startIndex);
    for (uint32_t allocno = 0, e = resumePoint->numOperands(); allocno < e; allocno++) {
        uint32_t i = allocno + *startIndex;
        MDefinition *mir = resumePoint->getOperand(allocno);

        if (mir->isBox())
            mir = mir->toBox()->getOperand(0);

        MIRType type = mir->isUnused()
                       ? MIRType_MagicOptimizedOut
                       : mir->type();

        RValueAllocation alloc;

        switch (type) {
          case MIRType_Undefined:
            alloc = RValueAllocation::Undefined();
            break;
          case MIRType_Null:
            alloc = RValueAllocation::Null();
            break;
          case MIRType_Int32:
          case MIRType_String:
          case MIRType_Object:
          case MIRType_Boolean:
          case MIRType_Double:
          case MIRType_Float32:
          {
            LAllocation *payload = snapshot->payloadOfSlot(i);
            JSValueType valueType = ValueTypeFromMIRType(type);
            if (payload->isMemory()) {
                if (type == MIRType_Float32)
                    alloc = RValueAllocation::Float32(ToStackIndex(payload));
                else
                    alloc = RValueAllocation::Typed(valueType, ToStackIndex(payload));
            } else if (payload->isGeneralReg()) {
                alloc = RValueAllocation::Typed(valueType, ToRegister(payload));
            } else if (payload->isFloatReg()) {
                FloatRegister reg = ToFloatRegister(payload);
                if (type == MIRType_Float32)
                    alloc = RValueAllocation::Float32(reg);
                else
                    alloc = RValueAllocation::Double(reg);
            } else {
                MConstant *constant = mir->toConstant();
                uint32_t index;
                if (!graph.addConstantToPool(constant->value(), &index))
                    return false;
                alloc = RValueAllocation::ConstantPool(index);
            }
            break;
          }
          case MIRType_MagicOptimizedArguments:
          case MIRType_MagicOptimizedOut:
          {
            uint32_t index;
            JSWhyMagic why = (type == MIRType_MagicOptimizedArguments
                              ? JS_OPTIMIZED_ARGUMENTS
                              : JS_OPTIMIZED_OUT);
            Value v = MagicValue(why);
            if (!graph.addConstantToPool(v, &index))
                return false;
            alloc = RValueAllocation::ConstantPool(index);
            break;
          }
          default:
          {
            JS_ASSERT(mir->type() == MIRType_Value);
            LAllocation *payload = snapshot->payloadOfSlot(i);
#ifdef JS_NUNBOX32
            LAllocation *type = snapshot->typeOfSlot(i);
            if (type->isRegister()) {
                if (payload->isRegister())
                    alloc = RValueAllocation::Untyped(ToRegister(type), ToRegister(payload));
                else
                    alloc = RValueAllocation::Untyped(ToRegister(type), ToStackIndex(payload));
            } else {
                if (payload->isRegister())
                    alloc = RValueAllocation::Untyped(ToStackIndex(type), ToRegister(payload));
                else
                    alloc = RValueAllocation::Untyped(ToStackIndex(type), ToStackIndex(payload));
            }
#elif JS_PUNBOX64
            if (payload->isRegister())
                alloc = RValueAllocation::Untyped(ToRegister(payload));
            else
                alloc = RValueAllocation::Untyped(ToStackIndex(payload));
#endif
            break;
          }
        }

        snapshots_.add(alloc);
    }

    *startIndex += resumePoint->numOperands();
    return true;
}

bool
CodeGeneratorShared::encode(LRecoverInfo *recover)
{
    if (recover->recoverOffset() != INVALID_RECOVER_OFFSET)
        return true;

    uint32_t frameCount = recover->mir()->frameCount();
    IonSpew(IonSpew_Snapshots, "Encoding LRecoverInfo %p (frameCount %u)",
            (void *)recover, frameCount);

    MResumePoint::Mode mode = recover->mir()->mode();
    JS_ASSERT(mode != MResumePoint::Outer);
    bool resumeAfter = (mode == MResumePoint::ResumeAfter);

    RecoverOffset offset = recovers_.startRecover(frameCount, resumeAfter);

    for (MResumePoint **it = recover->begin(), **end = recover->end();
         it != end;
         ++it)
    {
        if (!recovers_.writeFrame(*it))
            return false;
    }

    recovers_.endRecover();
    recover->setRecoverOffset(offset);
    return !recovers_.oom();
}

bool
CodeGeneratorShared::encode(LSnapshot *snapshot)
{
    if (snapshot->snapshotOffset() != INVALID_SNAPSHOT_OFFSET)
        return true;

    LRecoverInfo *recoverInfo = snapshot->recoverInfo();
    if (!encode(recoverInfo))
        return false;

    RecoverOffset recoverOffset = recoverInfo->recoverOffset();
    MOZ_ASSERT(recoverOffset != INVALID_RECOVER_OFFSET);

    IonSpew(IonSpew_Snapshots, "Encoding LSnapshot %p (LRecover %p)",
            (void *)snapshot, (void *)recoverInfo);

    SnapshotOffset offset = snapshots_.startSnapshot(recoverOffset, snapshot->bailoutKind());

#ifdef TRACK_SNAPSHOTS
    uint32_t pcOpcode = 0;
    uint32_t lirOpcode = 0;
    uint32_t lirId = 0;
    uint32_t mirOpcode = 0;
    uint32_t mirId = 0;

    if (LInstruction *ins = instruction()) {
        lirOpcode = ins->op();
        lirId = ins->id();
        if (ins->mirRaw()) {
            mirOpcode = ins->mirRaw()->op();
            mirId = ins->mirRaw()->id();
            if (ins->mirRaw()->trackedPc())
                pcOpcode = *ins->mirRaw()->trackedPc();
        }
    }
    snapshots_.trackSnapshot(pcOpcode, mirOpcode, mirId, lirOpcode, lirId);
#endif

    uint32_t startIndex = 0;
    for (MResumePoint **it = recoverInfo->begin(), **end = recoverInfo->end();
         it != end;
         ++it)
    {
        MResumePoint *mir = *it;
        if (!encodeAllocations(snapshot, mir, &startIndex))
            return false;
    }

    MOZ_ASSERT(snapshots_.allocWritten() == snapshot->numSlots());
    snapshots_.endSnapshot();
    snapshot->setSnapshotOffset(offset);
    return !snapshots_.oom();
}

bool
CodeGeneratorShared::assignBailoutId(LSnapshot *snapshot)
{
    JS_ASSERT(snapshot->snapshotOffset() != INVALID_SNAPSHOT_OFFSET);

    // Can we not use bailout tables at all?
    if (!deoptTable_)
        return false;

    JS_ASSERT(frameClass_ != FrameSizeClass::None());

    if (snapshot->bailoutId() != INVALID_BAILOUT_ID)
        return true;

    // Is the bailout table full?
    if (bailouts_.length() >= BAILOUT_TABLE_SIZE)
        return false;

    unsigned bailoutId = bailouts_.length();
    snapshot->setBailoutId(bailoutId);
    IonSpew(IonSpew_Snapshots, "Assigned snapshot bailout id %u", bailoutId);
    return bailouts_.append(snapshot->snapshotOffset());
}

void
CodeGeneratorShared::encodeSafepoints()
{
    for (SafepointIndex *it = safepointIndices_.begin(), *end = safepointIndices_.end();
         it != end;
         ++it)
    {
        LSafepoint *safepoint = it->safepoint();

        if (!safepoint->encoded()) {
            safepoint->fixupOffset(&masm);
            safepoints_.encode(safepoint);
        }

        it->resolve();
    }
}

bool
CodeGeneratorShared::markSafepoint(LInstruction *ins)
{
    return markSafepointAt(masm.currentOffset(), ins);
}

bool
CodeGeneratorShared::markSafepointAt(uint32_t offset, LInstruction *ins)
{
    JS_ASSERT_IF(!safepointIndices_.empty(),
                 offset - safepointIndices_.back().displacement() >= sizeof(uint32_t));
    return safepointIndices_.append(SafepointIndex(offset, ins->safepoint()));
}
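
// ensureOsiSpace() below pads with nops so that patching the invalidation
// call at one OSI point can never overwrite the previous one. Purely
// illustrative arithmetic (patchWrite_NearCallSize() is platform-specific;
// 5 bytes is just an assumed value): with a 5-byte patch size and only 2
// bytes of code emitted since the last OSI point, 3 nops are inserted before
// the new offset is recorded.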

void
CodeGeneratorShared::ensureOsiSpace()
{
    // For a refresher, an invalidation point is of the form:
    // 1: call <target>
    // 2: ...
    // 3: <body>
    //
    // The four bytes *before* instruction 2 are overwritten with an offset.
    // Callers must ensure that the instruction itself has enough bytes to
    // support this.
    //
    // The bytes *at* instruction 3 are overwritten with an invalidation jump.
    // These bytes may be in a completely different IR sequence, but represent
    // the join point of the call out of the function.
    //
    // At points where we want to ensure that invalidation won't corrupt an
    // important instruction, we make sure to pad with nops.
    if (masm.currentOffset() - lastOsiPointOffset_ < Assembler::patchWrite_NearCallSize()) {
        int32_t paddingSize = Assembler::patchWrite_NearCallSize();
        paddingSize -= masm.currentOffset() - lastOsiPointOffset_;
        for (int32_t i = 0; i < paddingSize; ++i)
            masm.nop();
    }
    JS_ASSERT(masm.currentOffset() - lastOsiPointOffset_ >= Assembler::patchWrite_NearCallSize());
    lastOsiPointOffset_ = masm.currentOffset();
}

bool
CodeGeneratorShared::markOsiPoint(LOsiPoint *ins, uint32_t *callPointOffset)
{
    if (!encode(ins->snapshot()))
        return false;

    ensureOsiSpace();

    *callPointOffset = masm.currentOffset();
    SnapshotOffset so = ins->snapshot()->snapshotOffset();
    return osiIndices_.append(OsiIndex(*callPointOffset, so));
}

#ifdef CHECK_OSIPOINT_REGISTERS
template <typename Op>
static void
HandleRegisterDump(Op op, MacroAssembler &masm, RegisterSet liveRegs, Register activation,
                   Register scratch)
{
    const size_t baseOffset = JitActivation::offsetOfRegs();

    // Handle live GPRs.
    for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); iter++) {
        Register reg = *iter;
        Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));

        if (reg == activation) {
            // To use the original value of the activation register (that's
            // now on top of the stack), we need the scratch register.
            masm.push(scratch);
            masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), scratch);
            op(scratch, dump);
            masm.pop(scratch);
        } else {
            op(reg, dump);
        }
    }

    // Handle live FPRs.
    for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); iter++) {
        FloatRegister reg = *iter;
        Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
        op(reg, dump);
    }
}

class StoreOp
{
    MacroAssembler &masm;

  public:
    StoreOp(MacroAssembler &masm)
      : masm(masm)
    {}

    void operator()(Register reg, Address dump) {
        masm.storePtr(reg, dump);
    }
    void operator()(FloatRegister reg, Address dump) {
        masm.storeDouble(reg, dump);
    }
};
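
// How the register-check counter is used by the helpers below:
// JitActivation::checkRegs is reset to 0 by resetOsiPointRegs(), incremented
// by StoreAllLiveRegs() before each checked VM call, and inspected by
// verifyOsiPointRegs(): 0 means there is nothing to check, 1 is the expected
// value, and anything larger means a single LIR instruction made several
// VM calls.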

static void
StoreAllLiveRegs(MacroAssembler &masm, RegisterSet liveRegs)
{
    // Store a copy of all live registers before performing the call.
    // When we reach the OsiPoint, we can use this to check nothing
    // modified them in the meantime.

    // Load pointer to the JitActivation in a scratch register.
    GeneralRegisterSet allRegs(GeneralRegisterSet::All());
    Register scratch = allRegs.takeAny();
    masm.push(scratch);
    masm.loadJitActivation(scratch);

    Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
    masm.add32(Imm32(1), checkRegs);

    StoreOp op(masm);
    HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());

    masm.pop(scratch);
}

class VerifyOp
{
    MacroAssembler &masm;
    Label *failure_;

  public:
    VerifyOp(MacroAssembler &masm, Label *failure)
      : masm(masm), failure_(failure)
    {}

    void operator()(Register reg, Address dump) {
        masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
    }
    void operator()(FloatRegister reg, Address dump) {
        masm.loadDouble(dump, ScratchFloatReg);
        masm.branchDouble(Assembler::DoubleNotEqual, ScratchFloatReg, reg, failure_);
    }
};

static void
OsiPointRegisterCheckFailed()
{
    // Any live register captured by a safepoint (other than temp registers)
    // must remain unchanged between the call and the OsiPoint instruction.
    MOZ_ASSUME_UNREACHABLE("Modified registers between VM call and OsiPoint");
}

void
CodeGeneratorShared::verifyOsiPointRegs(LSafepoint *safepoint)
{
    // Ensure the live registers stored by callVM did not change between
    // the call and this OsiPoint. Try-catch relies on this invariant.

    // Load pointer to the JitActivation in a scratch register.
    GeneralRegisterSet allRegs(GeneralRegisterSet::All());
    Register scratch = allRegs.takeAny();
    masm.push(scratch);
    masm.loadJitActivation(scratch);

    // If we should not check registers (because the instruction did not call
    // into the VM, or a GC happened), we're done.
    Label failure, done;
    Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
    masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);

    // Having more than one VM function call made in one visit function at
    // runtime is a security-critical error, because if we conservatively
    // assume that one of the function calls can re-enter Ion, then the
    // invalidation process will potentially add a call at a random location,
    // by patching the code before the return address.
    masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);

    // Ignore clobbered registers. Some instructions (like LValueToInt32)
    // modify temps after calling into the VM. This is fine because no other
    // instructions (including this OsiPoint) will depend on them. Also,
    // backtracking can use the same register for an input and an output.
    // These are marked as clobbered and shouldn't get checked.
    RegisterSet liveRegs = safepoint->liveRegs();
    liveRegs = RegisterSet::Intersect(liveRegs, RegisterSet::Not(safepoint->clobberedRegs()));

    VerifyOp op(masm, &failure);
    HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());

    masm.jump(&done);

    // Do not profile the callWithABI that occurs below. This is to avoid a
    // rare corner case that occurs when profiling interacts with itself:
    //
    // When slow profiling assertions are turned on, FunctionBoundary ops
    // (which update the profiler pseudo-stack) may emit a callVM, which
    // forces them to have an osi point associated with them. The
    // FunctionBoundary for inline function entry is added to the caller's
    // graph with a PC from the caller's code, but during codegen it modifies
    // SPS instrumentation to add the callee as the current top-most script.
    // When codegen gets to the OSIPoint, and the callWithABI below is
    // emitted, the codegen thinks that the current frame is the callee, but
    // the PC it's using from the OSIPoint refers to the caller. This causes
    // the profiler instrumentation of the callWithABI below to ASSERT, since
    // the script and pc are mismatched. To avoid this, we simply omit
    // instrumentation for these callWithABIs.
    masm.bind(&failure);
    masm.setupUnalignedABICall(0, scratch);
    masm.callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, OsiPointRegisterCheckFailed));
    masm.breakpoint();

    masm.bind(&done);
    masm.pop(scratch);
}

bool
CodeGeneratorShared::shouldVerifyOsiPointRegs(LSafepoint *safepoint)
{
    if (!js_JitOptions.checkOsiPointRegisters)
        return false;

    if (gen->info().executionMode() != SequentialExecution)
        return false;

    if (safepoint->liveRegs().empty(true) && safepoint->liveRegs().empty(false))
        return false; // No registers to check.

    return true;
}

void
CodeGeneratorShared::resetOsiPointRegs(LSafepoint *safepoint)
{
    if (!shouldVerifyOsiPointRegs(safepoint))
        return;

    // Set checkRegs to 0. If we perform a VM call, the instruction
    // will set it to 1.
    GeneralRegisterSet allRegs(GeneralRegisterSet::All());
    Register scratch = allRegs.takeAny();
    masm.push(scratch);
    masm.loadJitActivation(scratch);
    Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
    masm.store32(Imm32(0), checkRegs);
    masm.pop(scratch);
}
#endif
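
// A typical use of callVM() from a visit function looks roughly like the
// sketch below (SomeVMFunctionInfo and the LIR accessor are hypothetical
// names, shown only to illustrate the pushArg/callVM pairing):
//
//   pushArg(ToRegister(lir->input()));  // pushes must match fun.explicitArgs
//   if (!callVM(SomeVMFunctionInfo, lir))
//       return false;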

// Before doing any call to Cpp, you should ensure that volatile
// registers are evicted by the register allocator.
bool
CodeGeneratorShared::callVM(const VMFunction &fun, LInstruction *ins, const Register *dynStack)
{
    // Different execution modes have different sets of VM functions.
    JS_ASSERT(fun.executionMode == gen->info().executionMode());

    // If we're calling a function with an out parameter type of double, make
    // sure we have an FPU.
    JS_ASSERT_IF(fun.outParam == Type_Double, GetIonContext()->runtime->jitSupportsFloatingPoint());

#ifdef DEBUG
    if (ins->mirRaw()) {
        JS_ASSERT(ins->mirRaw()->isInstruction());
        MInstruction *mir = ins->mirRaw()->toInstruction();
        JS_ASSERT_IF(mir->isEffectful(), mir->resumePoint());
    }
#endif

#ifdef JS_TRACE_LOGGING
    if (!emitTracelogStartEvent(TraceLogger::VM))
        return false;
#endif

    // Stack is:
    //    ... frame ...
    //    [args]
#ifdef DEBUG
    JS_ASSERT(pushedArgs_ == fun.explicitArgs);
    pushedArgs_ = 0;
#endif

    // Get the wrapper of the VM function.
    JitCode *wrapper = gen->jitRuntime()->getVMWrapper(fun);
    if (!wrapper)
        return false;

#ifdef CHECK_OSIPOINT_REGISTERS
    if (shouldVerifyOsiPointRegs(ins->safepoint()))
        StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
#endif

    // Call the wrapper function. The wrapper is in charge of unwinding the
    // stack when returning from the call. Failures are handled with
    // exceptions based on the return value of the C functions. To guard the
    // outcome of the returned value, use another LIR instruction.
    uint32_t callOffset;
    if (dynStack)
        callOffset = masm.callWithExitFrame(wrapper, *dynStack);
    else
        callOffset = masm.callWithExitFrame(wrapper);

    if (!markSafepointAt(callOffset, ins))
        return false;

    // Remove rest of the frame left on the stack. We remove the return
    // address, which is implicitly popped when returning.
    int framePop = sizeof(IonExitFrameLayout) - sizeof(void*);

    // Pop arguments from framePushed.
    masm.implicitPop(fun.explicitStackSlots() * sizeof(void *) + framePop);
    // Stack is:
    //    ... frame ...

#ifdef JS_TRACE_LOGGING
    if (!emitTracelogStopEvent(TraceLogger::VM))
        return false;
#endif

    return true;
}

class OutOfLineTruncateSlow : public OutOfLineCodeBase<CodeGeneratorShared>
{
    FloatRegister src_;
    Register dest_;
    bool needFloat32Conversion_;

  public:
    OutOfLineTruncateSlow(FloatRegister src, Register dest, bool needFloat32Conversion = false)
      : src_(src), dest_(dest), needFloat32Conversion_(needFloat32Conversion)
    { }

    bool accept(CodeGeneratorShared *codegen) {
        return codegen->visitOutOfLineTruncateSlow(this);
    }
    FloatRegister src() const {
        return src_;
    }
    Register dest() const {
        return dest_;
    }
    bool needFloat32Conversion() const {
        return needFloat32Conversion_;
    }
};

OutOfLineCode *
CodeGeneratorShared::oolTruncateDouble(const FloatRegister &src, const Register &dest)
{
    OutOfLineTruncateSlow *ool = new(alloc()) OutOfLineTruncateSlow(src, dest);
    if (!addOutOfLineCode(ool))
        return nullptr;
    return ool;
}

bool
CodeGeneratorShared::emitTruncateDouble(const FloatRegister &src, const Register &dest)
{
    OutOfLineCode *ool = oolTruncateDouble(src, dest);
    if (!ool)
        return false;

    masm.branchTruncateDouble(src, dest, ool->entry());
    masm.bind(ool->rejoin());
    return true;
}

bool
CodeGeneratorShared::emitTruncateFloat32(const FloatRegister &src, const Register &dest)
{
    OutOfLineTruncateSlow *ool = new(alloc()) OutOfLineTruncateSlow(src, dest, true);
    if (!addOutOfLineCode(ool))
        return false;

    masm.branchTruncateFloat32(src, dest, ool->entry());
    masm.bind(ool->rejoin());
    return true;
}

bool
CodeGeneratorShared::visitOutOfLineTruncateSlow(OutOfLineTruncateSlow *ool)
{
    FloatRegister src = ool->src();
    Register dest = ool->dest();

    saveVolatile(dest);

    if (ool->needFloat32Conversion()) {
        masm.push(src);
        masm.convertFloat32ToDouble(src, src);
    }

    masm.setupUnalignedABICall(1, dest);
    masm.passABIArg(src, MoveOp::DOUBLE);
    if (gen->compilingAsmJS())
        masm.callWithABI(AsmJSImm_ToInt32);
    else
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, js::ToInt32));
    masm.storeCallResult(dest);

    if (ool->needFloat32Conversion())
        masm.pop(src);

    restoreVolatile(dest);

    masm.jump(ool->rejoin());
    return true;
}

bool
CodeGeneratorShared::omitOverRecursedCheck() const
{
    // If the current function makes no calls (which means it isn't recursive)
    // and it uses only a small amount of stack space, it doesn't need a
    // stack overflow check.
    // Note that the actual number here is somewhat arbitrary, and codegen
    // actually uses small bounded amounts of additional stack space in some
    // cases too.
    return frameSize() < 64 && !gen->performsCall();
}

void
CodeGeneratorShared::emitPreBarrier(Register base, const LAllocation *index, MIRType type)
{
    if (index->isConstant()) {
        Address address(base, ToInt32(index) * sizeof(Value));
        masm.patchableCallPreBarrier(address, type);
    } else {
        BaseIndex address(base, ToRegister(index), TimesEight);
        masm.patchableCallPreBarrier(address, type);
    }
}

void
CodeGeneratorShared::emitPreBarrier(Address address, MIRType type)
{
    masm.patchableCallPreBarrier(address, type);
}

void
CodeGeneratorShared::dropArguments(unsigned argc)
{
    pushedArgumentSlots_.shrinkBy(argc);
}

bool
CodeGeneratorShared::markArgumentSlots(LSafepoint *safepoint)
{
    for (size_t i = 0; i < pushedArgumentSlots_.length(); i++) {
        if (!safepoint->addValueSlot(pushedArgumentSlots_[i]))
            return false;
    }
    return true;
}

OutOfLineAbortPar *
CodeGeneratorShared::oolAbortPar(ParallelBailoutCause cause, MBasicBlock *basicBlock,
                                 jsbytecode *bytecode)
{
    OutOfLineAbortPar *ool = new(alloc()) OutOfLineAbortPar(cause, basicBlock, bytecode);
    if (!ool || !addOutOfLineCode(ool))
        return nullptr;
    return ool;
}

OutOfLineAbortPar *
CodeGeneratorShared::oolAbortPar(ParallelBailoutCause cause, LInstruction *lir)
{
    MDefinition *mir = lir->mirRaw();
    MBasicBlock *block = mir->block();
    jsbytecode *pc = mir->trackedPc();
    if (!pc) {
        if (lir->snapshot())
            pc = lir->snapshot()->mir()->pc();
        else
            pc = block->pc();
    }
    return oolAbortPar(cause, block, pc);
}

OutOfLinePropagateAbortPar *
CodeGeneratorShared::oolPropagateAbortPar(LInstruction *lir)
{
    OutOfLinePropagateAbortPar *ool = new(alloc()) OutOfLinePropagateAbortPar(lir);
    if (!ool || !addOutOfLineCode(ool))
        return nullptr;
    return ool;
}

bool
OutOfLineAbortPar::generate(CodeGeneratorShared *codegen)
{
    codegen->callTraceLIR(0xDEADBEEF, nullptr, "AbortPar");
    return codegen->visitOutOfLineAbortPar(this);
}

bool
OutOfLinePropagateAbortPar::generate(CodeGeneratorShared *codegen)
{
    codegen->callTraceLIR(0xDEADBEEF, nullptr, "AbortPar");
    return codegen->visitOutOfLinePropagateAbortPar(this);
}

bool
CodeGeneratorShared::callTraceLIR(uint32_t blockIndex, LInstruction *lir,
                                  const char *bailoutName)
{
    JS_ASSERT_IF(!lir, bailoutName);

    if (!IonSpewEnabled(IonSpew_Trace))
        return true;

    uint32_t execMode = (uint32_t) gen->info().executionMode();
    uint32_t lirIndex;
    const char *lirOpName;
    const char *mirOpName;
    JSScript *script;
    jsbytecode *pc;

    masm.PushRegsInMask(RegisterSet::Volatile());
    masm.reserveStack(sizeof(IonLIRTraceData));

    // This first move is here so that when you scan the disassembly,
    // you can easily pick out where each instruction begins. The
    // next few items indicate to you the Basic Block / LIR.
    masm.move32(Imm32(0xDEADBEEF), CallTempReg0);

    if (lir) {
        lirIndex = lir->id();
        lirOpName = lir->opName();
        if (MDefinition *mir = lir->mirRaw()) {
            mirOpName = mir->opName();
            script = mir->block()->info().script();
            pc = mir->trackedPc();
        } else {
            mirOpName = nullptr;
            script = nullptr;
            pc = nullptr;
        }
    } else {
        blockIndex = lirIndex = 0xDEADBEEF;
        lirOpName = mirOpName = bailoutName;
        script = nullptr;
        pc = nullptr;
    }

    masm.store32(Imm32(blockIndex),
                 Address(StackPointer, offsetof(IonLIRTraceData, blockIndex)));
    masm.store32(Imm32(lirIndex),
                 Address(StackPointer, offsetof(IonLIRTraceData, lirIndex)));
    masm.store32(Imm32(execMode),
                 Address(StackPointer, offsetof(IonLIRTraceData, execModeInt)));
    masm.storePtr(ImmPtr(lirOpName),
                  Address(StackPointer, offsetof(IonLIRTraceData, lirOpName)));
    masm.storePtr(ImmPtr(mirOpName),
                  Address(StackPointer, offsetof(IonLIRTraceData, mirOpName)));
    masm.storePtr(ImmGCPtr(script),
                  Address(StackPointer, offsetof(IonLIRTraceData, script)));
    masm.storePtr(ImmPtr(pc),
                  Address(StackPointer, offsetof(IonLIRTraceData, pc)));

    masm.movePtr(StackPointer, CallTempReg0);
    masm.setupUnalignedABICall(1, CallTempReg1);
    masm.passABIArg(CallTempReg0);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, TraceLIR));

    masm.freeStack(sizeof(IonLIRTraceData));
    masm.PopRegsInMask(RegisterSet::Volatile());

    return true;
}

typedef bool (*InterruptCheckFn)(JSContext *);
const VMFunction InterruptCheckInfo = FunctionInfo<InterruptCheckFn>(InterruptCheck);

Label *
CodeGeneratorShared::labelForBackedgeWithImplicitCheck(MBasicBlock *mir)
{
    // If this is a loop backedge to a loop header with an implicit interrupt
    // check, use a patchable jump. Skip this search if compiling without a
    // script for asm.js, as there will be no interrupt check instruction.
    // Due to critical edge unsplitting there may no longer be unique loop
    // backedges, so just look for any edge going to an earlier block in RPO.
    if (!gen->compilingAsmJS() && mir->isLoopHeader() && mir->id() <= current->mir()->id()) {
        for (LInstructionIterator iter = mir->lir()->begin(); iter != mir->lir()->end(); iter++) {
            if (iter->isLabel() || iter->isMoveGroup()) {
                // Continue searching for an interrupt check.
            } else if (iter->isInterruptCheckImplicit()) {
                return iter->toInterruptCheckImplicit()->oolEntry();
            } else {
                // The interrupt check should be the first instruction in the
                // loop header other than the initial label and move groups.
                JS_ASSERT(iter->isInterruptCheck() || iter->isInterruptCheckPar());
                return nullptr;
            }
        }
    }

    return nullptr;
}

void
CodeGeneratorShared::jumpToBlock(MBasicBlock *mir)
{
    // No jump necessary if we can fall through to the next block.
    if (isNextBlock(mir->lir()))
        return;

    if (Label *oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
        // Note: the backedge is initially a jump to the next instruction.
        // It will be patched to the target block's label during link().
        RepatchLabel rejoin;
        CodeOffsetJump backedge = masm.jumpWithPatch(&rejoin);
        masm.bind(&rejoin);

        masm.propagateOOM(patchableBackedges_.append(PatchableBackedgeInfo(backedge, mir->lir()->label(), oolEntry)));
    } else {
        masm.jump(mir->lir()->label());
    }
}

// This function is not used for MIPS. MIPS has branchToBlock.
#ifndef JS_CODEGEN_MIPS
void
CodeGeneratorShared::jumpToBlock(MBasicBlock *mir, Assembler::Condition cond)
{
    if (Label *oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
        // Note: the backedge is initially a jump to the next instruction.
        // It will be patched to the target block's label during link().
        RepatchLabel rejoin;
        CodeOffsetJump backedge = masm.jumpWithPatch(&rejoin, cond);
        masm.bind(&rejoin);

        masm.propagateOOM(patchableBackedges_.append(PatchableBackedgeInfo(backedge, mir->lir()->label(), oolEntry)));
    } else {
        masm.j(cond, mir->lir()->label());
    }
}
#endif

size_t
CodeGeneratorShared::addCacheLocations(const CacheLocationList &locs, size_t *numLocs)
{
    size_t firstIndex = runtimeData_.length();
    size_t numLocations = 0;
    for (CacheLocationList::iterator iter = locs.begin(); iter != locs.end(); iter++) {
        // allocateData() ensures that sizeof(CacheLocation) is word-aligned.
        // If this changes, we will need to pad to ensure alignment.
        size_t curIndex = allocateData(sizeof(CacheLocation));
        new (&runtimeData_[curIndex]) CacheLocation(iter->pc, iter->script);
        numLocations++;
    }
    JS_ASSERT(numLocations != 0);
    *numLocs = numLocations;
    return firstIndex;
}

ReciprocalMulConstants
CodeGeneratorShared::computeDivisionConstants(int d) {
    // In what follows, d is positive and is not a power of 2.
    JS_ASSERT(d > 0 && (d & (d - 1)) != 0);

    // Speeding up division by non power-of-2 constants is possible by
    // calculating, during compilation, a value M such that high-order
    // bits of M*n correspond to the result of the division.
    // Formally, we compute values 0 <= M < 2^32 and 0 <= s < 31 such that
    //         (M * n) >> (32 + s) = floor(n/d)    if n >= 0
    //         (M * n) >> (32 + s) = ceil(n/d) - 1 if n < 0.
    // The original presentation of this technique appears in Hacker's
    // Delight, a book by Henry S. Warren, Jr.. A proof of correctness
    // for our version follows.

    // Define p = 32 + s, M = ceil(2^p/d), and assume that s satisfies
    //                     M - 2^p/d <= 2^(s+1)/d.                 (1)
    // (Observe that s = FloorLog32(d) satisfies this, because in this
    // case d <= 2^(s+1) and so the RHS of (1) is at least one). Then,
    //
    // a) If s <= FloorLog32(d), then M <= 2^32 - 1.
    // Proof: Indeed, M is monotone in s and, for s = FloorLog32(d),
    // the inequalities 2^31 > d >= 2^s + 1 readily imply
    //    2^p / d = 2^p/(d - 1) * (d - 1)/d
    //           <= 2^32 * (1 - 1/d) < 2 * (2^31 - 1) = 2^32 - 2.
    // The claim follows by applying the ceiling function.
    //
    // b) For any 0 <= n < 2^31, floor(Mn/2^p) = floor(n/d).
    // Proof: Put x = floor(Mn/2^p); it's the unique integer for which
    //                    Mn/2^p - 1 < x <= Mn/2^p.                (2)
    // Using M >= 2^p/d on the LHS and (1) on the RHS, we get
    //           n/d - 1 < x <= n/d + n/(2^31 d) < n/d + 1/d.
    // Since x is an integer, it's not in the interval (n/d, (n+1)/d),
    // and so n/d - 1 < x <= n/d, which implies x = floor(n/d).
    //
    // c) For any -2^31 <= n < 0, floor(Mn/2^p) + 1 = ceil(n/d).
    // Proof: The proof is similar. Equation (2) holds as above. Using
    // M > 2^p/d (d isn't a power of 2) on the RHS and (1) on the LHS,
    //            n/d + n/(2^31 d) - 1 < x < n/d.
    // Using n >= -2^31 and summing 1,
    //            n/d - 1/d < x + 1 < n/d + 1.
    // Since x + 1 is an integer, this implies n/d <= x + 1 < n/d + 1.
    // In other words, x + 1 = ceil(n/d).
    //
    // Condition (1) isn't necessary for the existence of M and s with
    // the properties above. Hacker's Delight provides a slightly less
    // restrictive condition when d >= 196611, at the cost of a 3-page
    // proof of correctness.

    // Note that, since d*M - 2^p = d - (2^p)%d, (1) can be written as
    //               2^(s+1) >= d - (2^p)%d.
    // We now compute the least s with this property...

    int32_t shift = 0;
    while ((int64_t(1) << (shift+1)) + (int64_t(1) << (shift+32)) % d < d)
        shift++;

    // ...and the corresponding M. This may not fit in a signed 32-bit
    // integer; we will compute (M - 2^32) * n + (2^32 * n) instead of
    // M * n if this is the case (cf. item (a) above).
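    //
    // Worked example (illustrative): for d = 7 the loop above exits with
    // shift = 2, since 2^3 + (2^34 % 7) = 8 + 2 >= 7, while shift = 0 and
    // shift = 1 still satisfy the "< d" test. The multiplier computed just
    // below is then M = ceil(2^34 / 7) = 2454267027 (0x92492493), which
    // exceeds INT32_MAX and is therefore stored as M - 2^32 = -1840700269,
    // to be handled as described above. Check: (2454267027 * 100) >> 34
    // = 14 = floor(100 / 7).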
    ReciprocalMulConstants rmc;
    rmc.multiplier = int32_t((int64_t(1) << (shift+32))/d + 1);
    rmc.shiftAmount = shift;

    return rmc;
}

#ifdef JS_TRACE_LOGGING

bool
CodeGeneratorShared::emitTracelogScript(bool isStart)
{
    RegisterSet regs = RegisterSet::Volatile();
    Register logger = regs.takeGeneral();
    Register script = regs.takeGeneral();

    masm.Push(logger);
    masm.Push(script);

    CodeOffsetLabel patchLogger = masm.movWithPatch(ImmPtr(nullptr), logger);
    if (!patchableTraceLoggers_.append(patchLogger))
        return false;

    CodeOffsetLabel patchScript = masm.movWithPatch(ImmWord(0), script);
    if (!patchableTLScripts_.append(patchScript))
        return false;

    if (isStart)
        masm.tracelogStart(logger, script);
    else
        masm.tracelogStop(logger, script);

    masm.Pop(script);
    masm.Pop(logger);
    return true;
}

bool
CodeGeneratorShared::emitTracelogTree(bool isStart, uint32_t textId)
{
    if (!TraceLogTextIdEnabled(textId))
        return true;

    RegisterSet regs = RegisterSet::Volatile();
    Register logger = regs.takeGeneral();

    masm.Push(logger);

    CodeOffsetLabel patchLocation = masm.movWithPatch(ImmPtr(nullptr), logger);
    if (!patchableTraceLoggers_.append(patchLocation))
        return false;

    if (isStart) {
        masm.tracelogStart(logger, textId);
    } else {
#ifdef DEBUG
        masm.tracelogStop(logger, textId);
#else
        masm.tracelogStop(logger);
#endif
    }

    masm.Pop(logger);
    return true;
}
#endif

} // namespace jit
} // namespace js