/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/x64/Assembler-x64.h"

#include "gc/Marking.h"

using namespace js;
using namespace js::jit;

ABIArgGenerator::ABIArgGenerator()
  :
#if defined(XP_WIN)
    regIndex_(0),
    stackOffset_(ShadowStackSpace),
#else
    intRegIndex_(0),
    floatRegIndex_(0),
    stackOffset_(0),
#endif
    current_()
{}

ABIArg
ABIArgGenerator::next(MIRType type)
{
#if defined(XP_WIN)
    JS_STATIC_ASSERT(NumIntArgRegs == NumFloatArgRegs);
    if (regIndex_ == NumIntArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint64_t);
        return current_;
    }
    switch (type) {
      case MIRType_Int32:
      case MIRType_Pointer:
        current_ = ABIArg(IntArgRegs[regIndex_++]);
        break;
      case MIRType_Float32:
      case MIRType_Double:
        current_ = ABIArg(FloatArgRegs[regIndex_++]);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
    }
    return current_;
#else
    switch (type) {
      case MIRType_Int32:
      case MIRType_Pointer:
        if (intRegIndex_ == NumIntArgRegs) {
            current_ = ABIArg(stackOffset_);
            stackOffset_ += sizeof(uint64_t);
            break;
        }
        current_ = ABIArg(IntArgRegs[intRegIndex_++]);
        break;
      case MIRType_Double:
      case MIRType_Float32:
        if (floatRegIndex_ == NumFloatArgRegs) {
            current_ = ABIArg(stackOffset_);
            stackOffset_ += sizeof(uint64_t);
            break;
        }
        current_ = ABIArg(FloatArgRegs[floatRegIndex_++]);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
    }
    return current_;
#endif
}

// Avoid r11, which is the MacroAssembler's ScratchReg.
const Register ABIArgGenerator::NonArgReturnVolatileReg0 = r10;
const Register ABIArgGenerator::NonArgReturnVolatileReg1 = r12;
const Register ABIArgGenerator::NonVolatileReg = r13;

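// Layout of the jump relocation stream written by writeRelocation() below
// (and read back by RelocationIterator later in this file):
//
//   [fixed uint32_t]  offset of the extended jump table within the code,
//                     written as 0 here and patched in finish()
//   then, for each Relocation::JITCODE jump:
//     [unsigned]      offset of the jump instruction in the code buffer
//     [unsigned]      index of the jump in jumps_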
void
Assembler::writeRelocation(JmpSrc src, Relocation::Kind reloc)
{
    if (!jumpRelocations_.length()) {
        // The jump relocation table starts with a fixed-width integer pointing
        // to the start of the extended jump table. But we don't know the
        // actual extended jump table offset yet, so write a 0 which we'll
        // patch later.
        jumpRelocations_.writeFixedUint32_t(0);
    }
    if (reloc == Relocation::JITCODE) {
        jumpRelocations_.writeUnsigned(src.offset());
        jumpRelocations_.writeUnsigned(jumps_.length());
    }
}

void
Assembler::addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind reloc)
{
    JS_ASSERT(target.value != nullptr);

    // Emit reloc before modifying the jump table, since it computes a 0-based
    // index. This jump is not patchable at runtime.
    if (reloc == Relocation::JITCODE)
        writeRelocation(src, reloc);
    enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, reloc));
}

size_t
Assembler::addPatchableJump(JmpSrc src, Relocation::Kind reloc)
{
    // This jump is patchable at runtime, so we always need to make sure the
    // jump table is emitted.
    writeRelocation(src, reloc);

    size_t index = jumps_.length();
    enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), nullptr, reloc));
    return index;
}

/* static */
uint8_t *
Assembler::PatchableJumpAddress(JitCode *code, size_t index)
{
    // The assembler stashed the offset of the extended jump table (the code
    // fragments used for far jumps) at the start of the relocation table.
    uint32_t jumpOffset = *(uint32_t *)code->jumpRelocTable();
    jumpOffset += index * SizeOfJumpTableEntry;

    JS_ASSERT(jumpOffset + SizeOfExtendedJump <= code->instructionsSize());
    return code->raw() + jumpOffset;
}

/* static */
void
Assembler::PatchJumpEntry(uint8_t *entry, uint8_t *target)
{
    uint8_t **index = (uint8_t **)(entry + SizeOfExtendedJump - sizeof(void*));
    *index = target;
}

void
Assembler::finish()
{
    if (!jumps_.length() || oom())
        return;

    // Emit the jump table.
    masm.align(SizeOfJumpTableEntry);
    extendedJumpTable_ = masm.size();

    // Now that we know the offset to the jump table, squirrel it into the
    // jump relocation buffer if any JitCode references exist and must be
    // tracked for GC.
    JS_ASSERT_IF(jumpRelocations_.length(), jumpRelocations_.length() >= sizeof(uint32_t));
    if (jumpRelocations_.length())
        *(uint32_t *)jumpRelocations_.buffer() = extendedJumpTable_;

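    // Each extended jump table entry has the following layout; the
    // assertions in the loop below check the sizes instruction by
    // instruction:
    //
    //   jmp *2(%rip)    ; 6 bytes: jump through the quadword stored below
    //   ud2             ; 2 bytes: no-fall-through hint; pads the entry so
    //                   ;          the quadword is 8-byte aligned
    //   .quad 0         ; 8 bytes: absolute target, patched in by
    //                   ;          executableCopy() or PatchJumpEntry()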
    // Emit the extended jump table, zeroing each entry's target pointer.
    for (size_t i = 0; i < jumps_.length(); i++) {
#ifdef DEBUG
        size_t oldSize = masm.size();
#endif
        masm.jmp_rip(2);
        JS_ASSERT(masm.size() - oldSize == 6);
        // Following an indirect branch with ud2 hints to the hardware that
        // there's no fall-through. This also aligns the 64-bit immediate.
        masm.ud2();
        JS_ASSERT(masm.size() - oldSize == 8);
        masm.immediate64(0);
        JS_ASSERT(masm.size() - oldSize == SizeOfExtendedJump);
        JS_ASSERT(masm.size() - oldSize == SizeOfJumpTableEntry);
    }
}

void
Assembler::executableCopy(uint8_t *buffer)
{
    AssemblerX86Shared::executableCopy(buffer);

    for (size_t i = 0; i < jumps_.length(); i++) {
        RelativePatch &rp = jumps_[i];
        uint8_t *src = buffer + rp.offset;
        if (!rp.target) {
            // The patch target is nullptr for jumps that have been linked to
            // a label within the same code block, but may be repatched later
            // to jump to a different code block.
            continue;
        }
        if (JSC::X86Assembler::canRelinkJump(src, rp.target)) {
            JSC::X86Assembler::setRel32(src, rp.target);
        } else {
            // An extended jump table must exist, and its offset must be in
            // range.
            JS_ASSERT(extendedJumpTable_);
            JS_ASSERT((extendedJumpTable_ + i * SizeOfJumpTableEntry) <= size() - SizeOfJumpTableEntry);

            // Patch the jump to go to the extended jump entry.
            uint8_t *entry = buffer + extendedJumpTable_ + i * SizeOfJumpTableEntry;
            JSC::X86Assembler::setRel32(src, entry);

            // Now patch the entry's pointer. The pointer occupies the last
            // sizeof(void*) bytes of the entry (see PatchJumpEntry above), so
            // pass the address *after* the extended jump, i.e. just past the
            // 64-bit immediate.
            JSC::X86Assembler::repatchPointer(entry + SizeOfExtendedJump, rp.target);
        }
    }
}

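// Decodes the stream produced by writeRelocation() above: a fixed-width
// extended jump table offset followed by one (offset, index) pair per
// Relocation::JITCODE jump.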
class RelocationIterator
{
    CompactBufferReader reader_;
    uint32_t tableStart_;
    uint32_t offset_;
    uint32_t extOffset_;

  public:
    RelocationIterator(CompactBufferReader &reader)
      : reader_(reader)
    {
        tableStart_ = reader_.readFixedUint32_t();
    }

    bool read() {
        if (!reader_.more())
            return false;
        offset_ = reader_.readUnsigned();
        extOffset_ = reader_.readUnsigned();
        return true;
    }

    uint32_t offset() const {
        return offset_;
    }
    uint32_t extendedOffset() const {
        return extOffset_;
    }
};

JitCode *
Assembler::CodeFromJump(JitCode *code, uint8_t *jump)
{
    uint8_t *target = (uint8_t *)JSC::X86Assembler::getRel32Target(jump);
    if (target >= code->raw() && target < code->raw() + code->instructionsSize()) {
        // This jump is within the code buffer, so it has been redirected to
        // the extended jump table.
        JS_ASSERT(target + SizeOfJumpTableEntry <= code->raw() + code->instructionsSize());

        target = (uint8_t *)JSC::X86Assembler::getPointer(target + SizeOfExtendedJump);
    }

    return JitCode::FromExecutable(target);
}

void
Assembler::TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader)
{
    RelocationIterator iter(reader);
    while (iter.read()) {
        JitCode *child = CodeFromJump(code, code->raw() + iter.offset());
        MarkJitCodeUnbarriered(trc, &child, "rel32");
        JS_ASSERT(child == CodeFromJump(code, code->raw() + iter.offset()));
    }
}

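// In brief, the far-jump machinery above works as follows:
//
//   1. addPendingJump()/addPatchableJump() record each rel32 jump and, for
//      JITCODE targets, append a relocation record via writeRelocation().
//   2. finish() emits one 16-byte extended jump table entry per recorded
//      jump and stashes the table's offset at the head of the relocation
//      stream.
//   3. executableCopy() links each rel32 directly when its target is within
//      rel32 range; otherwise it points the rel32 at the jump's table entry
//      and writes the absolute target into the entry's trailing quadword.
//   4. During GC, TraceJumpRelocations() follows each recorded jump
//      (through the table if necessary) back to a JitCode object and marks
//      it.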