js/src/jit/x64/Assembler-x64.cpp

Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/x64/Assembler-x64.h"

#include "gc/Marking.h"

using namespace js;
using namespace js::jit;

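// On Win64, the first four arguments are passed in rcx/rdx/r8/r9 or
// xmm0-xmm3, with integer and float arguments drawing from a single shared
// position count; System V x64 instead keeps separate counters for the six
// integer argument registers and the eight float argument registers. The two
// member layouts below mirror that difference.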
ABIArgGenerator::ABIArgGenerator()
  :
#if defined(XP_WIN)
    regIndex_(0),
    stackOffset_(ShadowStackSpace),
#else
    intRegIndex_(0),
    floatRegIndex_(0),
    stackOffset_(0),
#endif
    current_()
{}

ABIArg
ABIArgGenerator::next(MIRType type)
{
#if defined(XP_WIN)
    JS_STATIC_ASSERT(NumIntArgRegs == NumFloatArgRegs);
    if (regIndex_ == NumIntArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint64_t);
        return current_;
    }
    switch (type) {
      case MIRType_Int32:
      case MIRType_Pointer:
        current_ = ABIArg(IntArgRegs[regIndex_++]);
        break;
      case MIRType_Float32:
      case MIRType_Double:
        current_ = ABIArg(FloatArgRegs[regIndex_++]);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
    }
    return current_;
#else
    switch (type) {
      case MIRType_Int32:
      case MIRType_Pointer:
        if (intRegIndex_ == NumIntArgRegs) {
            current_ = ABIArg(stackOffset_);
            stackOffset_ += sizeof(uint64_t);
            break;
        }
        current_ = ABIArg(IntArgRegs[intRegIndex_++]);
        break;
      case MIRType_Double:
      case MIRType_Float32:
        if (floatRegIndex_ == NumFloatArgRegs) {
            current_ = ABIArg(stackOffset_);
            stackOffset_ += sizeof(uint64_t);
            break;
        }
        current_ = ABIArg(FloatArgRegs[floatRegIndex_++]);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
    }
    return current_;
#endif
}
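// Illustrative use (not part of the original file): computing locations for
// a (pointer, double, int32) signature.
//
//   ABIArgGenerator gen;
//   ABIArg a0 = gen.next(MIRType_Pointer); // rdi on System V, rcx on Win64
//   ABIArg a1 = gen.next(MIRType_Double);  // xmm0 on System V, xmm1 on Win64
//   ABIArg a2 = gen.next(MIRType_Int32);   // rsi on System V, r8 on Win64
//
// Once the argument registers run out, next() hands back stack offsets in
// 8-byte slots, starting past the shadow space on Win64.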
// Avoid r11, which is the MacroAssembler's ScratchReg.
const Register ABIArgGenerator::NonArgReturnVolatileReg0 = r10;
const Register ABIArgGenerator::NonArgReturnVolatileReg1 = r12;
const Register ABIArgGenerator::NonVolatileReg = r13;

void
Assembler::writeRelocation(JmpSrc src, Relocation::Kind reloc)
{
    if (!jumpRelocations_.length()) {
        // The jump relocation table starts with a fixed-width integer pointing
        // to the start of the extended jump table. But, we don't know the
        // actual extended jump table offset yet, so write a 0 which we'll
        // patch later.
        jumpRelocations_.writeFixedUint32_t(0);
    }
    if (reloc == Relocation::JITCODE) {
        jumpRelocations_.writeUnsigned(src.offset());
        jumpRelocations_.writeUnsigned(jumps_.length());
    }
}
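// The stream written above is laid out as:
//
//   uint32_t  extended jump table offset  (fixed width; backpatched in finish())
//   unsigned  rel32 patch offset          \  one pair per
//   unsigned  index into jumps_           /  JITCODE jump
//
// RelocationIterator below decodes it when tracing jump relocations.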
void
Assembler::addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind reloc)
{
    JS_ASSERT(target.value != nullptr);

    // Emit reloc before modifying the jump table, since it computes a 0-based
    // index. This jump is not patchable at runtime.
    if (reloc == Relocation::JITCODE)
        writeRelocation(src, reloc);
    enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, reloc));
}

size_t
Assembler::addPatchableJump(JmpSrc src, Relocation::Kind reloc)
{
    // This jump is patchable at runtime so we always need to make sure the
    // jump table is emitted.
    writeRelocation(src, reloc);

    size_t index = jumps_.length();
    enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), nullptr, reloc));
    return index;
}
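// The index returned by addPatchableJump() identifies the jump's slot in the
// extended jump table; PatchableJumpAddress() below converts it back into a
// patchable address.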
michael@0 121
michael@0 122 /* static */
michael@0 123 uint8_t *
michael@0 124 Assembler::PatchableJumpAddress(JitCode *code, size_t index)
michael@0 125 {
michael@0 126 // The assembler stashed the offset into the code of the fragments used
michael@0 127 // for far jumps at the start of the relocation table.
michael@0 128 uint32_t jumpOffset = * (uint32_t *) code->jumpRelocTable();
michael@0 129 jumpOffset += index * SizeOfJumpTableEntry;
michael@0 130
michael@0 131 JS_ASSERT(jumpOffset + SizeOfExtendedJump <= code->instructionsSize());
michael@0 132 return code->raw() + jumpOffset;
michael@0 133 }
michael@0 134
michael@0 135 /* static */
michael@0 136 void
michael@0 137 Assembler::PatchJumpEntry(uint8_t *entry, uint8_t *target)
michael@0 138 {
michael@0 139 uint8_t **index = (uint8_t **) (entry + SizeOfExtendedJump - sizeof(void*));
michael@0 140 *index = target;
michael@0 141 }
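// Illustrative runtime use (not part of the original file), where 'code' and
// 'index' were produced by an earlier addPatchableJump() during assembly:
//
//   uint8_t *entry = Assembler::PatchableJumpAddress(code, index);
//   Assembler::PatchJumpEntry(entry, newTarget);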
michael@0 142
michael@0 143 void
michael@0 144 Assembler::finish()
michael@0 145 {
michael@0 146 if (!jumps_.length() || oom())
michael@0 147 return;
michael@0 148
michael@0 149 // Emit the jump table.
michael@0 150 masm.align(SizeOfJumpTableEntry);
michael@0 151 extendedJumpTable_ = masm.size();
michael@0 152
michael@0 153 // Now that we know the offset to the jump table, squirrel it into the
michael@0 154 // jump relocation buffer if any JitCode references exist and must be
michael@0 155 // tracked for GC.
michael@0 156 JS_ASSERT_IF(jumpRelocations_.length(), jumpRelocations_.length() >= sizeof(uint32_t));
michael@0 157 if (jumpRelocations_.length())
michael@0 158 *(uint32_t *)jumpRelocations_.buffer() = extendedJumpTable_;
michael@0 159
michael@0 160 // Zero the extended jumps table.
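    // Each entry has the shape (sizes checked by the assertions below):
    //
    //   jmp *(rip+2)   ; 6 bytes: jump through the 64-bit slot below
    //   ud2            ; 2 bytes
    //   (target)       ; 8 bytes: absolute destination, initially zero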
    for (size_t i = 0; i < jumps_.length(); i++) {
#ifdef DEBUG
        size_t oldSize = masm.size();
#endif
        masm.jmp_rip(2);
        JS_ASSERT(masm.size() - oldSize == 6);
        // Following an indirect branch with ud2 hints to the hardware that
        // there's no fall-through. This also aligns the 64-bit immediate.
        masm.ud2();
        JS_ASSERT(masm.size() - oldSize == 8);
        masm.immediate64(0);
        JS_ASSERT(masm.size() - oldSize == SizeOfExtendedJump);
        JS_ASSERT(masm.size() - oldSize == SizeOfJumpTableEntry);
    }
}
michael@0 176
michael@0 177 void
michael@0 178 Assembler::executableCopy(uint8_t *buffer)
michael@0 179 {
michael@0 180 AssemblerX86Shared::executableCopy(buffer);
michael@0 181
michael@0 182 for (size_t i = 0; i < jumps_.length(); i++) {
michael@0 183 RelativePatch &rp = jumps_[i];
michael@0 184 uint8_t *src = buffer + rp.offset;
michael@0 185 if (!rp.target) {
michael@0 186 // The patch target is nullptr for jumps that have been linked to
michael@0 187 // a label within the same code block, but may be repatched later
michael@0 188 // to jump to a different code block.
michael@0 189 continue;
michael@0 190 }
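        // A rel32 displacement reaches only +/- 2 GiB from the jump site;
        // canRelinkJump() checks that range, and out-of-range targets are
        // routed through the extended jump table instead.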
        if (JSC::X86Assembler::canRelinkJump(src, rp.target)) {
            JSC::X86Assembler::setRel32(src, rp.target);
        } else {
            // An extended jump table must exist, and its offset must be in
            // range.
            JS_ASSERT(extendedJumpTable_);
            JS_ASSERT((extendedJumpTable_ + i * SizeOfJumpTableEntry) <= size() - SizeOfJumpTableEntry);

            // Patch the jump to go to the extended jump entry.
            uint8_t *entry = buffer + extendedJumpTable_ + i * SizeOfJumpTableEntry;
            JSC::X86Assembler::setRel32(src, entry);

            // Now patch in the real target. repatchPointer() takes the address
            // just past the pointer it writes, so pass the end of the extended
            // jump entry: the 64-bit immediate occupies its last eight bytes.
            JSC::X86Assembler::repatchPointer(entry + SizeOfExtendedJump, rp.target);
        }
    }
}
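// Decodes the relocation stream produced by writeRelocation(): a fixed
// uint32_t giving the extended jump table's offset, followed by one
// (rel32 offset, jump index) pair per JITCODE jump.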
class RelocationIterator
{
    CompactBufferReader reader_;
    uint32_t tableStart_;
    uint32_t offset_;
    uint32_t extOffset_;

  public:
    RelocationIterator(CompactBufferReader &reader)
      : reader_(reader)
    {
        tableStart_ = reader_.readFixedUint32_t();
    }

    bool read() {
        if (!reader_.more())
            return false;
        offset_ = reader_.readUnsigned();
        extOffset_ = reader_.readUnsigned();
        return true;
    }

    uint32_t offset() const {
        return offset_;
    }
    uint32_t extendedOffset() const {
        return extOffset_;
    }
};
JitCode *
Assembler::CodeFromJump(JitCode *code, uint8_t *jump)
{
    uint8_t *target = (uint8_t *)JSC::X86Assembler::getRel32Target(jump);
    if (target >= code->raw() && target < code->raw() + code->instructionsSize()) {
        // This jump is within the code buffer, so it has been redirected to
        // the extended jump table.
        JS_ASSERT(target + SizeOfJumpTableEntry <= code->raw() + code->instructionsSize());

        target = (uint8_t *)JSC::X86Assembler::getPointer(target + SizeOfExtendedJump);
    }

    return JitCode::FromExecutable(target);
}
michael@0 254
michael@0 255 void
michael@0 256 Assembler::TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader)
michael@0 257 {
michael@0 258 RelocationIterator iter(reader);
michael@0 259 while (iter.read()) {
michael@0 260 JitCode *child = CodeFromJump(code, code->raw() + iter.offset());
michael@0 261 MarkJitCodeUnbarriered(trc, &child, "rel32");
michael@0 262 JS_ASSERT(child == CodeFromJump(code, code->raw() + iter.offset()));
michael@0 263 }
michael@0 264 }
michael@0 265
