Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/x64/Assembler-x64.h"

#include "gc/Marking.h"

using namespace js;
using namespace js::jit;

ABIArgGenerator::ABIArgGenerator()
  :
#if defined(XP_WIN)
    regIndex_(0),
    stackOffset_(ShadowStackSpace),
#else
    intRegIndex_(0),
    floatRegIndex_(0),
    stackOffset_(0),
#endif
    current_()
{}
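
// A note on the two configurations above, in sketch form: the Win64 calling
// convention passes the first four arguments in registers, with integer and
// floating-point arguments consuming the same four positions, so a single
// regIndex_ suffices; the caller also reserves 32 bytes of shadow space for
// those register arguments, so stack arguments start at ShadowStackSpace
// rather than 0. The System V AMD64 convention instead has independent
// integer (6) and floating-point (8) register sequences, hence the separate
// intRegIndex_ and floatRegIndex_.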

ABIArg
ABIArgGenerator::next(MIRType type)
{
#if defined(XP_WIN)
    JS_STATIC_ASSERT(NumIntArgRegs == NumFloatArgRegs);
    if (regIndex_ == NumIntArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint64_t);
        return current_;
    }
    switch (type) {
      case MIRType_Int32:
      case MIRType_Pointer:
        current_ = ABIArg(IntArgRegs[regIndex_++]);
        break;
      case MIRType_Float32:
      case MIRType_Double:
        current_ = ABIArg(FloatArgRegs[regIndex_++]);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
    }
    return current_;
#else
    switch (type) {
      case MIRType_Int32:
      case MIRType_Pointer:
        if (intRegIndex_ == NumIntArgRegs) {
            current_ = ABIArg(stackOffset_);
            stackOffset_ += sizeof(uint64_t);
            break;
        }
        current_ = ABIArg(IntArgRegs[intRegIndex_++]);
        break;
      case MIRType_Double:
      case MIRType_Float32:
        if (floatRegIndex_ == NumFloatArgRegs) {
            current_ = ABIArg(stackOffset_);
            stackOffset_ += sizeof(uint64_t);
            break;
        }
        current_ = ABIArg(FloatArgRegs[floatRegIndex_++]);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
    }
    return current_;
#endif
}
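
// For illustration, a sketch of how next() assigns locations for a call with
// argument types (Int32, Double, Int32), assuming IntArgRegs/FloatArgRegs
// follow the standard ABI orders (SysV: rdi, rsi, ... and xmm0, ...;
// Win64: rcx, rdx, r8, r9 and xmm0-xmm3):
//
//   SysV:   Int32 -> rdi,  Double -> xmm0,  Int32 -> rsi
//   Win64:  Int32 -> rcx,  Double -> xmm1,  Int32 -> r8
//
// On Win64 the Double lands in xmm1, not xmm0, because the shared regIndex_
// was already advanced by the first Int32. Once registers run out, each
// argument gets an 8-byte stack slot starting at stackOffset_.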

// Avoid r11, which is the MacroAssembler's ScratchReg.
const Register ABIArgGenerator::NonArgReturnVolatileReg0 = r10;
const Register ABIArgGenerator::NonArgReturnVolatileReg1 = r12;
const Register ABIArgGenerator::NonVolatileReg = r13;

void
Assembler::writeRelocation(JmpSrc src, Relocation::Kind reloc)
{
    if (!jumpRelocations_.length()) {
        // The jump relocation table starts with a fixed-width integer pointing
        // to the start of the extended jump table. But, we don't know the
        // actual extended jump table offset yet, so write a 0 which we'll
        // patch later.
        jumpRelocations_.writeFixedUint32_t(0);
    }
    if (reloc == Relocation::JITCODE) {
        jumpRelocations_.writeUnsigned(src.offset());
        jumpRelocations_.writeUnsigned(jumps_.length());
    }
}
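
// The resulting jump relocation stream, roughly:
//
//   uint32_t  offset of the extended jump table (0 here; patched in finish())
//   then, per JITCODE jump:
//     unsigned  code offset of the rel32 jump source
//     unsigned  its index in jumps_ (== its extended jump table slot)
//
// The per-jump fields use the compact (variable-width) encoding, which is why
// the header must be written with writeFixedUint32_t: it is patched in place
// later. RelocationIterator below reads the stream back in the same order.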

void
Assembler::addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind reloc)
{
    JS_ASSERT(target.value != nullptr);

    // Emit reloc before modifying the jump table, since it computes a 0-based
    // index. This jump is not patchable at runtime.
    if (reloc == Relocation::JITCODE)
        writeRelocation(src, reloc);
    enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, reloc));
}

size_t
Assembler::addPatchableJump(JmpSrc src, Relocation::Kind reloc)
{
    // This jump is patchable at runtime so we always need to make sure the
    // jump table is emitted.
    writeRelocation(src, reloc);

    size_t index = jumps_.length();
    enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), nullptr, reloc));
    return index;
}
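
// Contrast with addPendingJump() above: there, a relocation is written only
// for JITCODE references, whereas a patchable jump always calls
// writeRelocation() so the relocation buffer (and thus the recorded extended
// jump table offset) is guaranteed to exist. The returned index identifies
// the jump's extended table slot for PatchableJumpAddress() below.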

/* static */
uint8_t *
Assembler::PatchableJumpAddress(JitCode *code, size_t index)
{
    // At the start of the relocation table, the assembler stashed the offset
    // into the code of the fragments used for far jumps.
    uint32_t jumpOffset = *(uint32_t *)code->jumpRelocTable();
    jumpOffset += index * SizeOfJumpTableEntry;

    JS_ASSERT(jumpOffset + SizeOfExtendedJump <= code->instructionsSize());
    return code->raw() + jumpOffset;
}

/* static */
void
Assembler::PatchJumpEntry(uint8_t *entry, uint8_t *target)
{
    uint8_t **index = (uint8_t **)(entry + SizeOfExtendedJump - sizeof(void*));
    *index = target;
}
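
// Layout of one extended jump table entry, per the size assertions in
// finish() below (SizeOfExtendedJump == SizeOfJumpTableEntry == 16 bytes):
//
//   +0  jmp *2(%rip)     ; 6 bytes (FF 25 <imm32>), loads its target from +8
//   +6  ud2              ; 2 bytes, stops fall-through and pads the entry
//   +8  .quad <target>   ; 8 bytes, written by PatchJumpEntry()
//
// The rip-relative displacement of 2 skips over the ud2, so the indirect jmp
// reads its destination from the 64-bit slot at +8.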

void
Assembler::finish()
{
    if (!jumps_.length() || oom())
        return;

    // Emit the jump table.
    masm.align(SizeOfJumpTableEntry);
    extendedJumpTable_ = masm.size();

    // Now that we know the offset to the jump table, squirrel it into the
    // jump relocation buffer if any JitCode references exist and must be
    // tracked for GC.
    JS_ASSERT_IF(jumpRelocations_.length(), jumpRelocations_.length() >= sizeof(uint32_t));
    if (jumpRelocations_.length())
        *(uint32_t *)jumpRelocations_.buffer() = extendedJumpTable_;

    // Emit the extended jump table: one entry per pending jump, with each
    // 64-bit target immediate zeroed until it is patched.
    for (size_t i = 0; i < jumps_.length(); i++) {
#ifdef DEBUG
        size_t oldSize = masm.size();
#endif
        masm.jmp_rip(2);
        JS_ASSERT(masm.size() - oldSize == 6);
        // Following an indirect branch with ud2 hints to the hardware that
        // there's no fall-through. This also aligns the 64-bit immediate.
        masm.ud2();
        JS_ASSERT(masm.size() - oldSize == 8);
        masm.immediate64(0);
        JS_ASSERT(masm.size() - oldSize == SizeOfExtendedJump);
        JS_ASSERT(masm.size() - oldSize == SizeOfJumpTableEntry);
    }
}

void
Assembler::executableCopy(uint8_t *buffer)
{
    AssemblerX86Shared::executableCopy(buffer);

    for (size_t i = 0; i < jumps_.length(); i++) {
        RelativePatch &rp = jumps_[i];
        uint8_t *src = buffer + rp.offset;
        if (!rp.target) {
            // The patch target is nullptr for jumps that have been linked to
            // a label within the same code block, but may be repatched later
            // to jump to a different code block.
            continue;
        }
        if (JSC::X86Assembler::canRelinkJump(src, rp.target)) {
            JSC::X86Assembler::setRel32(src, rp.target);
        } else {
            // An extended jump table must exist, and its offset must be in
            // range.
            JS_ASSERT(extendedJumpTable_);
            JS_ASSERT((extendedJumpTable_ + i * SizeOfJumpTableEntry) <= size() - SizeOfJumpTableEntry);

            // Patch the jump to go to the extended jump entry.
            uint8_t *entry = buffer + extendedJumpTable_ + i * SizeOfJumpTableEntry;
            JSC::X86Assembler::setRel32(src, entry);

            // Now patch the pointer; note that we need to address it as
            // *after* the extended jump, i.e. after the 64-bit immediate.
            JSC::X86Assembler::repatchPointer(entry + SizeOfExtendedJump, rp.target);
        }
    }
}
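
// In sketch form: a direct rel32 jump only reaches targets within +/- 2 GB,
// so when canRelinkJump() reports the target out of range, jump i is routed
// through its extended jump table entry, which holds the full 64-bit target:
//
//   jmp rel32 --> entry i: jmp *2(%rip); ud2; .quad target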

class RelocationIterator
{
    CompactBufferReader reader_;
    uint32_t tableStart_;
    uint32_t offset_;
    uint32_t extOffset_;

  public:
    RelocationIterator(CompactBufferReader &reader)
      : reader_(reader)
    {
        tableStart_ = reader_.readFixedUint32_t();
    }

    bool read() {
        if (!reader_.more())
            return false;
        offset_ = reader_.readUnsigned();
        extOffset_ = reader_.readUnsigned();
        return true;
    }

    uint32_t offset() const {
        return offset_;
    }
    uint32_t extendedOffset() const {
        return extOffset_;
    }
};

JitCode *
Assembler::CodeFromJump(JitCode *code, uint8_t *jump)
{
    uint8_t *target = (uint8_t *)JSC::X86Assembler::getRel32Target(jump);
    if (target >= code->raw() && target < code->raw() + code->instructionsSize()) {
        // This jump is within the code buffer, so it has been redirected to
        // the extended jump table.
        JS_ASSERT(target + SizeOfJumpTableEntry <= code->raw() + code->instructionsSize());

        target = (uint8_t *)JSC::X86Assembler::getPointer(target + SizeOfExtendedJump);
    }

    return JitCode::FromExecutable(target);
}
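
// getPointer() here reads the entry's 64-bit immediate. Like PatchJumpEntry()
// above, which writes at entry + SizeOfExtendedJump - sizeof(void*), the
// pointer is addressed by where it *ends*, hence the full +SizeOfExtendedJump
// rather than the immediate's +8 start offset.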

void
Assembler::TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader)
{
    RelocationIterator iter(reader);
    while (iter.read()) {
        JitCode *child = CodeFromJump(code, code->raw() + iter.offset());
        MarkJitCodeUnbarriered(trc, &child, "rel32");
        JS_ASSERT(child == CodeFromJump(code, code->raw() + iter.offset()));
    }
}
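
// Tracing sketch: each relocation entry names a jump whose target is another
// piece of JitCode; marking that child keeps the callee alive across GC. The
// assertion re-derives the target after marking to check that tracing did not
// move the referenced code out from under the jump.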