js/src/jit/shared/CodeGenerator-shared.cpp

author      Michael Schloh von Bennewitz <michael@schloh.com>
date        Thu, 22 Jan 2015 13:21:57 +0100
branch      TOR_BUG_9701
changeset   15:b8a032363ba2
permissions -rw-r--r--

Incorporate requested changes from Mozilla in review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/shared/CodeGenerator-shared-inl.h"

#include "mozilla/DebugOnly.h"

#include "jit/IonCaches.h"
#include "jit/IonMacroAssembler.h"
#include "jit/IonSpewer.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
#include "jit/ParallelFunctions.h"
#include "vm/TraceLogging.h"

#include "jit/IonFrames-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::DebugOnly;

namespace js {
namespace jit {

MacroAssembler &
CodeGeneratorShared::ensureMasm(MacroAssembler *masmArg)
{
    if (masmArg)
        return *masmArg;
    maybeMasm_.construct();
    return maybeMasm_.ref();
}

CodeGeneratorShared::CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masmArg)
  : oolIns(nullptr),
    maybeMasm_(),
    masm(ensureMasm(masmArg)),
    gen(gen),
    graph(*graph),
    current(nullptr),
    snapshots_(),
    recovers_(),
    deoptTable_(nullptr),
#ifdef DEBUG
    pushedArgs_(0),
#endif
    lastOsiPointOffset_(0),
    sps_(&GetIonContext()->runtime->spsProfiler(), &lastPC_),
    osrEntryOffset_(0),
    skipArgCheckEntryOffset_(0),
    frameDepth_(graph->paddedLocalSlotsSize() + graph->argumentsSize())
{
    if (!gen->compilingAsmJS())
        masm.setInstrumentation(&sps_);

    // Since asm.js uses the system ABI which does not necessarily use a
    // regular array where all slots are sizeof(Value), it maintains the max
    // argument stack depth separately.
    if (gen->compilingAsmJS()) {
        JS_ASSERT(graph->argumentSlotCount() == 0);
        frameDepth_ += gen->maxAsmJSStackArgBytes();

        // An MAsmJSCall does not align the stack pointer at call sites, but
        // instead relies on the a priori stack adjustment (in the prologue)
        // on platforms (like x64) which require the stack to be aligned.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
        bool forceAlign = true;
#else
        bool forceAlign = false;
#endif
        if (gen->performsAsmJSCall() || forceAlign) {
            unsigned alignmentAtCall = AlignmentMidPrologue + frameDepth_;
            if (unsigned rem = alignmentAtCall % StackAlignment)
                frameDepth_ += StackAlignment - rem;
        }
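        // For illustration (the values here are hypothetical, not taken from
        // any particular platform): with StackAlignment == 16,
        // AlignmentMidPrologue == 8, and frameDepth_ == 52, alignmentAtCall
        // is 60, rem is 12, and frameDepth_ grows by 4 to 56, leaving the
        // stack 16-byte aligned at call sites.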

        // FrameSizeClass is only used for bailing, which cannot happen in
        // asm.js code.
        frameClass_ = FrameSizeClass::None();
    } else {
        frameClass_ = FrameSizeClass::FromDepth(frameDepth_);
    }
}

bool
CodeGeneratorShared::generateOutOfLineCode()
{
    for (size_t i = 0; i < outOfLineCode_.length(); i++) {
        if (!gen->alloc().ensureBallast())
            return false;
        masm.setFramePushed(outOfLineCode_[i]->framePushed());
        lastPC_ = outOfLineCode_[i]->pc();
        if (!sps_.prepareForOOL())
            return false;
        sps_.setPushed(outOfLineCode_[i]->script());
        outOfLineCode_[i]->bind(&masm);

        oolIns = outOfLineCode_[i];
        if (!outOfLineCode_[i]->generate(this))
            return false;
        sps_.finishOOL();
    }
    oolIns = nullptr;

    return true;
}

bool
CodeGeneratorShared::addOutOfLineCode(OutOfLineCode *code)
{
    code->setFramePushed(masm.framePushed());
    // If an OOL instruction adds another OOL instruction, then use the
    // original instruction's script/pc instead of that of the basic block
    // we're on, because the latter is probably no longer relevant.
    if (oolIns)
        code->setSource(oolIns->script(), oolIns->pc());
    else
        code->setSource(current ? current->mir()->info().script() : nullptr, lastPC_);
    JS_ASSERT_IF(code->script(), code->script()->containsPC(code->pc()));
    return outOfLineCode_.append(code);
}

// see OffsetOfFrameSlot
static inline int32_t
ToStackIndex(LAllocation *a)
{
    if (a->isStackSlot()) {
        JS_ASSERT(a->toStackSlot()->slot() >= 1);
        return a->toStackSlot()->slot();
    }
    JS_ASSERT(-int32_t(sizeof(IonJSFrameLayout)) <= a->toArgument()->index());
    return -int32_t(sizeof(IonJSFrameLayout) + a->toArgument()->index());
}
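// For illustration: an argument allocation with index 0 encodes as
// -int32_t(sizeof(IonJSFrameLayout)), so negative stack indices address the
// caller-pushed arguments above the frame, while positive indices (>= 1)
// address local stack slots.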

bool
CodeGeneratorShared::encodeAllocations(LSnapshot *snapshot, MResumePoint *resumePoint,
                                       uint32_t *startIndex)
{
    IonSpew(IonSpew_Codegen, "Encoding %u of resume point %p's operands starting from %u",
            resumePoint->numOperands(), (void *) resumePoint, *startIndex);
    for (uint32_t allocno = 0, e = resumePoint->numOperands(); allocno < e; allocno++) {
        uint32_t i = allocno + *startIndex;
        MDefinition *mir = resumePoint->getOperand(allocno);

        if (mir->isBox())
            mir = mir->toBox()->getOperand(0);

        MIRType type = mir->isUnused()
                       ? MIRType_MagicOptimizedOut
                       : mir->type();

        RValueAllocation alloc;

        switch (type) {
          case MIRType_Undefined:
            alloc = RValueAllocation::Undefined();
            break;
          case MIRType_Null:
            alloc = RValueAllocation::Null();
            break;
          case MIRType_Int32:
          case MIRType_String:
          case MIRType_Object:
          case MIRType_Boolean:
          case MIRType_Double:
          case MIRType_Float32:
          {
            LAllocation *payload = snapshot->payloadOfSlot(i);
            JSValueType valueType = ValueTypeFromMIRType(type);
            if (payload->isMemory()) {
                if (type == MIRType_Float32)
                    alloc = RValueAllocation::Float32(ToStackIndex(payload));
                else
                    alloc = RValueAllocation::Typed(valueType, ToStackIndex(payload));
            } else if (payload->isGeneralReg()) {
                alloc = RValueAllocation::Typed(valueType, ToRegister(payload));
            } else if (payload->isFloatReg()) {
                FloatRegister reg = ToFloatRegister(payload);
                if (type == MIRType_Float32)
                    alloc = RValueAllocation::Float32(reg);
                else
                    alloc = RValueAllocation::Double(reg);
            } else {
                MConstant *constant = mir->toConstant();
                uint32_t index;
                if (!graph.addConstantToPool(constant->value(), &index))
                    return false;
                alloc = RValueAllocation::ConstantPool(index);
            }
            break;
          }
          case MIRType_MagicOptimizedArguments:
          case MIRType_MagicOptimizedOut:
          {
            uint32_t index;
            JSWhyMagic why = (type == MIRType_MagicOptimizedArguments
                              ? JS_OPTIMIZED_ARGUMENTS
                              : JS_OPTIMIZED_OUT);
            Value v = MagicValue(why);
            if (!graph.addConstantToPool(v, &index))
                return false;
            alloc = RValueAllocation::ConstantPool(index);
            break;
          }
          default:
          {
            JS_ASSERT(mir->type() == MIRType_Value);
            LAllocation *payload = snapshot->payloadOfSlot(i);
#ifdef JS_NUNBOX32
            LAllocation *type = snapshot->typeOfSlot(i);
            if (type->isRegister()) {
                if (payload->isRegister())
                    alloc = RValueAllocation::Untyped(ToRegister(type), ToRegister(payload));
                else
                    alloc = RValueAllocation::Untyped(ToRegister(type), ToStackIndex(payload));
            } else {
                if (payload->isRegister())
                    alloc = RValueAllocation::Untyped(ToStackIndex(type), ToRegister(payload));
                else
                    alloc = RValueAllocation::Untyped(ToStackIndex(type), ToStackIndex(payload));
            }
#elif JS_PUNBOX64
            if (payload->isRegister())
                alloc = RValueAllocation::Untyped(ToRegister(payload));
            else
                alloc = RValueAllocation::Untyped(ToStackIndex(payload));
#endif
            break;
          }
        }

        snapshots_.add(alloc);
    }

    *startIndex += resumePoint->numOperands();
    return true;
}

bool
CodeGeneratorShared::encode(LRecoverInfo *recover)
{
    if (recover->recoverOffset() != INVALID_RECOVER_OFFSET)
        return true;

    uint32_t frameCount = recover->mir()->frameCount();
    IonSpew(IonSpew_Snapshots, "Encoding LRecoverInfo %p (frameCount %u)",
            (void *)recover, frameCount);

    MResumePoint::Mode mode = recover->mir()->mode();
    JS_ASSERT(mode != MResumePoint::Outer);
    bool resumeAfter = (mode == MResumePoint::ResumeAfter);

    RecoverOffset offset = recovers_.startRecover(frameCount, resumeAfter);

    for (MResumePoint **it = recover->begin(), **end = recover->end();
         it != end;
         ++it)
    {
        if (!recovers_.writeFrame(*it))
            return false;
    }

    recovers_.endRecover();
    recover->setRecoverOffset(offset);
    return !recovers_.oom();
}

bool
CodeGeneratorShared::encode(LSnapshot *snapshot)
{
    if (snapshot->snapshotOffset() != INVALID_SNAPSHOT_OFFSET)
        return true;

    LRecoverInfo *recoverInfo = snapshot->recoverInfo();
    if (!encode(recoverInfo))
        return false;

    RecoverOffset recoverOffset = recoverInfo->recoverOffset();
    MOZ_ASSERT(recoverOffset != INVALID_RECOVER_OFFSET);

    IonSpew(IonSpew_Snapshots, "Encoding LSnapshot %p (LRecover %p)",
            (void *)snapshot, (void*) recoverInfo);

    SnapshotOffset offset = snapshots_.startSnapshot(recoverOffset, snapshot->bailoutKind());

#ifdef TRACK_SNAPSHOTS
    uint32_t pcOpcode = 0;
    uint32_t lirOpcode = 0;
    uint32_t lirId = 0;
    uint32_t mirOpcode = 0;
    uint32_t mirId = 0;

    if (LInstruction *ins = instruction()) {
        lirOpcode = ins->op();
        lirId = ins->id();
        if (ins->mirRaw()) {
            mirOpcode = ins->mirRaw()->op();
            mirId = ins->mirRaw()->id();
            if (ins->mirRaw()->trackedPc())
                pcOpcode = *ins->mirRaw()->trackedPc();
        }
    }
    snapshots_.trackSnapshot(pcOpcode, mirOpcode, mirId, lirOpcode, lirId);
#endif

    uint32_t startIndex = 0;
    for (MResumePoint **it = recoverInfo->begin(), **end = recoverInfo->end();
         it != end;
         ++it)
    {
        MResumePoint *mir = *it;
        if (!encodeAllocations(snapshot, mir, &startIndex))
            return false;
    }

    MOZ_ASSERT(snapshots_.allocWritten() == snapshot->numSlots());
    snapshots_.endSnapshot();
    snapshot->setSnapshotOffset(offset);
    return !snapshots_.oom();
}

bool
CodeGeneratorShared::assignBailoutId(LSnapshot *snapshot)
{
    JS_ASSERT(snapshot->snapshotOffset() != INVALID_SNAPSHOT_OFFSET);

    // Can we not use bailout tables at all?
    if (!deoptTable_)
        return false;

    JS_ASSERT(frameClass_ != FrameSizeClass::None());

    if (snapshot->bailoutId() != INVALID_BAILOUT_ID)
        return true;

    // Is the bailout table full?
    if (bailouts_.length() >= BAILOUT_TABLE_SIZE)
        return false;

    unsigned bailoutId = bailouts_.length();
    snapshot->setBailoutId(bailoutId);
    IonSpew(IonSpew_Snapshots, "Assigned snapshot bailout id %u", bailoutId);
    return bailouts_.append(snapshot->snapshotOffset());
}

void
CodeGeneratorShared::encodeSafepoints()
{
    for (SafepointIndex *it = safepointIndices_.begin(), *end = safepointIndices_.end();
         it != end;
         ++it)
    {
        LSafepoint *safepoint = it->safepoint();

        if (!safepoint->encoded()) {
            safepoint->fixupOffset(&masm);
            safepoints_.encode(safepoint);
        }

        it->resolve();
    }
}

bool
CodeGeneratorShared::markSafepoint(LInstruction *ins)
{
    return markSafepointAt(masm.currentOffset(), ins);
}

bool
CodeGeneratorShared::markSafepointAt(uint32_t offset, LInstruction *ins)
{
    JS_ASSERT_IF(!safepointIndices_.empty(),
                 offset - safepointIndices_.back().displacement() >= sizeof(uint32_t));
    return safepointIndices_.append(SafepointIndex(offset, ins->safepoint()));
}

void
CodeGeneratorShared::ensureOsiSpace()
{
    // For a refresher, an invalidation point is of the form:
    // 1: call <target>
    // 2: ...
    // 3: <osipoint>
    //
    // The four bytes *before* instruction 2 are overwritten with an offset.
    // Callers must ensure that the instruction itself has enough bytes to
    // support this.
    //
    // The bytes *at* instruction 3 are overwritten with an invalidation
    // jump. These bytes may be in a completely different IR sequence, but
    // represent the join point of the call out of the function.
    //
    // At points where we want to ensure that invalidation won't corrupt an
    // important instruction, we make sure to pad with nops.
    if (masm.currentOffset() - lastOsiPointOffset_ < Assembler::patchWrite_NearCallSize()) {
        int32_t paddingSize = Assembler::patchWrite_NearCallSize();
        paddingSize -= masm.currentOffset() - lastOsiPointOffset_;
        for (int32_t i = 0; i < paddingSize; ++i)
            masm.nop();
    }
    JS_ASSERT(masm.currentOffset() - lastOsiPointOffset_ >= Assembler::patchWrite_NearCallSize());
    lastOsiPointOffset_ = masm.currentOffset();
}
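// A quick example of the nop padding in ensureOsiSpace (sizes illustrative):
// if patchWrite_NearCallSize() is 5 bytes and only 2 bytes have been emitted
// since lastOsiPointOffset_, three nops are emitted first, so the patch
// region of the previous OSI point cannot overlap the one recorded here.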

bool
CodeGeneratorShared::markOsiPoint(LOsiPoint *ins, uint32_t *callPointOffset)
{
    if (!encode(ins->snapshot()))
        return false;

    ensureOsiSpace();

    *callPointOffset = masm.currentOffset();
    SnapshotOffset so = ins->snapshot()->snapshotOffset();
    return osiIndices_.append(OsiIndex(*callPointOffset, so));
}

#ifdef CHECK_OSIPOINT_REGISTERS
template <class Op>
static void
HandleRegisterDump(Op op, MacroAssembler &masm, RegisterSet liveRegs, Register activation,
                   Register scratch)
{
    const size_t baseOffset = JitActivation::offsetOfRegs();

    // Handle live GPRs.
    for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); iter++) {
        Register reg = *iter;
        Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));

        if (reg == activation) {
            // To use the original value of the activation register (that's
            // now on top of the stack), we need the scratch register.
            masm.push(scratch);
            masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), scratch);
            op(scratch, dump);
            masm.pop(scratch);
        } else {
            op(reg, dump);
        }
    }

    // Handle live FPRs.
    for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); iter++) {
        FloatRegister reg = *iter;
        Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
        op(reg, dump);
    }
}

class StoreOp
{
    MacroAssembler &masm;

  public:
    StoreOp(MacroAssembler &masm)
      : masm(masm)
    {}

    void operator()(Register reg, Address dump) {
        masm.storePtr(reg, dump);
    }
    void operator()(FloatRegister reg, Address dump) {
        masm.storeDouble(reg, dump);
    }
};

static void
StoreAllLiveRegs(MacroAssembler &masm, RegisterSet liveRegs)
{
    // Store a copy of all live registers before performing the call.
    // When we reach the OsiPoint, we can use this to check nothing
    // modified them in the meantime.

    // Load pointer to the JitActivation in a scratch register.
    GeneralRegisterSet allRegs(GeneralRegisterSet::All());
    Register scratch = allRegs.takeAny();
    masm.push(scratch);
    masm.loadJitActivation(scratch);

    Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
    masm.add32(Imm32(1), checkRegs);

    StoreOp op(masm);
    HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());

    masm.pop(scratch);
}

class VerifyOp
{
    MacroAssembler &masm;
    Label *failure_;

  public:
    VerifyOp(MacroAssembler &masm, Label *failure)
      : masm(masm), failure_(failure)
    {}

    void operator()(Register reg, Address dump) {
        masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
    }
    void operator()(FloatRegister reg, Address dump) {
        masm.loadDouble(dump, ScratchFloatReg);
        masm.branchDouble(Assembler::DoubleNotEqual, ScratchFloatReg, reg, failure_);
    }
};

static void
OsiPointRegisterCheckFailed()
{
    // Any live register captured by a safepoint (other than temp registers)
    // must remain unchanged between the call and the OsiPoint instruction.
    MOZ_ASSUME_UNREACHABLE("Modified registers between VM call and OsiPoint");
}

void
CodeGeneratorShared::verifyOsiPointRegs(LSafepoint *safepoint)
{
    // Ensure the live registers stored by callVM did not change between
    // the call and this OsiPoint. Try-catch relies on this invariant.

    // Load pointer to the JitActivation in a scratch register.
    GeneralRegisterSet allRegs(GeneralRegisterSet::All());
    Register scratch = allRegs.takeAny();
    masm.push(scratch);
    masm.loadJitActivation(scratch);

    // If we should not check registers (because the instruction did not call
    // into the VM, or a GC happened), we're done.
    Label failure, done;
    Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
    masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);

    // Having more than one VM function call made in one visit function at
    // runtime is a security-critical error, because if we conservatively
    // assume that one of the function calls can re-enter Ion, then the
    // invalidation process will potentially add a call at a random location,
    // by patching the code before the return address.
    masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);

    // Ignore clobbered registers. Some instructions (like LValueToInt32)
    // modify temps after calling into the VM. This is fine because no other
    // instructions (including this OsiPoint) will depend on them. The
    // backtracking allocator can also use the same register for an input and
    // an output; these are marked as clobbered and shouldn't get checked.
    RegisterSet liveRegs = safepoint->liveRegs();
    liveRegs = RegisterSet::Intersect(liveRegs, RegisterSet::Not(safepoint->clobberedRegs()));

    VerifyOp op(masm, &failure);
    HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());

    masm.jump(&done);

    // Do not profile the callWithABI that occurs below. This is to avoid a
    // rare corner case that occurs when profiling interacts with itself:
    //
    // When slow profiling assertions are turned on, FunctionBoundary ops
    // (which update the profiler pseudo-stack) may emit a callVM, which
    // forces them to have an osi point associated with them. The
    // FunctionBoundary for inline function entry is added to the caller's
    // graph with a PC from the caller's code, but during codegen it modifies
    // SPS instrumentation to add the callee as the current top-most script.
    // When codegen gets to the OSIPoint, and the callWithABI below is
    // emitted, the codegen thinks that the current frame is the callee, but
    // the PC it's using from the OSIPoint refers to the caller. This causes
    // the profiler instrumentation of the callWithABI below to ASSERT, since
    // the script and pc are mismatched. To avoid this, we simply omit
    // instrumentation for these callWithABIs.
    masm.bind(&failure);
    masm.setupUnalignedABICall(0, scratch);
    masm.callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, OsiPointRegisterCheckFailed));
    masm.breakpoint();

    masm.bind(&done);
    masm.pop(scratch);
}
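// To summarize the register-check protocol implemented above:
// resetOsiPointRegs() clears the activation's checkRegs counter before the
// instruction runs, callVM() increments it and dumps the live registers via
// StoreAllLiveRegs(), and verifyOsiPointRegs() then compares that dump
// against the register values at the OsiPoint, trapping on any mismatch.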

bool
CodeGeneratorShared::shouldVerifyOsiPointRegs(LSafepoint *safepoint)
{
    if (!js_JitOptions.checkOsiPointRegisters)
        return false;

    if (gen->info().executionMode() != SequentialExecution)
        return false;

    if (safepoint->liveRegs().empty(true) && safepoint->liveRegs().empty(false))
        return false; // No registers to check.

    return true;
}

void
CodeGeneratorShared::resetOsiPointRegs(LSafepoint *safepoint)
{
    if (!shouldVerifyOsiPointRegs(safepoint))
        return;

    // Set checkRegs to 0. If we perform a VM call, the instruction
    // will set it to 1.
    GeneralRegisterSet allRegs(GeneralRegisterSet::All());
    Register scratch = allRegs.takeAny();
    masm.push(scratch);
    masm.loadJitActivation(scratch);
    Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
    masm.store32(Imm32(0), checkRegs);
    masm.pop(scratch);
}
#endif

// Before making any call into C++, you should ensure that volatile
// registers are evicted by the register allocator.
bool
CodeGeneratorShared::callVM(const VMFunction &fun, LInstruction *ins, const Register *dynStack)
{
    // Different execution modes have different sets of VM functions.
    JS_ASSERT(fun.executionMode == gen->info().executionMode());

    // If we're calling a function with an out parameter type of double, make
    // sure we have an FPU.
    JS_ASSERT_IF(fun.outParam == Type_Double, GetIonContext()->runtime->jitSupportsFloatingPoint());

#ifdef DEBUG
    if (ins->mirRaw()) {
        JS_ASSERT(ins->mirRaw()->isInstruction());
        MInstruction *mir = ins->mirRaw()->toInstruction();
        JS_ASSERT_IF(mir->isEffectful(), mir->resumePoint());
    }
#endif

#ifdef JS_TRACE_LOGGING
    if (!emitTracelogStartEvent(TraceLogger::VM))
        return false;
#endif

    // Stack is:
    //    ... frame ...
    //    [args]
#ifdef DEBUG
    JS_ASSERT(pushedArgs_ == fun.explicitArgs);
    pushedArgs_ = 0;
#endif

    // Get the wrapper of the VM function.
    JitCode *wrapper = gen->jitRuntime()->getVMWrapper(fun);
    if (!wrapper)
        return false;

#ifdef CHECK_OSIPOINT_REGISTERS
    if (shouldVerifyOsiPointRegs(ins->safepoint()))
        StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
#endif

    // Call the wrapper function. The wrapper is in charge of unwinding the
    // stack when returning from the call. Failures are handled with
    // exceptions based on the return value of the C functions. To guard the
    // outcome of the returned value, use another LIR instruction.
    uint32_t callOffset;
    if (dynStack)
        callOffset = masm.callWithExitFrame(wrapper, *dynStack);
    else
        callOffset = masm.callWithExitFrame(wrapper);

    if (!markSafepointAt(callOffset, ins))
        return false;

    // Remove the rest of the frame left on the stack. We remove the return
    // address, which is implicitly popped when returning.
    int framePop = sizeof(IonExitFrameLayout) - sizeof(void*);

    // Pop arguments from framePushed.
    masm.implicitPop(fun.explicitStackSlots() * sizeof(void *) + framePop);
    // Stack is:
    //    ... frame ...

#ifdef JS_TRACE_LOGGING
    if (!emitTracelogStopEvent(TraceLogger::VM))
        return false;
#endif

    return true;
}
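// A minimal usage sketch (the VMFunction and LIR instruction here are
// hypothetical): a visit function pushes the explicit arguments with
// pushArg(), in reverse order of the C++ signature, then calls the wrapper
// through callVM():
//
//     typedef bool (*DoSomethingFn)(JSContext *, HandleObject);
//     static const VMFunction DoSomethingInfo =
//         FunctionInfo<DoSomethingFn>(DoSomething);
//
//     bool
//     CodeGenerator::visitSomething(LSomething *lir)
//     {
//         pushArg(ToRegister(lir->object()));
//         return callVM(DoSomethingInfo, lir);
//     }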

class OutOfLineTruncateSlow : public OutOfLineCodeBase<CodeGeneratorShared>
{
    FloatRegister src_;
    Register dest_;
    bool needFloat32Conversion_;

  public:
    OutOfLineTruncateSlow(FloatRegister src, Register dest, bool needFloat32Conversion = false)
      : src_(src), dest_(dest), needFloat32Conversion_(needFloat32Conversion)
    { }

    bool accept(CodeGeneratorShared *codegen) {
        return codegen->visitOutOfLineTruncateSlow(this);
    }
    FloatRegister src() const {
        return src_;
    }
    Register dest() const {
        return dest_;
    }
    bool needFloat32Conversion() const {
        return needFloat32Conversion_;
    }
};

OutOfLineCode *
CodeGeneratorShared::oolTruncateDouble(const FloatRegister &src, const Register &dest)
{
    OutOfLineTruncateSlow *ool = new(alloc()) OutOfLineTruncateSlow(src, dest);
    if (!addOutOfLineCode(ool))
        return nullptr;
    return ool;
}

bool
CodeGeneratorShared::emitTruncateDouble(const FloatRegister &src, const Register &dest)
{
    OutOfLineCode *ool = oolTruncateDouble(src, dest);
    if (!ool)
        return false;

    masm.branchTruncateDouble(src, dest, ool->entry());
    masm.bind(ool->rejoin());
    return true;
}

bool
CodeGeneratorShared::emitTruncateFloat32(const FloatRegister &src, const Register &dest)
{
    OutOfLineTruncateSlow *ool = new(alloc()) OutOfLineTruncateSlow(src, dest, true);
    if (!addOutOfLineCode(ool))
        return false;

    masm.branchTruncateFloat32(src, dest, ool->entry());
    masm.bind(ool->rejoin());
    return true;
}

bool
CodeGeneratorShared::visitOutOfLineTruncateSlow(OutOfLineTruncateSlow *ool)
{
    FloatRegister src = ool->src();
    Register dest = ool->dest();

    saveVolatile(dest);

    if (ool->needFloat32Conversion()) {
        masm.push(src);
        masm.convertFloat32ToDouble(src, src);
    }

    masm.setupUnalignedABICall(1, dest);
    masm.passABIArg(src, MoveOp::DOUBLE);
    if (gen->compilingAsmJS())
        masm.callWithABI(AsmJSImm_ToInt32);
    else
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, js::ToInt32));
    masm.storeCallResult(dest);

    if (ool->needFloat32Conversion())
        masm.pop(src);

    restoreVolatile(dest);

    masm.jump(ool->rejoin());
    return true;
}

bool
CodeGeneratorShared::omitOverRecursedCheck() const
{
    // If the current function makes no calls (which means it isn't recursive)
    // and it uses only a small amount of stack space, it doesn't need a
    // stack overflow check. Note that the actual number here is somewhat
    // arbitrary, and codegen actually uses small bounded amounts of
    // additional stack space in some cases too.
    return frameSize() < 64 && !gen->performsCall();
}

void
CodeGeneratorShared::emitPreBarrier(Register base, const LAllocation *index, MIRType type)
{
    if (index->isConstant()) {
        Address address(base, ToInt32(index) * sizeof(Value));
        masm.patchableCallPreBarrier(address, type);
    } else {
        BaseIndex address(base, ToRegister(index), TimesEight);
        masm.patchableCallPreBarrier(address, type);
    }
}

void
CodeGeneratorShared::emitPreBarrier(Address address, MIRType type)
{
    masm.patchableCallPreBarrier(address, type);
}

void
CodeGeneratorShared::dropArguments(unsigned argc)
{
    pushedArgumentSlots_.shrinkBy(argc);
}

bool
CodeGeneratorShared::markArgumentSlots(LSafepoint *safepoint)
{
    for (size_t i = 0; i < pushedArgumentSlots_.length(); i++) {
        if (!safepoint->addValueSlot(pushedArgumentSlots_[i]))
            return false;
    }
    return true;
}

OutOfLineAbortPar *
CodeGeneratorShared::oolAbortPar(ParallelBailoutCause cause, MBasicBlock *basicBlock,
                                 jsbytecode *bytecode)
{
    OutOfLineAbortPar *ool = new(alloc()) OutOfLineAbortPar(cause, basicBlock, bytecode);
    if (!ool || !addOutOfLineCode(ool))
        return nullptr;
    return ool;
}

OutOfLineAbortPar *
CodeGeneratorShared::oolAbortPar(ParallelBailoutCause cause, LInstruction *lir)
{
    MDefinition *mir = lir->mirRaw();
    MBasicBlock *block = mir->block();
    jsbytecode *pc = mir->trackedPc();
    if (!pc) {
        if (lir->snapshot())
            pc = lir->snapshot()->mir()->pc();
        else
            pc = block->pc();
    }
    return oolAbortPar(cause, block, pc);
}

OutOfLinePropagateAbortPar *
CodeGeneratorShared::oolPropagateAbortPar(LInstruction *lir)
{
    OutOfLinePropagateAbortPar *ool = new(alloc()) OutOfLinePropagateAbortPar(lir);
    if (!ool || !addOutOfLineCode(ool))
        return nullptr;
    return ool;
}

bool
OutOfLineAbortPar::generate(CodeGeneratorShared *codegen)
{
    codegen->callTraceLIR(0xDEADBEEF, nullptr, "AbortPar");
    return codegen->visitOutOfLineAbortPar(this);
}

bool
OutOfLinePropagateAbortPar::generate(CodeGeneratorShared *codegen)
{
    codegen->callTraceLIR(0xDEADBEEF, nullptr, "AbortPar");
    return codegen->visitOutOfLinePropagateAbortPar(this);
}

bool
CodeGeneratorShared::callTraceLIR(uint32_t blockIndex, LInstruction *lir,
                                  const char *bailoutName)
{
    JS_ASSERT_IF(!lir, bailoutName);

    if (!IonSpewEnabled(IonSpew_Trace))
        return true;

    uint32_t execMode = (uint32_t) gen->info().executionMode();
    uint32_t lirIndex;
    const char *lirOpName;
    const char *mirOpName;
    JSScript *script;
    jsbytecode *pc;

    masm.PushRegsInMask(RegisterSet::Volatile());
    masm.reserveStack(sizeof(IonLIRTraceData));

    // This first move is here so that when you scan the disassembly,
    // you can easily pick out where each instruction begins. The
    // next few items indicate to you the Basic Block / LIR.
    masm.move32(Imm32(0xDEADBEEF), CallTempReg0);

    if (lir) {
        lirIndex = lir->id();
        lirOpName = lir->opName();
        if (MDefinition *mir = lir->mirRaw()) {
            mirOpName = mir->opName();
            script = mir->block()->info().script();
            pc = mir->trackedPc();
        } else {
            mirOpName = nullptr;
            script = nullptr;
            pc = nullptr;
        }
    } else {
        blockIndex = lirIndex = 0xDEADBEEF;
        lirOpName = mirOpName = bailoutName;
        script = nullptr;
        pc = nullptr;
    }

    masm.store32(Imm32(blockIndex),
                 Address(StackPointer, offsetof(IonLIRTraceData, blockIndex)));
    masm.store32(Imm32(lirIndex),
                 Address(StackPointer, offsetof(IonLIRTraceData, lirIndex)));
    masm.store32(Imm32(execMode),
                 Address(StackPointer, offsetof(IonLIRTraceData, execModeInt)));
    masm.storePtr(ImmPtr(lirOpName),
                  Address(StackPointer, offsetof(IonLIRTraceData, lirOpName)));
    masm.storePtr(ImmPtr(mirOpName),
                  Address(StackPointer, offsetof(IonLIRTraceData, mirOpName)));
    masm.storePtr(ImmGCPtr(script),
                  Address(StackPointer, offsetof(IonLIRTraceData, script)));
    masm.storePtr(ImmPtr(pc),
                  Address(StackPointer, offsetof(IonLIRTraceData, pc)));

    masm.movePtr(StackPointer, CallTempReg0);
    masm.setupUnalignedABICall(1, CallTempReg1);
    masm.passABIArg(CallTempReg0);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, TraceLIR));

    masm.freeStack(sizeof(IonLIRTraceData));
    masm.PopRegsInMask(RegisterSet::Volatile());

    return true;
}

typedef bool (*InterruptCheckFn)(JSContext *);
const VMFunction InterruptCheckInfo = FunctionInfo<InterruptCheckFn>(InterruptCheck);

Label *
CodeGeneratorShared::labelForBackedgeWithImplicitCheck(MBasicBlock *mir)
{
    // If this is a loop backedge to a loop header with an implicit interrupt
    // check, use a patchable jump. Skip this search if compiling without a
    // script for asm.js, as there will be no interrupt check instruction.
    // Due to critical edge unsplitting there may no longer be unique loop
    // backedges, so just look for any edge going to an earlier block in RPO.
    if (!gen->compilingAsmJS() && mir->isLoopHeader() && mir->id() <= current->mir()->id()) {
        for (LInstructionIterator iter = mir->lir()->begin(); iter != mir->lir()->end(); iter++) {
            if (iter->isLabel() || iter->isMoveGroup()) {
                // Continue searching for an interrupt check.
            } else if (iter->isInterruptCheckImplicit()) {
                return iter->toInterruptCheckImplicit()->oolEntry();
            } else {
                // The interrupt check should be the first instruction in the
                // loop header other than the initial label and move groups.
                JS_ASSERT(iter->isInterruptCheck() || iter->isInterruptCheckPar());
                return nullptr;
            }
        }
    }

    return nullptr;
}

void
CodeGeneratorShared::jumpToBlock(MBasicBlock *mir)
{
    // No jump necessary if we can fall through to the next block.
    if (isNextBlock(mir->lir()))
        return;

    if (Label *oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
        // Note: the backedge is initially a jump to the next instruction.
        // It will be patched to the target block's label during link().
        RepatchLabel rejoin;
        CodeOffsetJump backedge = masm.jumpWithPatch(&rejoin);
        masm.bind(&rejoin);

        masm.propagateOOM(patchableBackedges_.append(PatchableBackedgeInfo(backedge, mir->lir()->label(), oolEntry)));
    } else {
        masm.jump(mir->lir()->label());
    }
}

// This function is not used for MIPS. MIPS has branchToBlock.
#ifndef JS_CODEGEN_MIPS
void
CodeGeneratorShared::jumpToBlock(MBasicBlock *mir, Assembler::Condition cond)
{
    if (Label *oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
        // Note: the backedge is initially a jump to the next instruction.
        // It will be patched to the target block's label during link().
        RepatchLabel rejoin;
        CodeOffsetJump backedge = masm.jumpWithPatch(&rejoin, cond);
        masm.bind(&rejoin);

        masm.propagateOOM(patchableBackedges_.append(PatchableBackedgeInfo(backedge, mir->lir()->label(), oolEntry)));
    } else {
        masm.j(cond, mir->lir()->label());
    }
}
#endif

size_t
CodeGeneratorShared::addCacheLocations(const CacheLocationList &locs, size_t *numLocs)
{
    size_t firstIndex = runtimeData_.length();
    size_t numLocations = 0;
    for (CacheLocationList::iterator iter = locs.begin(); iter != locs.end(); iter++) {
        // allocateData() ensures that sizeof(CacheLocation) is word-aligned.
        // If this changes, we will need to pad to ensure alignment.
        size_t curIndex = allocateData(sizeof(CacheLocation));
        new (&runtimeData_[curIndex]) CacheLocation(iter->pc, iter->script);
        numLocations++;
    }
    JS_ASSERT(numLocations != 0);
    *numLocs = numLocations;
    return firstIndex;
}

ReciprocalMulConstants
CodeGeneratorShared::computeDivisionConstants(int d) {
    // In what follows, d is positive and is not a power of 2.
    JS_ASSERT(d > 0 && (d & (d - 1)) != 0);

    // Speeding up division by non power-of-2 constants is possible by
    // calculating, during compilation, a value M such that high-order
    // bits of M*n correspond to the result of the division. Formally,
    // we compute values 0 <= M < 2^32 and 0 <= s < 31 such that
    //         (M * n) >> (32 + s) = floor(n/d)    if n >= 0
    //         (M * n) >> (32 + s) = ceil(n/d) - 1 if n < 0.
    // The original presentation of this technique appears in Hacker's
    // Delight, a book by Henry S. Warren, Jr. A proof of correctness
    // for our version follows.

    // Define p = 32 + s, M = ceil(2^p/d), and assume that s satisfies
    //                     M - 2^p/d <= 2^(s+1)/d.                 (1)
    // (Observe that s = FloorLog32(d) satisfies this, because in this
    // case d <= 2^(s+1) and so the RHS of (1) is at least one). Then,
    //
    // a) If s <= FloorLog32(d), then M <= 2^32 - 1.
    // Proof: Indeed, M is monotone in s and, for s = FloorLog32(d),
    // the inequalities 2^31 > d >= 2^s + 1 readily imply
    //     2^p / d  =  2^p/(d - 1) * (d - 1)/d
    //             <=  2^32 * (1 - 1/d) < 2 * (2^31 - 1) = 2^32 - 2.
    // The claim follows by applying the ceiling function.
    //
    // b) For any 0 <= n < 2^31, floor(Mn/2^p) = floor(n/d).
    // Proof: Put x = floor(Mn/2^p); it's the unique integer for which
    //                     Mn/2^p - 1 < x <= Mn/2^p.               (2)
    // Using M >= 2^p/d on the LHS and (1) on the RHS, we get
    //         n/d - 1 < x <= n/d + n/(2^31 d) < n/d + 1/d.
    // Since x is an integer, it's not in the interval (n/d, (n+1)/d),
    // and so n/d - 1 < x <= n/d, which implies x = floor(n/d).
    //
    // c) For any -2^31 <= n < 0, floor(Mn/2^p) + 1 = ceil(n/d).
    // Proof: The proof is similar. Equation (2) holds as above. Using
    // M > 2^p/d (d isn't a power of 2) on the RHS and (1) on the LHS,
    //         n/d + n/(2^31 d) - 1 < x < n/d.
    // Using n >= -2^31 and summing 1,
    //         n/d - 1/d < x + 1 < n/d + 1.
    // Since x + 1 is an integer, this implies n/d <= x + 1 < n/d + 1.
    // In other words, x + 1 = ceil(n/d).
    //
    // Condition (1) isn't necessary for the existence of M and s with
    // the properties above. Hacker's Delight provides a slightly less
    // restrictive condition when d >= 196611, at the cost of a 3-page
    // proof of correctness.

    // Note that, since d*M - 2^p = d - (2^p)%d, (1) can be written as
    //               2^(s+1) >= d - (2^p)%d.
    // We now compute the least s with this property...

    int32_t shift = 0;
    while ((int64_t(1) << (shift+1)) + (int64_t(1) << (shift+32)) % d < d)
        shift++;

    // ...and the corresponding M. This may not fit in a signed 32-bit
    // integer; we will compute (M - 2^32) * n + (2^32 * n) instead of
    // M * n if this is the case (cf. item (a) above).
    ReciprocalMulConstants rmc;
    rmc.multiplier = int32_t((int64_t(1) << (shift+32))/d + 1);
    rmc.shiftAmount = shift;

    return rmc;
}
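// A worked instance of computeDivisionConstants (numbers computed by hand
// for illustration): for d = 7, the loop stops at shift = 2, since
// 2^3 + (2^34 % 7) = 8 + 2 >= 7, giving M = ceil(2^34/7) = 2454267027.
// Then, for example, with n = 100, (M * 100) >> 34 = 14 = floor(100/7),
// exactly as item (b) above guarantees.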

#ifdef JS_TRACE_LOGGING

bool
CodeGeneratorShared::emitTracelogScript(bool isStart)
{
    RegisterSet regs = RegisterSet::Volatile();
    Register logger = regs.takeGeneral();
    Register script = regs.takeGeneral();

    masm.Push(logger);
    masm.Push(script);

    CodeOffsetLabel patchLogger = masm.movWithPatch(ImmPtr(nullptr), logger);
    if (!patchableTraceLoggers_.append(patchLogger))
        return false;

    CodeOffsetLabel patchScript = masm.movWithPatch(ImmWord(0), script);
    if (!patchableTLScripts_.append(patchScript))
        return false;

    if (isStart)
        masm.tracelogStart(logger, script);
    else
        masm.tracelogStop(logger, script);

    masm.Pop(script);
    masm.Pop(logger);
    return true;
}

bool
CodeGeneratorShared::emitTracelogTree(bool isStart, uint32_t textId)
{
    if (!TraceLogTextIdEnabled(textId))
        return true;

    RegisterSet regs = RegisterSet::Volatile();
    Register logger = regs.takeGeneral();

    masm.Push(logger);

    CodeOffsetLabel patchLocation = masm.movWithPatch(ImmPtr(nullptr), logger);
    if (!patchableTraceLoggers_.append(patchLocation))
        return false;

    if (isStart) {
        masm.tracelogStart(logger, textId);
    } else {
#ifdef DEBUG
        masm.tracelogStop(logger, textId);
#else
        masm.tracelogStop(logger);
#endif
    }

    masm.Pop(logger);
    return true;
}
#endif

} // namespace jit
} // namespace js
