js/src/jit/shared/CodeGenerator-x86-shared.cpp

Wed, 31 Dec 2014 06:09:35 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Wed, 31 Dec 2014 06:09:35 +0100
changeset 0
6474c204b198
permissions
-rw-r--r--

Cloned from the upstream tor-browser origin at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

michael@0 1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
michael@0 2 * vim: set ts=8 sts=4 et sw=4 tw=99:
michael@0 3 * This Source Code Form is subject to the terms of the Mozilla Public
michael@0 4 * License, v. 2.0. If a copy of the MPL was not distributed with this
michael@0 5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
michael@0 6
michael@0 7 #include "jit/shared/CodeGenerator-x86-shared.h"
michael@0 8
michael@0 9 #include "mozilla/DebugOnly.h"
michael@0 10 #include "mozilla/MathAlgorithms.h"
michael@0 11
michael@0 12 #include "jsmath.h"
michael@0 13
michael@0 14 #include "jit/IonFrames.h"
michael@0 15 #include "jit/IonLinker.h"
michael@0 16 #include "jit/JitCompartment.h"
michael@0 17 #include "jit/RangeAnalysis.h"
michael@0 18 #include "vm/TraceLogging.h"
michael@0 19
michael@0 20 #include "jit/shared/CodeGenerator-shared-inl.h"
michael@0 21
michael@0 22 using namespace js;
michael@0 23 using namespace js::jit;
michael@0 24
michael@0 25 using mozilla::Abs;
michael@0 26 using mozilla::FloatingPoint;
michael@0 27 using mozilla::FloorLog2;
michael@0 28 using mozilla::NegativeInfinity;
michael@0 29 using mozilla::SpecificNaN;
michael@0 30
michael@0 31 namespace js {
michael@0 32 namespace jit {
michael@0 33
// Construct the x86/x64-shared code generator; all state lives in the
// CodeGeneratorShared base class.
CodeGeneratorX86Shared::CodeGeneratorX86Shared(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm)
  : CodeGeneratorShared(gen, graph, masm)
{
}
michael@0 38
// Emit the standard Ion function prologue: reserve this script's fixed
// frame. asm.js code uses generateAsmJSPrologue instead.
bool
CodeGeneratorX86Shared::generatePrologue()
{
    JS_ASSERT(!gen->compilingAsmJS());

    // Note that this automatically sets MacroAssembler::framePushed().
    masm.reserveStack(frameSize());

    return true;
}
michael@0 49
// Emit the asm.js prologue: an optional stack-overflow check followed by
// frame allocation. On overflow, control transfers to |stackOverflowLabel|.
bool
CodeGeneratorX86Shared::generateAsmJSPrologue(Label *stackOverflowLabel)
{
    JS_ASSERT(gen->compilingAsmJS());

    // The asm.js over-recursed handler wants to be able to assume that SP
    // points to the return address, so perform the check before pushing
    // frameDepth.
    if (!omitOverRecursedCheck()) {
        masm.branchPtr(Assembler::AboveOrEqual,
                       AsmJSAbsoluteAddress(AsmJSImm_StackLimit),
                       StackPointer,
                       stackOverflowLabel);
    }

    // Note that this automatically sets MacroAssembler::framePushed().
    masm.reserveStack(frameSize());
    return true;
}
michael@0 69
// Emit the function epilogue: bind the shared return label, emit trace-log
// stop events where enabled, free the frame, and return.
bool
CodeGeneratorX86Shared::generateEpilogue()
{
    masm.bind(&returnLabel_);

#ifdef JS_TRACE_LOGGING
    // Trace logging only applies to sequential (non-asm.js) Ion code.
    if (!gen->compilingAsmJS() && gen->info().executionMode() == SequentialExecution) {
        if (!emitTracelogStopEvent(TraceLogger::IonMonkey))
            return false;
        if (!emitTracelogScriptStop())
            return false;
    }
#endif

    // Pop the stack we allocated at the start of the function.
    masm.freeStack(frameSize());
    JS_ASSERT(masm.framePushed() == 0);

    masm.ret();
    return true;
}
michael@0 91
// Double-dispatch hook: route this out-of-line path to the code generator's
// visitOutOfLineBailout.
bool
OutOfLineBailout::accept(CodeGeneratorX86Shared *codegen)
{
    return codegen->visitOutOfLineBailout(this);
}
michael@0 97
// Emit a conditional branch to the true/false MIR blocks. |ifNaN| selects
// which successor an unordered (NaN) comparison takes: after ucomis*, the
// Parity flag is set iff the operands were unordered, so the NaN jump is
// emitted first, before the main condition is tested.
void
CodeGeneratorX86Shared::emitBranch(Assembler::Condition cond, MBasicBlock *mirTrue,
                                   MBasicBlock *mirFalse, Assembler::NaNCond ifNaN)
{
    if (ifNaN == Assembler::NaN_IsFalse)
        jumpToBlock(mirFalse, Assembler::Parity);
    else if (ifNaN == Assembler::NaN_IsTrue)
        jumpToBlock(mirTrue, Assembler::Parity);

    // If the false target is the fall-through block, a single jump on |cond|
    // suffices; otherwise branch on the inverted condition and jump to the
    // true target unconditionally.
    if (isNextBlock(mirFalse->lir())) {
        jumpToBlock(mirTrue, cond);
    } else {
        jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
        jumpToBlock(mirTrue);
    }
}
michael@0 114
michael@0 115 bool
michael@0 116 CodeGeneratorX86Shared::visitDouble(LDouble *ins)
michael@0 117 {
michael@0 118 const LDefinition *out = ins->getDef(0);
michael@0 119 masm.loadConstantDouble(ins->getDouble(), ToFloatRegister(out));
michael@0 120 return true;
michael@0 121 }
michael@0 122
michael@0 123 bool
michael@0 124 CodeGeneratorX86Shared::visitFloat32(LFloat32 *ins)
michael@0 125 {
michael@0 126 const LDefinition *out = ins->getDef(0);
michael@0 127 masm.loadConstantFloat32(ins->getFloat(), ToFloatRegister(out));
michael@0 128 return true;
michael@0 129 }
michael@0 130
// Branch on the truthiness of an int32: test the register against itself
// (sets ZF iff zero) and branch NonZero -> true block, Zero -> false block.
bool
CodeGeneratorX86Shared::visitTestIAndBranch(LTestIAndBranch *test)
{
    const LAllocation *opd = test->input();

    // Test the operand
    masm.testl(ToRegister(opd), ToRegister(opd));
    emitBranch(Assembler::NonZero, test->ifTrue(), test->ifFalse());
    return true;
}
michael@0 141
// Branch on the truthiness of a double: compare against +0.0 with ucomisd
// and branch on ZF. NaN compares unordered (ZF=1), so it correctly falls
// into the false branch, matching JS semantics where NaN is falsey.
bool
CodeGeneratorX86Shared::visitTestDAndBranch(LTestDAndBranch *test)
{
    const LAllocation *opd = test->input();

    // ucomisd flags:
    //             Z  P  C
    //            ---------
    //      NaN    1  1  1
    //        >    0  0  0
    //        <    0  0  1
    //        =    1  0  0
    //
    // NaN is falsey, so comparing against 0 and then using the Z flag is
    // enough to determine which branch to take.
    masm.xorpd(ScratchFloatReg, ScratchFloatReg);
    masm.ucomisd(ToFloatRegister(opd), ScratchFloatReg);
    emitBranch(Assembler::NotEqual, test->ifTrue(), test->ifFalse());
    return true;
}
michael@0 162
// Branch on the truthiness of a float32; same flag logic as the double case.
bool
CodeGeneratorX86Shared::visitTestFAndBranch(LTestFAndBranch *test)
{
    const LAllocation *opd = test->input();
    // ucomiss flags are the same as doubles; see comment above
    masm.xorps(ScratchFloatReg, ScratchFloatReg);
    masm.ucomiss(ToFloatRegister(opd), ScratchFloatReg);
    emitBranch(Assembler::NotEqual, test->ifTrue(), test->ifFalse());
    return true;
}
michael@0 173
// Fused (lhs & rhs) != 0 branch: testl sets flags from the AND without
// writing a result, then branch NonZero/Zero to the successors.
bool
CodeGeneratorX86Shared::visitBitAndAndBranch(LBitAndAndBranch *baab)
{
    if (baab->right()->isConstant())
        masm.testl(ToRegister(baab->left()), Imm32(ToInt32(baab->right())));
    else
        masm.testl(ToRegister(baab->left()), ToRegister(baab->right()));
    emitBranch(Assembler::NonZero, baab->ifTrue(), baab->ifFalse());
    return true;
}
michael@0 184
// Emit a comparison of two int32 (or, on x64, object-pointer) operands,
// leaving the result in the CPU flags for a following emitSet/emitBranch.
void
CodeGeneratorX86Shared::emitCompare(MCompare::CompareType type, const LAllocation *left, const LAllocation *right)
{
#ifdef JS_CODEGEN_X64
    // Object pointers are 64-bit on x64, so a full-width cmpq is required.
    if (type == MCompare::Compare_Object) {
        masm.cmpq(ToRegister(left), ToOperand(right));
        return;
    }
#endif

    if (right->isConstant())
        masm.cmpl(ToRegister(left), Imm32(ToInt32(right)));
    else
        masm.cmpl(ToRegister(left), ToOperand(right));
}
michael@0 200
// Compare two values and materialize the boolean result into the output
// register via setcc.
bool
CodeGeneratorX86Shared::visitCompare(LCompare *comp)
{
    MCompare *mir = comp->mir();
    emitCompare(mir->compareType(), comp->left(), comp->right());
    masm.emitSet(JSOpToCondition(mir->compareType(), comp->jsop()), ToRegister(comp->output()));
    return true;
}
michael@0 209
// Fused compare-and-branch: compare the operands, then branch directly on
// the resulting condition without materializing a boolean.
bool
CodeGeneratorX86Shared::visitCompareAndBranch(LCompareAndBranch *comp)
{
    MCompare *mir = comp->cmpMir();
    emitCompare(mir->compareType(), comp->left(), comp->right());
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
    emitBranch(cond, comp->ifTrue(), comp->ifFalse());
    return true;
}
michael@0 219
// Compare two doubles and materialize the boolean result. |nanCond| tells
// emitSet what to produce when the comparison is unordered (NaN); range
// analysis may prove NaN impossible, letting the condition alone suffice.
bool
CodeGeneratorX86Shared::visitCompareD(LCompareD *comp)
{
    FloatRegister lhs = ToFloatRegister(comp->left());
    FloatRegister rhs = ToFloatRegister(comp->right());

    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());

    Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
    if (comp->mir()->operandsAreNeverNaN())
        nanCond = Assembler::NaN_HandledByCond;

    masm.compareDouble(cond, lhs, rhs);
    masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()), nanCond);
    return true;
}
michael@0 236
// Float32 analogue of visitCompareD: compare and materialize a boolean,
// with explicit NaN handling unless range analysis rules NaN out.
bool
CodeGeneratorX86Shared::visitCompareF(LCompareF *comp)
{
    FloatRegister lhs = ToFloatRegister(comp->left());
    FloatRegister rhs = ToFloatRegister(comp->right());

    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());

    Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
    if (comp->mir()->operandsAreNeverNaN())
        nanCond = Assembler::NaN_HandledByCond;

    masm.compareFloat(cond, lhs, rhs);
    masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()), nanCond);
    return true;
}
michael@0 253
// Logical NOT of an int32: output = (input == 0).
bool
CodeGeneratorX86Shared::visitNotI(LNotI *ins)
{
    masm.cmpl(ToRegister(ins->input()), Imm32(0));
    masm.emitSet(Assembler::Equal, ToRegister(ins->output()));
    return true;
}
michael@0 261
// Logical NOT of a double: output = (input == 0.0 || input is NaN), since
// both 0 and NaN are falsey in JS.
bool
CodeGeneratorX86Shared::visitNotD(LNotD *ins)
{
    FloatRegister opd = ToFloatRegister(ins->input());

    // Not returns true if the input is a NaN. We don't have to worry about
    // it if we know the input is never NaN though.
    Assembler::NaNCond nanCond = Assembler::NaN_IsTrue;
    if (ins->mir()->operandIsNeverNaN())
        nanCond = Assembler::NaN_HandledByCond;

    masm.xorpd(ScratchFloatReg, ScratchFloatReg);
    masm.compareDouble(Assembler::DoubleEqualOrUnordered, opd, ScratchFloatReg);
    masm.emitSet(Assembler::Equal, ToRegister(ins->output()), nanCond);
    return true;
}
michael@0 278
// Float32 analogue of visitNotD: output = (input == 0.0f || input is NaN).
bool
CodeGeneratorX86Shared::visitNotF(LNotF *ins)
{
    FloatRegister opd = ToFloatRegister(ins->input());

    // Not returns true if the input is a NaN. We don't have to worry about
    // it if we know the input is never NaN though.
    Assembler::NaNCond nanCond = Assembler::NaN_IsTrue;
    if (ins->mir()->operandIsNeverNaN())
        nanCond = Assembler::NaN_HandledByCond;

    masm.xorps(ScratchFloatReg, ScratchFloatReg);
    masm.compareFloat(Assembler::DoubleEqualOrUnordered, opd, ScratchFloatReg);
    masm.emitSet(Assembler::Equal, ToRegister(ins->output()), nanCond);
    return true;
}
michael@0 295
// Fused double compare-and-branch; |nanCond| routes unordered (NaN)
// comparisons to the correct successor in emitBranch.
bool
CodeGeneratorX86Shared::visitCompareDAndBranch(LCompareDAndBranch *comp)
{
    FloatRegister lhs = ToFloatRegister(comp->left());
    FloatRegister rhs = ToFloatRegister(comp->right());

    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());

    Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
    if (comp->cmpMir()->operandsAreNeverNaN())
        nanCond = Assembler::NaN_HandledByCond;

    masm.compareDouble(cond, lhs, rhs);
    emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse(), nanCond);
    return true;
}
michael@0 312
// Float32 analogue of visitCompareDAndBranch.
bool
CodeGeneratorX86Shared::visitCompareFAndBranch(LCompareFAndBranch *comp)
{
    FloatRegister lhs = ToFloatRegister(comp->left());
    FloatRegister rhs = ToFloatRegister(comp->right());

    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());

    Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
    if (comp->cmpMir()->operandsAreNeverNaN())
        nanCond = Assembler::NaN_HandledByCond;

    masm.compareFloat(cond, lhs, rhs);
    emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse(), nanCond);
    return true;
}
michael@0 329
// Store an outgoing asm.js call argument to its stack slot at
// [StackPointer + spOffset]. Constants go out as an immediate word,
// registers as a pointer-sized store, FP values as a double store.
bool
CodeGeneratorX86Shared::visitAsmJSPassStackArg(LAsmJSPassStackArg *ins)
{
    const MAsmJSPassStackArg *mir = ins->mir();
    Address dst(StackPointer, mir->spOffset());
    if (ins->arg()->isConstant()) {
        // NOTE(review): the constant is an int32; ImmWord widens it to the
        // pointer size for the store.
        masm.storePtr(ImmWord(ToInt32(ins->arg())), dst);
    } else {
        if (ins->arg()->isGeneralReg())
            masm.storePtr(ToRegister(ins->arg()), dst);
        else
            masm.storeDouble(ToFloatRegister(ins->arg()), dst);
    }
    return true;
}
michael@0 345
// Emit shared out-of-line code, then (if any bailout used it) the common
// deopt tail: push the frame size so the generic handler can locate the
// IonScript, and jump to the runtime's bailout handler.
bool
CodeGeneratorX86Shared::generateOutOfLineCode()
{
    if (!CodeGeneratorShared::generateOutOfLineCode())
        return false;

    if (deoptLabel_.used()) {
        // All non-table-based bailouts will go here.
        masm.bind(&deoptLabel_);

        // Push the frame size, so the handler can recover the IonScript.
        masm.push(Imm32(frameSize()));

        JitCode *handler = gen->jitRuntime()->getGenericBailoutHandler();
        masm.jmp(ImmPtr(handler->raw()), Relocation::JITCODE);
    }

    return true;
}
michael@0 365
michael@0 366 class BailoutJump {
michael@0 367 Assembler::Condition cond_;
michael@0 368
michael@0 369 public:
michael@0 370 BailoutJump(Assembler::Condition cond) : cond_(cond)
michael@0 371 { }
michael@0 372 #ifdef JS_CODEGEN_X86
michael@0 373 void operator()(MacroAssembler &masm, uint8_t *code) const {
michael@0 374 masm.j(cond_, ImmPtr(code), Relocation::HARDCODED);
michael@0 375 }
michael@0 376 #endif
michael@0 377 void operator()(MacroAssembler &masm, Label *label) const {
michael@0 378 masm.j(cond_, label);
michael@0 379 }
michael@0 380 };
michael@0 381
michael@0 382 class BailoutLabel {
michael@0 383 Label *label_;
michael@0 384
michael@0 385 public:
michael@0 386 BailoutLabel(Label *label) : label_(label)
michael@0 387 { }
michael@0 388 #ifdef JS_CODEGEN_X86
michael@0 389 void operator()(MacroAssembler &masm, uint8_t *code) const {
michael@0 390 masm.retarget(label_, ImmPtr(code), Relocation::HARDCODED);
michael@0 391 }
michael@0 392 #endif
michael@0 393 void operator()(MacroAssembler &masm, Label *label) const {
michael@0 394 masm.retarget(label_, label);
michael@0 395 }
michael@0 396 };
michael@0 397
// Core bailout emitter. |binder| (BailoutJump or BailoutLabel) attaches the
// current instruction's escape to the chosen bailout destination: a parallel
// abort stub, an x86 bailout-table entry, or a lazy out-of-line bailout.
template <typename T> bool
CodeGeneratorX86Shared::bailout(const T &binder, LSnapshot *snapshot)
{
    CompileInfo &info = snapshot->mir()->block()->info();
    switch (info.executionMode()) {
      case ParallelExecution: {
        // in parallel mode, make no attempt to recover, just signal an error.
        OutOfLineAbortPar *ool = oolAbortPar(ParallelBailoutUnsupported,
                                             snapshot->mir()->block(),
                                             snapshot->mir()->pc());
        binder(masm, ool->entry());
        return true;
      }
      case SequentialExecution:
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("No such execution mode");
    }

    if (!encode(snapshot))
        return false;

    // Though the assembler doesn't track all frame pushes, at least make sure
    // the known value makes sense. We can't use bailout tables if the stack
    // isn't properly aligned to the static frame size.
    JS_ASSERT_IF(frameClass_ != FrameSizeClass::None() && deoptTable_,
                 frameClass_.frameSize() == masm.framePushed());

#ifdef JS_CODEGEN_X86
    // Bailout tables are used on x86 only: on x64 they are pointless,
    // because 16 extra bytes are reserved per external jump, whereas it
    // takes only 10 bytes to encode a non-table-based bailout.
    if (assignBailoutId(snapshot)) {
        binder(masm, deoptTable_->raw() + snapshot->bailoutId() * BAILOUT_TABLE_ENTRY_SIZE);
        return true;
    }
#endif

    // We could not use a jump table, either because all bailout IDs were
    // reserved, or a jump table is not optimal for this frame size or
    // platform. Whatever, we will generate a lazy bailout.
    OutOfLineBailout *ool = new(alloc()) OutOfLineBailout(snapshot);
    if (!addOutOfLineCode(ool))
        return false;

    binder(masm, ool->entry());
    return true;
}
michael@0 446
// Bail out of Ion code when |condition| holds on the current CPU flags.
bool
CodeGeneratorX86Shared::bailoutIf(Assembler::Condition condition, LSnapshot *snapshot)
{
    return bailout(BailoutJump(condition), snapshot);
}
michael@0 452
// Double-condition overload: only valid for conditions whose NaN behavior
// is fully captured by the integer condition (no separate Parity check).
bool
CodeGeneratorX86Shared::bailoutIf(Assembler::DoubleCondition condition, LSnapshot *snapshot)
{
    JS_ASSERT(Assembler::NaNCondFromDoubleCondition(condition) == Assembler::NaN_HandledByCond);
    return bailoutIf(Assembler::ConditionFromDoubleCondition(condition), snapshot);
}
michael@0 459
// Bail out from a label that earlier code has jumped to but not yet bound;
// the BailoutLabel binder retargets it at the bailout destination.
bool
CodeGeneratorX86Shared::bailoutFrom(Label *label, LSnapshot *snapshot)
{
    JS_ASSERT(label->used() && !label->bound());
    return bailout(BailoutLabel(label), snapshot);
}
michael@0 466
// Unconditional bailout: emit a jump to a fresh label, then let
// bailoutFrom retarget that label at the bailout destination.
bool
CodeGeneratorX86Shared::bailout(LSnapshot *snapshot)
{
    Label label;
    masm.jump(&label);
    return bailoutFrom(&label, snapshot);
}
michael@0 474
// Out-of-line lazy bailout: push the snapshot offset for the generic
// handler, then jump to the shared deopt tail (see generateOutOfLineCode).
bool
CodeGeneratorX86Shared::visitOutOfLineBailout(OutOfLineBailout *ool)
{
    masm.push(Imm32(ool->snapshot()->snapshotOffset()));
    masm.jmp(&deoptLabel_);
    return true;
}
michael@0 482
michael@0 483 bool
michael@0 484 CodeGeneratorX86Shared::visitMinMaxD(LMinMaxD *ins)
michael@0 485 {
michael@0 486 FloatRegister first = ToFloatRegister(ins->first());
michael@0 487 FloatRegister second = ToFloatRegister(ins->second());
michael@0 488 #ifdef DEBUG
michael@0 489 FloatRegister output = ToFloatRegister(ins->output());
michael@0 490 JS_ASSERT(first == output);
michael@0 491 #endif
michael@0 492
michael@0 493 Label done, nan, minMaxInst;
michael@0 494
michael@0 495 // Do a ucomisd to catch equality and NaNs, which both require special
michael@0 496 // handling. If the operands are ordered and inequal, we branch straight to
michael@0 497 // the min/max instruction. If we wanted, we could also branch for less-than
michael@0 498 // or greater-than here instead of using min/max, however these conditions
michael@0 499 // will sometimes be hard on the branch predictor.
michael@0 500 masm.ucomisd(first, second);
michael@0 501 masm.j(Assembler::NotEqual, &minMaxInst);
michael@0 502 if (!ins->mir()->range() || ins->mir()->range()->canBeNaN())
michael@0 503 masm.j(Assembler::Parity, &nan);
michael@0 504
michael@0 505 // Ordered and equal. The operands are bit-identical unless they are zero
michael@0 506 // and negative zero. These instructions merge the sign bits in that
michael@0 507 // case, and are no-ops otherwise.
michael@0 508 if (ins->mir()->isMax())
michael@0 509 masm.andpd(second, first);
michael@0 510 else
michael@0 511 masm.orpd(second, first);
michael@0 512 masm.jump(&done);
michael@0 513
michael@0 514 // x86's min/max are not symmetric; if either operand is a NaN, they return
michael@0 515 // the read-only operand. We need to return a NaN if either operand is a
michael@0 516 // NaN, so we explicitly check for a NaN in the read-write operand.
michael@0 517 if (!ins->mir()->range() || ins->mir()->range()->canBeNaN()) {
michael@0 518 masm.bind(&nan);
michael@0 519 masm.ucomisd(first, first);
michael@0 520 masm.j(Assembler::Parity, &done);
michael@0 521 }
michael@0 522
michael@0 523 // When the values are inequal, or second is NaN, x86's min and max will
michael@0 524 // return the value we need.
michael@0 525 masm.bind(&minMaxInst);
michael@0 526 if (ins->mir()->isMax())
michael@0 527 masm.maxsd(second, first);
michael@0 528 else
michael@0 529 masm.minsd(second, first);
michael@0 530
michael@0 531 masm.bind(&done);
michael@0 532 return true;
michael@0 533 }
michael@0 534
// Math.abs on a double: mask off the IEEE-754 sign bit in place.
bool
CodeGeneratorX86Shared::visitAbsD(LAbsD *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    JS_ASSERT(input == ToFloatRegister(ins->output()));
    // Load a value which is all ones except for the sign bit.
    masm.loadConstantDouble(SpecificNaN<double>(0, FloatingPoint<double>::SignificandBits),
                            ScratchFloatReg);
    masm.andpd(ScratchFloatReg, input);
    return true;
}
michael@0 546
// Math.abs on a float32: mask off the IEEE-754 sign bit in place.
bool
CodeGeneratorX86Shared::visitAbsF(LAbsF *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    JS_ASSERT(input == ToFloatRegister(ins->output()));
    // Same trick as visitAbsD above.
    masm.loadConstantFloat32(SpecificNaN<float>(0, FloatingPoint<float>::SignificandBits),
                             ScratchFloatReg);
    masm.andps(ScratchFloatReg, input);
    return true;
}
michael@0 558
michael@0 559 bool
michael@0 560 CodeGeneratorX86Shared::visitSqrtD(LSqrtD *ins)
michael@0 561 {
michael@0 562 FloatRegister input = ToFloatRegister(ins->input());
michael@0 563 FloatRegister output = ToFloatRegister(ins->output());
michael@0 564 masm.sqrtsd(input, output);
michael@0 565 return true;
michael@0 566 }
michael@0 567
michael@0 568 bool
michael@0 569 CodeGeneratorX86Shared::visitSqrtF(LSqrtF *ins)
michael@0 570 {
michael@0 571 FloatRegister input = ToFloatRegister(ins->input());
michael@0 572 FloatRegister output = ToFloatRegister(ins->output());
michael@0 573 masm.sqrtss(input, output);
michael@0 574 return true;
michael@0 575 }
michael@0 576
// Math.pow(x, 0.5) with full ES semantics: pow(-Infinity, 0.5) == Infinity
// (unlike sqrt, which would give NaN) and pow(-0, 0.5) == +0. Operates in
// place on the input register, which is also the output.
bool
CodeGeneratorX86Shared::visitPowHalfD(LPowHalfD *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    JS_ASSERT(input == ToFloatRegister(ins->output()));

    Label done, sqrt;

    if (!ins->mir()->operandIsNeverNegativeInfinity()) {
        // Branch if not -Infinity.
        masm.loadConstantDouble(NegativeInfinity<double>(), ScratchFloatReg);

        Assembler::DoubleCondition cond = Assembler::DoubleNotEqualOrUnordered;
        if (ins->mir()->operandIsNeverNaN())
            cond = Assembler::DoubleNotEqual;
        masm.branchDouble(cond, input, ScratchFloatReg, &sqrt);

        // Math.pow(-Infinity, 0.5) == Infinity.
        // Compute 0 - (-Infinity) == +Infinity into the input register.
        masm.xorpd(input, input);
        masm.subsd(ScratchFloatReg, input);
        masm.jump(&done);

        masm.bind(&sqrt);
    }

    if (!ins->mir()->operandIsNeverNegativeZero()) {
        // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5). Adding 0 converts any -0 to 0.
        masm.xorpd(ScratchFloatReg, ScratchFloatReg);
        masm.addsd(ScratchFloatReg, input);
    }

    masm.sqrtsd(input, input);

    masm.bind(&done);
    return true;
}
michael@0 613
michael@0 614 class OutOfLineUndoALUOperation : public OutOfLineCodeBase<CodeGeneratorX86Shared>
michael@0 615 {
michael@0 616 LInstruction *ins_;
michael@0 617
michael@0 618 public:
michael@0 619 OutOfLineUndoALUOperation(LInstruction *ins)
michael@0 620 : ins_(ins)
michael@0 621 { }
michael@0 622
michael@0 623 virtual bool accept(CodeGeneratorX86Shared *codegen) {
michael@0 624 return codegen->visitOutOfLineUndoALUOperation(this);
michael@0 625 }
michael@0 626 LInstruction *ins() const {
michael@0 627 return ins_;
michael@0 628 }
michael@0 629 };
michael@0 630
// Int32 addition performed in place on lhs (which is also the output).
// With a snapshot, overflow either jumps to an OOL undo-then-bailout path
// (when the input value must be recovered from the output) or bails out
// directly.
bool
CodeGeneratorX86Shared::visitAddI(LAddI *ins)
{
    if (ins->rhs()->isConstant())
        masm.addl(Imm32(ToInt32(ins->rhs())), ToOperand(ins->lhs()));
    else
        masm.addl(ToOperand(ins->rhs()), ToRegister(ins->lhs()));

    if (ins->snapshot()) {
        if (ins->recoversInput()) {
            // The snapshot refers to the (clobbered) input; undo first.
            OutOfLineUndoALUOperation *ool = new(alloc()) OutOfLineUndoALUOperation(ins);
            if (!addOutOfLineCode(ool))
                return false;
            masm.j(Assembler::Overflow, ool->entry());
        } else {
            if (!bailoutIf(Assembler::Overflow, ins->snapshot()))
                return false;
        }
    }
    return true;
}
michael@0 652
// Int32 subtraction performed in place on lhs; overflow handling mirrors
// visitAddI.
bool
CodeGeneratorX86Shared::visitSubI(LSubI *ins)
{
    if (ins->rhs()->isConstant())
        masm.subl(Imm32(ToInt32(ins->rhs())), ToOperand(ins->lhs()));
    else
        masm.subl(ToOperand(ins->rhs()), ToRegister(ins->lhs()));

    if (ins->snapshot()) {
        if (ins->recoversInput()) {
            // The snapshot refers to the (clobbered) input; undo first.
            OutOfLineUndoALUOperation *ool = new(alloc()) OutOfLineUndoALUOperation(ins);
            if (!addOutOfLineCode(ool))
                return false;
            masm.j(Assembler::Overflow, ool->entry());
        } else {
            if (!bailoutIf(Assembler::Overflow, ins->snapshot()))
                return false;
        }
    }
    return true;
}
michael@0 674
michael@0 675 bool
michael@0 676 CodeGeneratorX86Shared::visitOutOfLineUndoALUOperation(OutOfLineUndoALUOperation *ool)
michael@0 677 {
michael@0 678 LInstruction *ins = ool->ins();
michael@0 679 Register reg = ToRegister(ins->getDef(0));
michael@0 680
michael@0 681 mozilla::DebugOnly<LAllocation *> lhs = ins->getOperand(0);
michael@0 682 LAllocation *rhs = ins->getOperand(1);
michael@0 683
michael@0 684 JS_ASSERT(reg == ToRegister(lhs));
michael@0 685 JS_ASSERT_IF(rhs->isGeneralReg(), reg != ToRegister(rhs));
michael@0 686
michael@0 687 // Undo the effect of the ALU operation, which was performed on the output
michael@0 688 // register and overflowed. Writing to the output register clobbered an
michael@0 689 // input reg, and the original value of the input needs to be recovered
michael@0 690 // to satisfy the constraint imposed by any RECOVERED_INPUT operands to
michael@0 691 // the bailout snapshot.
michael@0 692
michael@0 693 if (rhs->isConstant()) {
michael@0 694 Imm32 constant(ToInt32(rhs));
michael@0 695 if (ins->isAddI())
michael@0 696 masm.subl(constant, reg);
michael@0 697 else
michael@0 698 masm.addl(constant, reg);
michael@0 699 } else {
michael@0 700 if (ins->isAddI())
michael@0 701 masm.subl(ToOperand(rhs), reg);
michael@0 702 else
michael@0 703 masm.addl(ToOperand(rhs), reg);
michael@0 704 }
michael@0 705
michael@0 706 return bailout(ool->ins()->snapshot());
michael@0 707 }
michael@0 708
michael@0 709 class MulNegativeZeroCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared>
michael@0 710 {
michael@0 711 LMulI *ins_;
michael@0 712
michael@0 713 public:
michael@0 714 MulNegativeZeroCheck(LMulI *ins)
michael@0 715 : ins_(ins)
michael@0 716 { }
michael@0 717
michael@0 718 virtual bool accept(CodeGeneratorX86Shared *codegen) {
michael@0 719 return codegen->visitMulNegativeZeroCheck(this);
michael@0 720 }
michael@0 721 LMulI *ins() const {
michael@0 722 return ins_;
michael@0 723 }
michael@0 724 };
michael@0 725
// Int32 multiplication, in place on lhs. Constant right-hand sides get
// strength reduction (neg/xor/add/shift); overflow and negative-zero
// checks bail out as required by the MIR node.
bool
CodeGeneratorX86Shared::visitMulI(LMulI *ins)
{
    const LAllocation *lhs = ins->lhs();
    const LAllocation *rhs = ins->rhs();
    MMul *mul = ins->mir();
    JS_ASSERT_IF(mul->mode() == MMul::Integer, !mul->canBeNegativeZero() && !mul->canOverflow());

    if (rhs->isConstant()) {
        // Bailout on -0.0
        // constant == 0: result is -0 when lhs is negative (Signed);
        // constant < 0:  result is -0 when lhs is zero (Equal).
        int32_t constant = ToInt32(rhs);
        if (mul->canBeNegativeZero() && constant <= 0) {
            Assembler::Condition bailoutCond = (constant == 0) ? Assembler::Signed : Assembler::Equal;
            masm.testl(ToRegister(lhs), ToRegister(lhs));
            if (!bailoutIf(bailoutCond, ins->snapshot()))
                return false;
        }

        switch (constant) {
          case -1:
            masm.negl(ToOperand(lhs));
            break;
          case 0:
            masm.xorl(ToOperand(lhs), ToRegister(lhs));
            return true; // escape overflow check;
          case 1:
            // nop
            return true; // escape overflow check;
          case 2:
            masm.addl(ToOperand(lhs), ToRegister(lhs));
            break;
          default:
            if (!mul->canOverflow() && constant > 0) {
                // Use shift if cannot overflow and constant is power of 2
                int32_t shift = FloorLog2(constant);
                if ((1 << shift) == constant) {
                    masm.shll(Imm32(shift), ToRegister(lhs));
                    return true;
                }
            }
            masm.imull(Imm32(ToInt32(rhs)), ToRegister(lhs));
        }

        // Bailout on overflow
        if (mul->canOverflow() && !bailoutIf(Assembler::Overflow, ins->snapshot()))
            return false;
    } else {
        masm.imull(ToOperand(rhs), ToRegister(lhs));

        // Bailout on overflow
        if (mul->canOverflow() && !bailoutIf(Assembler::Overflow, ins->snapshot()))
            return false;

        if (mul->canBeNegativeZero()) {
            // Jump to an OOL path if the result is 0.
            MulNegativeZeroCheck *ool = new(alloc()) MulNegativeZeroCheck(ins);
            if (!addOutOfLineCode(ool))
                return false;

            masm.testl(ToRegister(lhs), ToRegister(lhs));
            masm.j(Assembler::Zero, ool->entry());
            masm.bind(ool->rejoin());
        }
    }

    return true;
}
michael@0 793
michael@0 794 class ReturnZero : public OutOfLineCodeBase<CodeGeneratorX86Shared>
michael@0 795 {
michael@0 796 Register reg_;
michael@0 797
michael@0 798 public:
michael@0 799 explicit ReturnZero(Register reg)
michael@0 800 : reg_(reg)
michael@0 801 { }
michael@0 802
michael@0 803 virtual bool accept(CodeGeneratorX86Shared *codegen) {
michael@0 804 return codegen->visitReturnZero(this);
michael@0 805 }
michael@0 806 Register reg() const {
michael@0 807 return reg_;
michael@0 808 }
michael@0 809 };
michael@0 810
// OOL target for a ReturnZero stub: load zero into the designated result
// register, then jump back to the rejoin point in the main code path.
bool
CodeGeneratorX86Shared::visitReturnZero(ReturnZero *ool)
{
    masm.mov(ImmWord(0), ool->reg());
    masm.jmp(ool->rejoin());
    return true;
}
michael@0 818
// Lower unsigned 32-bit division/modulus using the x86 div instruction,
// which divides (edx:eax) and leaves the quotient in eax and the remainder
// in edx.
bool
CodeGeneratorX86Shared::visitUDivOrMod(LUDivOrMod *ins)
{
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());

    // div clobbers eax and edx, so the allocator must keep the divisor out
    // of those registers.
    JS_ASSERT_IF(lhs != rhs, rhs != eax);
    JS_ASSERT(rhs != edx);
    JS_ASSERT_IF(output == eax, ToRegister(ins->remainder()) == edx);

    ReturnZero *ool = nullptr;

    // Put the lhs in eax.
    if (lhs != eax)
        masm.mov(lhs, eax);

    // Prevent divide by zero. In truncated contexts x / 0 and x % 0 are
    // defined to be 0, handled by the OOL ReturnZero path; otherwise we
    // must bail out so a non-int32 result can be produced.
    if (ins->canBeDivideByZero()) {
        masm.testl(rhs, rhs);
        if (ins->mir()->isTruncated()) {
            if (!ool)
                ool = new(alloc()) ReturnZero(output);
            masm.j(Assembler::Zero, ool->entry());
        } else {
            if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                return false;
        }
    }

    // Zero extend the lhs into edx to make (edx:eax), since udiv is 64-bit.
    masm.mov(ImmWord(0), edx);
    masm.udiv(rhs);

    // Unsigned div or mod can return a value that's not a signed int32
    // (i.e. with the high bit set). If our users aren't expecting that, bail.
    if (!ins->mir()->isTruncated()) {
        masm.testl(output, output);
        if (!bailoutIf(Assembler::Signed, ins->snapshot()))
            return false;
    }

    if (ool) {
        if (!addOutOfLineCode(ool))
            return false;
        masm.bind(ool->rejoin());
    }

    return true;
}
michael@0 869
// OOL path entered when an int32 multiply produced a zero result. A zero
// product whose true value is -0 (one operand negative) cannot be
// represented as an int32, so bail out; otherwise materialize +0 and rejoin.
bool
CodeGeneratorX86Shared::visitMulNegativeZeroCheck(MulNegativeZeroCheck *ool)
{
    LMulI *ins = ool->ins();
    Register result = ToRegister(ins->output());
    Operand lhsCopy = ToOperand(ins->lhsCopy());
    Operand rhs = ToOperand(ins->rhs());
    // lhsCopy must not alias the (already clobbered) result register.
    JS_ASSERT_IF(lhsCopy.kind() == Operand::REG, lhsCopy.reg() != result.code());

    // Result is -0 if lhs or rhs is negative: or the operands together and
    // test the sign bit of the combination.
    masm.movl(lhsCopy, result);
    masm.orl(rhs, result);
    if (!bailoutIf(Assembler::Signed, ins->snapshot()))
        return false;

    // Both operands non-negative: the zero result stands; restore it and rejoin.
    masm.mov(ImmWord(0), result);
    masm.jmp(ool->rejoin());
    return true;
}
michael@0 889
// Lower signed division by a constant whose absolute value is 2^shift,
// using arithmetic shifts instead of idiv.
bool
CodeGeneratorX86Shared::visitDivPowTwoI(LDivPowTwoI *ins)
{
    Register lhs = ToRegister(ins->numerator());
    mozilla::DebugOnly<Register> output = ToRegister(ins->output());

    int32_t shift = ins->shift();
    bool negativeDivisor = ins->negativeDivisor();
    MDiv *mir = ins->mir();

    // We use defineReuseInput so these should always be the same, which is
    // convenient since all of our instructions here are two-address.
    JS_ASSERT(lhs == output);

    if (!mir->isTruncated() && negativeDivisor) {
        // 0 divided by a negative number must return a double (-0).
        masm.testl(lhs, lhs);
        if (!bailoutIf(Assembler::Zero, ins->snapshot()))
            return false;
    }

    if (shift != 0) {
        if (!mir->isTruncated()) {
            // If the remainder is != 0, bailout since this must be a double.
            // The low |shift| bits of the numerator are exactly the remainder
            // of dividing by 2^shift.
            masm.testl(lhs, Imm32(UINT32_MAX >> (32 - shift)));
            if (!bailoutIf(Assembler::NonZero, ins->snapshot()))
                return false;
        }

        // Adjust the value so that shifting produces a correctly rounded result
        // when the numerator is negative. See 10-1 "Signed Division by a Known
        // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
        if (mir->canBeNegativeDividend()) {
            Register lhsCopy = ToRegister(ins->numeratorCopy());
            JS_ASSERT(lhsCopy != lhs);
            // For shift == 1 the shrl below extracts the sign bit directly,
            // which is already the needed rounding bias, so sarl is skipped.
            if (shift > 1)
                masm.sarl(Imm32(31), lhs);
            masm.shrl(Imm32(32 - shift), lhs);
            masm.addl(lhsCopy, lhs);
        }

        masm.sarl(Imm32(shift), lhs);
        if (negativeDivisor)
            masm.negl(lhs);
    } else if (shift == 0 && negativeDivisor) {
        // Divisor is -1: negate. INT32_MIN / -1 overflows, so bail in the
        // non-truncated case.
        masm.negl(lhs);
        if (!mir->isTruncated() && !bailoutIf(Assembler::Overflow, ins->snapshot()))
            return false;
    }

    return true;
}
michael@0 943
// Lower signed division or modulus by a constant |d| whose absolute value is
// not a power of two, via multiplication by a precomputed fixed-point
// reciprocal (see computeDivisionConstants) instead of idiv.
bool
CodeGeneratorX86Shared::visitDivOrModConstantI(LDivOrModConstantI *ins) {
    Register lhs = ToRegister(ins->numerator());
    Register output = ToRegister(ins->output());
    int32_t d = ins->denominator();

    // This emits the division answer into edx or the modulus answer into eax.
    JS_ASSERT(output == eax || output == edx);
    JS_ASSERT(lhs != eax && lhs != edx);
    bool isDiv = (output == edx);

    // The absolute value of the denominator isn't a power of 2 (see LDivPowTwoI
    // and LModPowTwoI).
    JS_ASSERT((Abs(d) & (Abs(d) - 1)) != 0);

    // We will first divide by Abs(d), and negate the answer if d is negative.
    // If desired, this can be avoided by generalizing computeDivisionConstants.
    ReciprocalMulConstants rmc = computeDivisionConstants(Abs(d));

    // As explained in the comments of computeDivisionConstants, we first compute
    // X >> (32 + shift), where X is either (rmc.multiplier * n) if the multiplier
    // is non-negative or (rmc.multiplier * n) + (2^32 * n) otherwise. This is the
    // desired division result if n is non-negative, and is one less than the result
    // otherwise.
    masm.movl(Imm32(rmc.multiplier), eax);
    masm.imull(lhs);
    if (rmc.multiplier < 0)
        masm.addl(lhs, edx);
    masm.sarl(Imm32(rmc.shiftAmount), edx);

    // We'll subtract -1 instead of adding 1, because (n < 0 ? -1 : 0) can be
    // computed with just a sign-extending shift of 31 bits.
    if (ins->canBeNegativeDividend()) {
        masm.movl(lhs, eax);
        masm.sarl(Imm32(31), eax);
        masm.subl(eax, edx);
    }

    // After this, edx contains the correct truncated division result.
    if (d < 0)
        masm.negl(edx);

    if (!isDiv) {
        // Recover the remainder from the quotient:
        // eax = lhs + (-d) * quotient, i.e. eax = lhs - d * (lhs / d).
        masm.imull(Imm32(-d), edx, eax);
        masm.addl(lhs, eax);
    }

    if (!ins->mir()->isTruncated()) {
        if (isDiv) {
            // This is a division op. Multiply the obtained value by d to check if
            // the correct answer is an integer. This cannot overflow, since |d| > 1.
            masm.imull(Imm32(d), edx, eax);
            masm.cmpl(lhs, eax);
            if (!bailoutIf(Assembler::NotEqual, ins->snapshot()))
                return false;

            // If lhs is zero and the divisor is negative, the answer should have
            // been -0.
            if (d < 0) {
                masm.testl(lhs, lhs);
                if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                    return false;
            }
        } else if (ins->canBeNegativeDividend()) {
            // This is a mod op. If the computed value is zero and lhs
            // is negative, the answer should have been -0.
            Label done;

            masm.cmpl(lhs, Imm32(0));
            masm.j(Assembler::GreaterThanOrEqual, &done);

            masm.testl(eax, eax);
            if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                return false;

            masm.bind(&done);
        }
    }

    return true;
}
michael@0 1025
// Lower general signed 32-bit division with idiv. Handles the three
// hazardous cases -- divide by zero, INT32_MIN / -1, and a -0 result --
// either by truncating to the |0 semantics or by bailing out.
bool
CodeGeneratorX86Shared::visitDivI(LDivI *ins)
{
    Register remainder = ToRegister(ins->remainder());
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());

    MDiv *mir = ins->mir();

    // idiv takes its dividend in (edx:eax) and writes quotient/remainder
    // back to eax/edx, so pin the operands accordingly.
    JS_ASSERT_IF(lhs != rhs, rhs != eax);
    JS_ASSERT(rhs != edx);
    JS_ASSERT(remainder == edx);
    JS_ASSERT(output == eax);

    Label done;
    ReturnZero *ool = nullptr;

    // Put the lhs in eax, for either the negative overflow case or the regular
    // divide case.
    if (lhs != eax)
        masm.mov(lhs, eax);

    // Handle divide by zero.
    if (mir->canBeDivideByZero()) {
        masm.testl(rhs, rhs);
        if (mir->canTruncateInfinities()) {
            // Truncated division by zero is zero (Infinity|0 == 0)
            if (!ool)
                ool = new(alloc()) ReturnZero(output);
            masm.j(Assembler::Zero, ool->entry());
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                return false;
        }
    }

    // Handle an integer overflow exception from -2147483648 / -1.
    if (mir->canBeNegativeOverflow()) {
        Label notmin;
        masm.cmpl(lhs, Imm32(INT32_MIN));
        masm.j(Assembler::NotEqual, &notmin);
        masm.cmpl(rhs, Imm32(-1));
        if (mir->canTruncateOverflow()) {
            // (-INT32_MIN)|0 == INT32_MIN and INT32_MIN is already in the
            // output register (lhs == eax).
            masm.j(Assembler::Equal, &done);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, ins->snapshot()))
                return false;
        }
        masm.bind(&notmin);
    }

    // Handle negative 0: 0 / negative must produce -0, a double.
    if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
        Label nonzero;
        masm.testl(lhs, lhs);
        masm.j(Assembler::NonZero, &nonzero);
        masm.cmpl(rhs, Imm32(0));
        if (!bailoutIf(Assembler::LessThan, ins->snapshot()))
            return false;
        masm.bind(&nonzero);
    }

    // Sign extend the lhs into edx to make (edx:eax), since idiv is 64-bit.
    // (eax already holds lhs from the move above, so this is normally a no-op.)
    if (lhs != eax)
        masm.mov(lhs, eax);
    masm.cdq();
    masm.idiv(rhs);

    if (!mir->canTruncateRemainder()) {
        // If the remainder is > 0, bailout since this must be a double.
        masm.testl(remainder, remainder);
        if (!bailoutIf(Assembler::NonZero, ins->snapshot()))
            return false;
    }

    masm.bind(&done);

    if (ool) {
        if (!addOutOfLineCode(ool))
            return false;
        masm.bind(ool->rejoin());
    }

    return true;
}
michael@0 1116
// Lower modulus by a power-of-two constant (2^shift) using bit masks instead
// of idiv. Negative dividends are negated around the mask so the result
// keeps the sign of the dividend, per JS semantics.
bool
CodeGeneratorX86Shared::visitModPowTwoI(LModPowTwoI *ins)
{
    Register lhs = ToRegister(ins->getOperand(0));
    int32_t shift = ins->shift();

    Label negative;

    if (ins->mir()->canBeNegativeDividend()) {
        // Switch based on sign of the lhs.
        // Positive numbers are just a bitmask
        masm.branchTest32(Assembler::Signed, lhs, lhs, &negative);
    }

    masm.andl(Imm32((uint32_t(1) << shift) - 1), lhs);

    if (ins->mir()->canBeNegativeDividend()) {
        Label done;
        masm.jump(&done);

        // Negative numbers need a negate, bitmask, negate
        masm.bind(&negative);

        // Unlike in the visitModI case, we are not computing the mod by means of a
        // division. Therefore, the divisor = -1 case isn't problematic (the andl
        // always returns 0, which is what we expect).
        //
        // The negl instruction overflows if lhs == INT32_MIN, but this is also not
        // a problem: shift is at most 31, and so the andl also always returns 0.
        masm.negl(lhs);
        masm.andl(Imm32((uint32_t(1) << shift) - 1), lhs);
        masm.negl(lhs);

        // Since a%b has the same sign as a, and a is negative in this branch,
        // an answer of 0 means the correct result is actually -0. Bail out.
        if (!ins->mir()->isTruncated() && !bailoutIf(Assembler::Zero, ins->snapshot()))
            return false;
        masm.bind(&done);
    }
    return true;
}
michael@0 1159
michael@0 1160 class ModOverflowCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared>
michael@0 1161 {
michael@0 1162 Label done_;
michael@0 1163 LModI *ins_;
michael@0 1164 Register rhs_;
michael@0 1165
michael@0 1166 public:
michael@0 1167 explicit ModOverflowCheck(LModI *ins, Register rhs)
michael@0 1168 : ins_(ins), rhs_(rhs)
michael@0 1169 { }
michael@0 1170
michael@0 1171 virtual bool accept(CodeGeneratorX86Shared *codegen) {
michael@0 1172 return codegen->visitModOverflowCheck(this);
michael@0 1173 }
michael@0 1174 Label *done() {
michael@0 1175 return &done_;
michael@0 1176 }
michael@0 1177 LModI *ins() const {
michael@0 1178 return ins_;
michael@0 1179 }
michael@0 1180 Register rhs() const {
michael@0 1181 return rhs_;
michael@0 1182 }
michael@0 1183 };
michael@0 1184
// OOL path entered when the dividend of a modulus is INT32_MIN. If the
// divisor is -1 the inline idiv would raise an integer overflow exception;
// truncated contexts define INT32_MIN % -1 as 0, other contexts bail out.
// Any other divisor is safe, so rejoin the inline division.
bool
CodeGeneratorX86Shared::visitModOverflowCheck(ModOverflowCheck *ool)
{
    masm.cmpl(ool->rhs(), Imm32(-1));
    if (ool->ins()->mir()->isTruncated()) {
        // Divisor != -1: safe to divide, jump back to the inline idiv.
        masm.j(Assembler::NotEqual, ool->rejoin());
        // INT32_MIN % -1 == 0; edx is the remainder register (see visitModI).
        masm.mov(ImmWord(0), edx);
        masm.jmp(ool->done());
    } else {
        if (!bailoutIf(Assembler::Equal, ool->ins()->snapshot()))
            return false;
        masm.jmp(ool->rejoin());
    }
    return true;
}
michael@0 1200
michael@0 1201 bool
michael@0 1202 CodeGeneratorX86Shared::visitModI(LModI *ins)
michael@0 1203 {
michael@0 1204 Register remainder = ToRegister(ins->remainder());
michael@0 1205 Register lhs = ToRegister(ins->lhs());
michael@0 1206 Register rhs = ToRegister(ins->rhs());
michael@0 1207
michael@0 1208 // Required to use idiv.
michael@0 1209 JS_ASSERT_IF(lhs != rhs, rhs != eax);
michael@0 1210 JS_ASSERT(rhs != edx);
michael@0 1211 JS_ASSERT(remainder == edx);
michael@0 1212 JS_ASSERT(ToRegister(ins->getTemp(0)) == eax);
michael@0 1213
michael@0 1214 Label done;
michael@0 1215 ReturnZero *ool = nullptr;
michael@0 1216 ModOverflowCheck *overflow = nullptr;
michael@0 1217
michael@0 1218 // Set up eax in preparation for doing a div.
michael@0 1219 if (lhs != eax)
michael@0 1220 masm.mov(lhs, eax);
michael@0 1221
michael@0 1222 // Prevent divide by zero.
michael@0 1223 if (ins->mir()->canBeDivideByZero()) {
michael@0 1224 masm.testl(rhs, rhs);
michael@0 1225 if (ins->mir()->isTruncated()) {
michael@0 1226 if (!ool)
michael@0 1227 ool = new(alloc()) ReturnZero(edx);
michael@0 1228 masm.j(Assembler::Zero, ool->entry());
michael@0 1229 } else {
michael@0 1230 if (!bailoutIf(Assembler::Zero, ins->snapshot()))
michael@0 1231 return false;
michael@0 1232 }
michael@0 1233 }
michael@0 1234
michael@0 1235 Label negative;
michael@0 1236
michael@0 1237 // Switch based on sign of the lhs.
michael@0 1238 if (ins->mir()->canBeNegativeDividend())
michael@0 1239 masm.branchTest32(Assembler::Signed, lhs, lhs, &negative);
michael@0 1240
michael@0 1241 // If lhs >= 0 then remainder = lhs % rhs. The remainder must be positive.
michael@0 1242 {
michael@0 1243 // Check if rhs is a power-of-two.
michael@0 1244 if (ins->mir()->canBePowerOfTwoDivisor()) {
michael@0 1245 JS_ASSERT(rhs != remainder);
michael@0 1246
michael@0 1247 // Rhs y is a power-of-two if (y & (y-1)) == 0. Note that if
michael@0 1248 // y is any negative number other than INT32_MIN, both y and
michael@0 1249 // y-1 will have the sign bit set so these are never optimized
michael@0 1250 // as powers-of-two. If y is INT32_MIN, y-1 will be INT32_MAX
michael@0 1251 // and because lhs >= 0 at this point, lhs & INT32_MAX returns
michael@0 1252 // the correct value.
michael@0 1253 Label notPowerOfTwo;
michael@0 1254 masm.mov(rhs, remainder);
michael@0 1255 masm.subl(Imm32(1), remainder);
michael@0 1256 masm.branchTest32(Assembler::NonZero, remainder, rhs, &notPowerOfTwo);
michael@0 1257 {
michael@0 1258 masm.andl(lhs, remainder);
michael@0 1259 masm.jmp(&done);
michael@0 1260 }
michael@0 1261 masm.bind(&notPowerOfTwo);
michael@0 1262 }
michael@0 1263
michael@0 1264 // Since lhs >= 0, the sign-extension will be 0
michael@0 1265 masm.mov(ImmWord(0), edx);
michael@0 1266 masm.idiv(rhs);
michael@0 1267 }
michael@0 1268
michael@0 1269 // Otherwise, we have to beware of two special cases:
michael@0 1270 if (ins->mir()->canBeNegativeDividend()) {
michael@0 1271 masm.jump(&done);
michael@0 1272
michael@0 1273 masm.bind(&negative);
michael@0 1274
michael@0 1275 // Prevent an integer overflow exception from -2147483648 % -1
michael@0 1276 Label notmin;
michael@0 1277 masm.cmpl(lhs, Imm32(INT32_MIN));
michael@0 1278 overflow = new(alloc()) ModOverflowCheck(ins, rhs);
michael@0 1279 masm.j(Assembler::Equal, overflow->entry());
michael@0 1280 masm.bind(overflow->rejoin());
michael@0 1281 masm.cdq();
michael@0 1282 masm.idiv(rhs);
michael@0 1283
michael@0 1284 if (!ins->mir()->isTruncated()) {
michael@0 1285 // A remainder of 0 means that the rval must be -0, which is a double.
michael@0 1286 masm.testl(remainder, remainder);
michael@0 1287 if (!bailoutIf(Assembler::Zero, ins->snapshot()))
michael@0 1288 return false;
michael@0 1289 }
michael@0 1290 }
michael@0 1291
michael@0 1292 masm.bind(&done);
michael@0 1293
michael@0 1294 if (overflow) {
michael@0 1295 if (!addOutOfLineCode(overflow))
michael@0 1296 return false;
michael@0 1297 masm.bind(overflow->done());
michael@0 1298 }
michael@0 1299
michael@0 1300 if (ool) {
michael@0 1301 if (!addOutOfLineCode(ool))
michael@0 1302 return false;
michael@0 1303 masm.bind(ool->rejoin());
michael@0 1304 }
michael@0 1305
michael@0 1306 return true;
michael@0 1307 }
michael@0 1308
michael@0 1309 bool
michael@0 1310 CodeGeneratorX86Shared::visitBitNotI(LBitNotI *ins)
michael@0 1311 {
michael@0 1312 const LAllocation *input = ins->getOperand(0);
michael@0 1313 JS_ASSERT(!input->isConstant());
michael@0 1314
michael@0 1315 masm.notl(ToOperand(input));
michael@0 1316 return true;
michael@0 1317 }
michael@0 1318
michael@0 1319 bool
michael@0 1320 CodeGeneratorX86Shared::visitBitOpI(LBitOpI *ins)
michael@0 1321 {
michael@0 1322 const LAllocation *lhs = ins->getOperand(0);
michael@0 1323 const LAllocation *rhs = ins->getOperand(1);
michael@0 1324
michael@0 1325 switch (ins->bitop()) {
michael@0 1326 case JSOP_BITOR:
michael@0 1327 if (rhs->isConstant())
michael@0 1328 masm.orl(Imm32(ToInt32(rhs)), ToOperand(lhs));
michael@0 1329 else
michael@0 1330 masm.orl(ToOperand(rhs), ToRegister(lhs));
michael@0 1331 break;
michael@0 1332 case JSOP_BITXOR:
michael@0 1333 if (rhs->isConstant())
michael@0 1334 masm.xorl(Imm32(ToInt32(rhs)), ToOperand(lhs));
michael@0 1335 else
michael@0 1336 masm.xorl(ToOperand(rhs), ToRegister(lhs));
michael@0 1337 break;
michael@0 1338 case JSOP_BITAND:
michael@0 1339 if (rhs->isConstant())
michael@0 1340 masm.andl(Imm32(ToInt32(rhs)), ToOperand(lhs));
michael@0 1341 else
michael@0 1342 masm.andl(ToOperand(rhs), ToRegister(lhs));
michael@0 1343 break;
michael@0 1344 default:
michael@0 1345 MOZ_ASSUME_UNREACHABLE("unexpected binary opcode");
michael@0 1346 }
michael@0 1347
michael@0 1348 return true;
michael@0 1349 }
michael@0 1350
// Lower int32 shifts (<<, >>, >>>). Constant shift counts are masked to the
// low five bits; variable counts must be in ecx for the *_cl instruction
// forms.
bool
CodeGeneratorX86Shared::visitShiftI(LShiftI *ins)
{
    Register lhs = ToRegister(ins->lhs());
    const LAllocation *rhs = ins->rhs();

    if (rhs->isConstant()) {
        int32_t shift = ToInt32(rhs) & 0x1F;
        switch (ins->bitop()) {
          case JSOP_LSH:
            if (shift)
                masm.shll(Imm32(shift), lhs);
            break;
          case JSOP_RSH:
            if (shift)
                masm.sarl(Imm32(shift), lhs);
            break;
          case JSOP_URSH:
            if (shift) {
                masm.shrl(Imm32(shift), lhs);
            } else if (ins->mir()->toUrsh()->fallible()) {
                // x >>> 0 can overflow: the value is unchanged, but >>>
                // produces a uint32, so a set sign bit means the result is
                // not representable as int32 -- bail.
                masm.testl(lhs, lhs);
                if (!bailoutIf(Assembler::Signed, ins->snapshot()))
                    return false;
            }
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Unexpected shift op");
        }
    } else {
        // Variable shift count: the *_cl forms read the count from ecx.
        JS_ASSERT(ToRegister(rhs) == ecx);
        switch (ins->bitop()) {
          case JSOP_LSH:
            masm.shll_cl(lhs);
            break;
          case JSOP_RSH:
            masm.sarl_cl(lhs);
            break;
          case JSOP_URSH:
            masm.shrl_cl(lhs);
            if (ins->mir()->toUrsh()->fallible()) {
                // x >>> 0 can overflow (see above).
                masm.testl(lhs, lhs);
                if (!bailoutIf(Assembler::Signed, ins->snapshot()))
                    return false;
            }
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Unexpected shift op");
        }
    }

    return true;
}
michael@0 1406
michael@0 1407 bool
michael@0 1408 CodeGeneratorX86Shared::visitUrshD(LUrshD *ins)
michael@0 1409 {
michael@0 1410 Register lhs = ToRegister(ins->lhs());
michael@0 1411 JS_ASSERT(ToRegister(ins->temp()) == lhs);
michael@0 1412
michael@0 1413 const LAllocation *rhs = ins->rhs();
michael@0 1414 FloatRegister out = ToFloatRegister(ins->output());
michael@0 1415
michael@0 1416 if (rhs->isConstant()) {
michael@0 1417 int32_t shift = ToInt32(rhs) & 0x1F;
michael@0 1418 if (shift)
michael@0 1419 masm.shrl(Imm32(shift), lhs);
michael@0 1420 } else {
michael@0 1421 JS_ASSERT(ToRegister(rhs) == ecx);
michael@0 1422 masm.shrl_cl(lhs);
michael@0 1423 }
michael@0 1424
michael@0 1425 masm.convertUInt32ToDouble(lhs, out);
michael@0 1426 return true;
michael@0 1427 }
michael@0 1428
michael@0 1429 MoveOperand
michael@0 1430 CodeGeneratorX86Shared::toMoveOperand(const LAllocation *a) const
michael@0 1431 {
michael@0 1432 if (a->isGeneralReg())
michael@0 1433 return MoveOperand(ToRegister(a));
michael@0 1434 if (a->isFloatReg())
michael@0 1435 return MoveOperand(ToFloatRegister(a));
michael@0 1436 return MoveOperand(StackPointer, ToStackOffset(a));
michael@0 1437 }
michael@0 1438
michael@0 1439 class OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorX86Shared>
michael@0 1440 {
michael@0 1441 MTableSwitch *mir_;
michael@0 1442 CodeLabel jumpLabel_;
michael@0 1443
michael@0 1444 bool accept(CodeGeneratorX86Shared *codegen) {
michael@0 1445 return codegen->visitOutOfLineTableSwitch(this);
michael@0 1446 }
michael@0 1447
michael@0 1448 public:
michael@0 1449 OutOfLineTableSwitch(MTableSwitch *mir)
michael@0 1450 : mir_(mir)
michael@0 1451 {}
michael@0 1452
michael@0 1453 MTableSwitch *mir() const {
michael@0 1454 return mir_;
michael@0 1455 }
michael@0 1456
michael@0 1457 CodeLabel *jumpLabel() {
michael@0 1458 return &jumpLabel_;
michael@0 1459 }
michael@0 1460 };
michael@0 1461
// Emit the jump table for a table switch: a pointer-aligned array of
// absolute code addresses, one per case, that emitTableSwitchDispatch
// indexes into.
bool
CodeGeneratorX86Shared::visitOutOfLineTableSwitch(OutOfLineTableSwitch *ool)
{
    MTableSwitch *mir = ool->mir();

    // Align the table and record its address through the OOL's CodeLabel so
    // the dispatch sequence can load it.
    masm.align(sizeof(void*));
    masm.bind(ool->jumpLabel()->src());
    if (!masm.addCodeLabel(*ool->jumpLabel()))
        return false;

    for (size_t i = 0; i < mir->numCases(); i++) {
        LBlock *caseblock = mir->getCase(i)->lir();
        Label *caseheader = caseblock->label();
        uint32_t caseoffset = caseheader->offset();

        // The entries of the jump table need to be absolute addresses and thus
        // must be patched after codegen is finished.
        CodeLabel cl;
        masm.writeCodePointer(cl.dest());
        cl.src()->bind(caseoffset);
        if (!masm.addCodeLabel(cl))
            return false;
    }

    return true;
}
michael@0 1488
// Emit the dispatch sequence for a table switch: normalize the index by the
// low bound, range-check it, then jump through the OOL-generated table of
// absolute case addresses.
bool
CodeGeneratorX86Shared::emitTableSwitchDispatch(MTableSwitch *mir, const Register &index,
                                                const Register &base)
{
    Label *defaultcase = mir->getDefault()->lir()->label();

    // Lower value with low value
    if (mir->low() != 0)
        masm.subl(Imm32(mir->low()), index);

    // Jump to default case if input is out of range. The unsigned
    // (AboveOrEqual) comparison also catches indices that went negative
    // after the subtraction, since those wrap to large unsigned values.
    int32_t cases = mir->numCases();
    masm.cmpl(index, Imm32(cases));
    masm.j(AssemblerX86Shared::AboveOrEqual, defaultcase);

    // To fill in the CodeLabels for the case entries, we need to first
    // generate the case entries (we don't yet know their offsets in the
    // instruction stream).
    OutOfLineTableSwitch *ool = new(alloc()) OutOfLineTableSwitch(mir);
    if (!addOutOfLineCode(ool))
        return false;

    // Compute the position where a pointer to the right case stands.
    masm.mov(ool->jumpLabel()->dest(), base);
    Operand pointer = Operand(base, index, ScalePointer);

    // Jump to the right case
    masm.jmp(pointer);

    return true;
}
michael@0 1520
michael@0 1521 bool
michael@0 1522 CodeGeneratorX86Shared::visitMathD(LMathD *math)
michael@0 1523 {
michael@0 1524 FloatRegister lhs = ToFloatRegister(math->lhs());
michael@0 1525 Operand rhs = ToOperand(math->rhs());
michael@0 1526
michael@0 1527 JS_ASSERT(ToFloatRegister(math->output()) == lhs);
michael@0 1528
michael@0 1529 switch (math->jsop()) {
michael@0 1530 case JSOP_ADD:
michael@0 1531 masm.addsd(rhs, lhs);
michael@0 1532 break;
michael@0 1533 case JSOP_SUB:
michael@0 1534 masm.subsd(rhs, lhs);
michael@0 1535 break;
michael@0 1536 case JSOP_MUL:
michael@0 1537 masm.mulsd(rhs, lhs);
michael@0 1538 break;
michael@0 1539 case JSOP_DIV:
michael@0 1540 masm.divsd(rhs, lhs);
michael@0 1541 break;
michael@0 1542 default:
michael@0 1543 MOZ_ASSUME_UNREACHABLE("unexpected opcode");
michael@0 1544 }
michael@0 1545 return true;
michael@0 1546 }
michael@0 1547
michael@0 1548 bool
michael@0 1549 CodeGeneratorX86Shared::visitMathF(LMathF *math)
michael@0 1550 {
michael@0 1551 FloatRegister lhs = ToFloatRegister(math->lhs());
michael@0 1552 Operand rhs = ToOperand(math->rhs());
michael@0 1553
michael@0 1554 JS_ASSERT(ToFloatRegister(math->output()) == lhs);
michael@0 1555
michael@0 1556 switch (math->jsop()) {
michael@0 1557 case JSOP_ADD:
michael@0 1558 masm.addss(rhs, lhs);
michael@0 1559 break;
michael@0 1560 case JSOP_SUB:
michael@0 1561 masm.subss(rhs, lhs);
michael@0 1562 break;
michael@0 1563 case JSOP_MUL:
michael@0 1564 masm.mulss(rhs, lhs);
michael@0 1565 break;
michael@0 1566 case JSOP_DIV:
michael@0 1567 masm.divss(rhs, lhs);
michael@0 1568 break;
michael@0 1569 default:
michael@0 1570 MOZ_ASSUME_UNREACHABLE("unexpected opcode");
michael@0 1571 return false;
michael@0 1572 }
michael@0 1573 return true;
michael@0 1574 }
michael@0 1575
// Lower Math.floor on a double, producing an int32. With SSE4.1 we use
// roundsd's round-toward-negative-infinity mode; otherwise negative inputs
// take a truncate-and-correct slow path. Bails out on -0, NaN, and results
// outside the int32 range.
bool
CodeGeneratorX86Shared::visitFloor(LFloor *lir)
{
    FloatRegister input = ToFloatRegister(lir->input());
    FloatRegister scratch = ScratchFloatReg;
    Register output = ToRegister(lir->output());

    Label bailout;

    if (AssemblerX86Shared::HasSSE41()) {
        // Bail on negative-zero.
        masm.branchNegativeZero(input, output, &bailout);
        if (!bailoutFrom(&bailout, lir->snapshot()))
            return false;

        // Round toward -Infinity.
        masm.roundsd(input, scratch, JSC::X86Assembler::RoundDown);

        // cvttsd2si produces INT_MIN (0x80000000) for out-of-range or NaN
        // inputs, so that value signals failure.
        masm.cvttsd2si(scratch, output);
        masm.cmp32(output, Imm32(INT_MIN));
        if (!bailoutIf(Assembler::Equal, lir->snapshot()))
            return false;
    } else {
        Label negative, end;

        // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
        masm.xorpd(scratch, scratch);
        masm.branchDouble(Assembler::DoubleLessThan, input, scratch, &negative);

        // Bail on negative-zero.
        masm.branchNegativeZero(input, output, &bailout);
        if (!bailoutFrom(&bailout, lir->snapshot()))
            return false;

        // Input is non-negative, so truncation correctly rounds.
        // (cvttsd2si yields INT_MIN for out-of-range/NaN inputs; see above.)
        masm.cvttsd2si(input, output);
        masm.cmp32(output, Imm32(INT_MIN));
        if (!bailoutIf(Assembler::Equal, lir->snapshot()))
            return false;

        masm.jump(&end);

        // Input is negative, but isn't -0.
        // Negative values go on a comparatively expensive path, since no
        // native rounding mode matches JS semantics. Still better than callVM.
        masm.bind(&negative);
        {
            // Truncate and round toward zero.
            // This is off-by-one for everything but integer-valued inputs.
            masm.cvttsd2si(input, output);
            masm.cmp32(output, Imm32(INT_MIN));
            if (!bailoutIf(Assembler::Equal, lir->snapshot()))
                return false;

            // Test whether the input double was integer-valued.
            masm.convertInt32ToDouble(output, scratch);
            masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch, &end);

            // Input is not integer-valued, so we rounded off-by-one in the
            // wrong direction. Correct by subtraction.
            masm.subl(Imm32(1), output);
            // Cannot overflow: output was already checked against INT_MIN.
        }

        masm.bind(&end);
    }
    return true;
}
michael@0 1644
bool
CodeGeneratorX86Shared::visitFloorF(LFloorF *lir)
{
    // Single-precision twin of visitFloor: Math.floor on a float32 input,
    // producing an int32. Bails out on non-int32 results and on -0.
    FloatRegister input = ToFloatRegister(lir->input());
    FloatRegister scratch = ScratchFloatReg;
    Register output = ToRegister(lir->output());

    Label bailout;

    if (AssemblerX86Shared::HasSSE41()) {
        // Bail on negative-zero.
        masm.branchNegativeZeroFloat32(input, output, &bailout);
        if (!bailoutFrom(&bailout, lir->snapshot()))
            return false;

        // Round toward -Infinity.
        masm.roundss(input, scratch, JSC::X86Assembler::RoundDown);

        // cvttss2si writes INT_MIN (the x86 "integer indefinite" value) when
        // the source is NaN or out of int32 range; bail when we see it.
        masm.cvttss2si(scratch, output);
        masm.cmp32(output, Imm32(INT_MIN));
        if (!bailoutIf(Assembler::Equal, lir->snapshot()))
            return false;
    } else {
        Label negative, end;

        // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
        masm.xorps(scratch, scratch);
        masm.branchFloat(Assembler::DoubleLessThan, input, scratch, &negative);

        // Bail on negative-zero.
        masm.branchNegativeZeroFloat32(input, output, &bailout);
        if (!bailoutFrom(&bailout, lir->snapshot()))
            return false;

        // Input is non-negative, so truncation correctly rounds.
        // (cvttss2si yields INT_MIN on NaN or overflow; bail in that case.)
        masm.cvttss2si(input, output);
        masm.cmp32(output, Imm32(INT_MIN));
        if (!bailoutIf(Assembler::Equal, lir->snapshot()))
            return false;

        masm.jump(&end);

        // Input is negative, but isn't -0.
        // Negative values go on a comparatively expensive path, since no
        // native rounding mode matches JS semantics. Still better than callVM.
        masm.bind(&negative);
        {
            // Truncate and round toward zero.
            // This is off-by-one for everything but integer-valued inputs.
            masm.cvttss2si(input, output);
            masm.cmp32(output, Imm32(INT_MIN));
            if (!bailoutIf(Assembler::Equal, lir->snapshot()))
                return false;

            // Test whether the input float was integer-valued.
            masm.convertInt32ToFloat32(output, scratch);
            masm.branchFloat(Assembler::DoubleEqualOrUnordered, input, scratch, &end);

            // Input is not integer-valued, so we rounded off-by-one in the
            // wrong direction. Correct by subtraction.
            masm.subl(Imm32(1), output);
            // Cannot overflow: output was already checked against INT_MIN.
        }

        masm.bind(&end);
    }
    return true;
}
michael@0 1713
bool
CodeGeneratorX86Shared::visitRound(LRound *lir)
{
    // Math.round on a double input, producing an int32: compute
    // floor(input + 0.5). Bails out when the result is not representable as
    // an int32, and whenever the result would be -0 (input is -0, or a
    // negative input rounds up to zero).
    FloatRegister input = ToFloatRegister(lir->input());
    FloatRegister temp = ToFloatRegister(lir->temp());
    FloatRegister scratch = ScratchFloatReg;
    Register output = ToRegister(lir->output());

    Label negative, end, bailout;

    // Load 0.5 in the temp register.
    masm.loadConstantDouble(0.5, temp);

    // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
    masm.xorpd(scratch, scratch);
    masm.branchDouble(Assembler::DoubleLessThan, input, scratch, &negative);

    // Bail on negative-zero.
    masm.branchNegativeZero(input, output, &bailout);
    if (!bailoutFrom(&bailout, lir->snapshot()))
        return false;

    // Input is non-negative. Add 0.5 and truncate, rounding down. Note that we
    // have to add the input to the temp register (which contains 0.5) because
    // we're not allowed to modify the input register.
    masm.addsd(input, temp);

    // cvttsd2si yields INT_MIN on NaN or out-of-int32-range values; bail then.
    masm.cvttsd2si(temp, output);
    masm.cmp32(output, Imm32(INT_MIN));
    if (!bailoutIf(Assembler::Equal, lir->snapshot()))
        return false;

    masm.jump(&end);


    // Input is negative, but isn't -0.
    masm.bind(&negative);

    if (AssemblerX86Shared::HasSSE41()) {
        // Add 0.5 and round toward -Infinity. The result is stored in the temp
        // register (currently contains 0.5).
        masm.addsd(input, temp);
        masm.roundsd(temp, scratch, JSC::X86Assembler::RoundDown);

        // Truncate.
        masm.cvttsd2si(scratch, output);
        masm.cmp32(output, Imm32(INT_MIN));
        if (!bailoutIf(Assembler::Equal, lir->snapshot()))
            return false;

        // If the result is positive zero, then the actual result is -0. Bail.
        // Otherwise, the truncation will have produced the correct negative integer.
        masm.testl(output, output);
        if (!bailoutIf(Assembler::Zero, lir->snapshot()))
            return false;

    } else {
        masm.addsd(input, temp);

        // Round toward -Infinity without the benefit of ROUNDSD.
        {
            // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
            // result is -0; bail. (scratch still holds 0.0 from the xorpd above.)
            masm.compareDouble(Assembler::DoubleGreaterThanOrEqual, temp, scratch);
            if (!bailoutIf(Assembler::DoubleGreaterThanOrEqual, lir->snapshot()))
                return false;

            // Truncate and round toward zero.
            // This is off-by-one for everything but integer-valued inputs.
            masm.cvttsd2si(temp, output);
            masm.cmp32(output, Imm32(INT_MIN));
            if (!bailoutIf(Assembler::Equal, lir->snapshot()))
                return false;

            // Test whether the truncated double was integer-valued.
            masm.convertInt32ToDouble(output, scratch);
            masm.branchDouble(Assembler::DoubleEqualOrUnordered, temp, scratch, &end);

            // Input is not integer-valued, so we rounded off-by-one in the
            // wrong direction. Correct by subtraction.
            masm.subl(Imm32(1), output);
            // Cannot overflow: output was already checked against INT_MIN.
        }
    }

    masm.bind(&end);
    return true;
}
michael@0 1801
bool
CodeGeneratorX86Shared::visitRoundF(LRoundF *lir)
{
    // Single-precision twin of visitRound: Math.round on a float32 input,
    // producing an int32 via floor(input + 0.5). Bails out on non-int32
    // results and whenever the result would be -0.
    FloatRegister input = ToFloatRegister(lir->input());
    FloatRegister temp = ToFloatRegister(lir->temp());
    FloatRegister scratch = ScratchFloatReg;
    Register output = ToRegister(lir->output());

    Label negative, end, bailout;

    // Load 0.5 in the temp register.
    masm.loadConstantFloat32(0.5f, temp);

    // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
    masm.xorps(scratch, scratch);
    masm.branchFloat(Assembler::DoubleLessThan, input, scratch, &negative);

    // Bail on negative-zero.
    masm.branchNegativeZeroFloat32(input, output, &bailout);
    if (!bailoutFrom(&bailout, lir->snapshot()))
        return false;

    // Input is non-negative. Add 0.5 and truncate, rounding down. Note that we
    // have to add the input to the temp register (which contains 0.5) because
    // we're not allowed to modify the input register.
    masm.addss(input, temp);

    // cvttss2si yields INT_MIN on NaN or out-of-int32-range values; bail then.
    masm.cvttss2si(temp, output);
    masm.cmp32(output, Imm32(INT_MIN));
    if (!bailoutIf(Assembler::Equal, lir->snapshot()))
        return false;

    masm.jump(&end);


    // Input is negative, but isn't -0.
    masm.bind(&negative);

    if (AssemblerX86Shared::HasSSE41()) {
        // Add 0.5 and round toward -Infinity. The result is stored in the temp
        // register (currently contains 0.5).
        masm.addss(input, temp);
        masm.roundss(temp, scratch, JSC::X86Assembler::RoundDown);

        // Truncate.
        masm.cvttss2si(scratch, output);
        masm.cmp32(output, Imm32(INT_MIN));
        if (!bailoutIf(Assembler::Equal, lir->snapshot()))
            return false;

        // If the result is positive zero, then the actual result is -0. Bail.
        // Otherwise, the truncation will have produced the correct negative integer.
        masm.testl(output, output);
        if (!bailoutIf(Assembler::Zero, lir->snapshot()))
            return false;

    } else {
        masm.addss(input, temp);
        // Round toward -Infinity without the benefit of ROUNDSS.
        {
            // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
            // result is -0; bail. (scratch still holds 0.0f from the xorps above.)
            masm.compareFloat(Assembler::DoubleGreaterThanOrEqual, temp, scratch);
            if (!bailoutIf(Assembler::DoubleGreaterThanOrEqual, lir->snapshot()))
                return false;

            // Truncate and round toward zero.
            // This is off-by-one for everything but integer-valued inputs.
            masm.cvttss2si(temp, output);
            masm.cmp32(output, Imm32(INT_MIN));
            if (!bailoutIf(Assembler::Equal, lir->snapshot()))
                return false;

            // Test whether the truncated float was integer-valued.
            masm.convertInt32ToFloat32(output, scratch);
            masm.branchFloat(Assembler::DoubleEqualOrUnordered, temp, scratch, &end);

            // Input is not integer-valued, so we rounded off-by-one in the
            // wrong direction. Correct by subtraction.
            masm.subl(Imm32(1), output);
            // Cannot overflow: output was already checked against INT_MIN.
        }
    }

    masm.bind(&end);
    return true;
}
michael@0 1888
michael@0 1889 bool
michael@0 1890 CodeGeneratorX86Shared::visitGuardShape(LGuardShape *guard)
michael@0 1891 {
michael@0 1892 Register obj = ToRegister(guard->input());
michael@0 1893 masm.cmpPtr(Operand(obj, JSObject::offsetOfShape()), ImmGCPtr(guard->mir()->shape()));
michael@0 1894
michael@0 1895 return bailoutIf(Assembler::NotEqual, guard->snapshot());
michael@0 1896 }
michael@0 1897
michael@0 1898 bool
michael@0 1899 CodeGeneratorX86Shared::visitGuardObjectType(LGuardObjectType *guard)
michael@0 1900 {
michael@0 1901 Register obj = ToRegister(guard->input());
michael@0 1902 masm.cmpPtr(Operand(obj, JSObject::offsetOfType()), ImmGCPtr(guard->mir()->typeObject()));
michael@0 1903
michael@0 1904 Assembler::Condition cond =
michael@0 1905 guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
michael@0 1906 return bailoutIf(cond, guard->snapshot());
michael@0 1907 }
michael@0 1908
michael@0 1909 bool
michael@0 1910 CodeGeneratorX86Shared::visitGuardClass(LGuardClass *guard)
michael@0 1911 {
michael@0 1912 Register obj = ToRegister(guard->input());
michael@0 1913 Register tmp = ToRegister(guard->tempInt());
michael@0 1914
michael@0 1915 masm.loadPtr(Address(obj, JSObject::offsetOfType()), tmp);
michael@0 1916 masm.cmpPtr(Operand(tmp, types::TypeObject::offsetOfClasp()), ImmPtr(guard->mir()->getClass()));
michael@0 1917 if (!bailoutIf(Assembler::NotEqual, guard->snapshot()))
michael@0 1918 return false;
michael@0 1919 return true;
michael@0 1920 }
michael@0 1921
michael@0 1922 bool
michael@0 1923 CodeGeneratorX86Shared::visitEffectiveAddress(LEffectiveAddress *ins)
michael@0 1924 {
michael@0 1925 const MEffectiveAddress *mir = ins->mir();
michael@0 1926 Register base = ToRegister(ins->base());
michael@0 1927 Register index = ToRegister(ins->index());
michael@0 1928 Register output = ToRegister(ins->output());
michael@0 1929 masm.leal(Operand(base, index, mir->scale(), mir->displacement()), output);
michael@0 1930 return true;
michael@0 1931 }
michael@0 1932
michael@0 1933 Operand
michael@0 1934 CodeGeneratorX86Shared::createArrayElementOperand(Register elements, const LAllocation *index)
michael@0 1935 {
michael@0 1936 if (index->isConstant())
michael@0 1937 return Operand(elements, ToInt32(index) * sizeof(js::Value));
michael@0 1938
michael@0 1939 return Operand(elements, ToRegister(index), TimesEight);
michael@0 1940 }
bool
CodeGeneratorX86Shared::generateInvalidateEpilogue()
{
    // Emits the code that runs when this Ion script is invalidated: push the
    // (patched-in later) IonScript pointer and jump to the invalidation
    // thunk, which tears down the invalidated frame.

    // Ensure that there is enough space in the buffer for the OsiPoint
    // patching to occur. Otherwise, we could overwrite the invalidation
    // epilogue. Pad with at least a pointer's worth of nops.
    for (size_t i = 0; i < sizeof(void *); i+= Assembler::nopSize())
        masm.nop();

    masm.bind(&invalidate_);

    // Push the Ion script onto the stack (when we determine what that pointer is).
    // The patch location is remembered in invalidateEpilogueData_.
    invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
    JitCode *thunk = gen->jitRuntime()->getInvalidationThunk();

    masm.call(thunk);

    // We should never reach this point in JIT code -- the invalidation thunk should
    // pop the invalidated JS frame and return directly to its caller.
    masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
    return true;
}
michael@0 1963
michael@0 1964 bool
michael@0 1965 CodeGeneratorX86Shared::visitNegI(LNegI *ins)
michael@0 1966 {
michael@0 1967 Register input = ToRegister(ins->input());
michael@0 1968 JS_ASSERT(input == ToRegister(ins->output()));
michael@0 1969
michael@0 1970 masm.neg32(input);
michael@0 1971 return true;
michael@0 1972 }
michael@0 1973
michael@0 1974 bool
michael@0 1975 CodeGeneratorX86Shared::visitNegD(LNegD *ins)
michael@0 1976 {
michael@0 1977 FloatRegister input = ToFloatRegister(ins->input());
michael@0 1978 JS_ASSERT(input == ToFloatRegister(ins->output()));
michael@0 1979
michael@0 1980 masm.negateDouble(input);
michael@0 1981 return true;
michael@0 1982 }
michael@0 1983
michael@0 1984 bool
michael@0 1985 CodeGeneratorX86Shared::visitNegF(LNegF *ins)
michael@0 1986 {
michael@0 1987 FloatRegister input = ToFloatRegister(ins->input());
michael@0 1988 JS_ASSERT(input == ToFloatRegister(ins->output()));
michael@0 1989
michael@0 1990 masm.negateFloat(input);
michael@0 1991 return true;
michael@0 1992 }
michael@0 1993
bool
CodeGeneratorX86Shared::visitForkJoinGetSlice(LForkJoinGetSlice *ins)
{
    // Calls the shared ForkJoinGetSlice stub. The stub uses a fixed calling
    // convention (see generateForkJoinGetSliceStub), so assert that the
    // register allocator honored every fixed-register constraint first.
    MOZ_ASSERT(gen->info().executionMode() == ParallelExecution);
    MOZ_ASSERT(ToRegister(ins->forkJoinContext()) == ForkJoinGetSliceReg_cx);
    // eax and edx are fixed because the stub uses cmpxchg and div.
    MOZ_ASSERT(ToRegister(ins->temp1()) == eax);
    MOZ_ASSERT(ToRegister(ins->temp2()) == edx);
    MOZ_ASSERT(ToRegister(ins->temp3()) == ForkJoinGetSliceReg_temp0);
    MOZ_ASSERT(ToRegister(ins->temp4()) == ForkJoinGetSliceReg_temp1);
    MOZ_ASSERT(ToRegister(ins->output()) == ForkJoinGetSliceReg_output);

    masm.call(gen->jitRuntime()->forkJoinGetSliceStub());
    return true;
}
michael@0 2008
JitCode *
JitRuntime::generateForkJoinGetSliceStub(JSContext *cx)
{
#ifdef JS_THREADSAFE
    // Generates the stub that hands out parallel work slices: first try to
    // take a slice from the calling worker's own bounds, then fall back to
    // stealing from a randomly chosen victim worker. Returns the slice id in
    // ForkJoinGetSliceReg_output, or ThreadPool::MAX_SLICE_ID when no work
    // is left.
    MacroAssembler masm(cx);

    // We need two fixed temps. We need to fix eax for cmpxchg, and edx for
    // div.
    Register cxReg = ForkJoinGetSliceReg_cx, worker = cxReg;
    Register pool = ForkJoinGetSliceReg_temp0;
    Register bounds = ForkJoinGetSliceReg_temp1;
    Register output = ForkJoinGetSliceReg_output;

    MOZ_ASSERT(worker != eax && worker != edx);
    MOZ_ASSERT(pool != eax && pool != edx);
    MOZ_ASSERT(bounds != eax && bounds != edx);
    MOZ_ASSERT(output != eax && output != edx);

    Label stealWork, noMoreWork, gotSlice;
    Operand workerSliceBounds(Address(worker, ThreadPoolWorker::offsetOfSliceBounds()));

    // Clobber cx to load the worker. The original cx is saved on the stack
    // and restored before returning.
    masm.push(cxReg);
    masm.loadPtr(Address(cxReg, ForkJoinContext::offsetOfWorker()), worker);

    // Load the thread pool, which is used in all cases below.
    masm.loadThreadPool(pool);

    {
        // Try to get a slice from the current thread.
        Label getOwnSliceLoopHead;
        masm.bind(&getOwnSliceLoopHead);

        // Load the slice bounds for the current thread.
        masm.loadSliceBounds(worker, bounds);

        // The slice bounds is a uint32 composed from two uint16s:
        //   [ from          , to            ]
        //     ^~~~            ^~
        //     upper 16 bits | lower 16 bits
        // Extract |from| into output.
        masm.move32(bounds, output);
        masm.shrl(Imm32(16), output);

        // If we don't have any slices left ourselves, move on to stealing.
        // (from == to means the range is empty.)
        masm.branch16(Assembler::Equal, output, bounds, &stealWork);

        // If we still have work, try to CAS [ from+1, to ]: adding 0x10000
        // increments the upper-16-bit |from| field.
        masm.move32(bounds, edx);
        masm.add32(Imm32(0x10000), edx);
        masm.move32(bounds, eax);
        // cmpxchg compares the memory word against eax and sets ZF on
        // success; on failure, retry with the freshly observed bounds.
        masm.atomic_cmpxchg32(edx, workerSliceBounds, eax);
        masm.j(Assembler::NonZero, &getOwnSliceLoopHead);

        // If the CAS succeeded, return |from| in output.
        masm.jump(&gotSlice);
    }

    // Try to steal work.
    masm.bind(&stealWork);

    // It's not technically correct to test whether work-stealing is turned on
    // only during stub-generation time, but it's a DEBUG only thing.
    if (cx->runtime()->threadPool.workStealing()) {
        Label stealWorkLoopHead;
        masm.bind(&stealWorkLoopHead);

        // Check if we have work.
        masm.branch32(Assembler::Equal,
                      Address(pool, ThreadPool::offsetOfPendingSlices()),
                      Imm32(0), &noMoreWork);

        // Get an id at random. The following is an inline of
        // the 32-bit xorshift in ThreadPoolWorker::randomWorker().
        {
            // Reload the current worker (cx was saved at the top of the stub).
            masm.loadPtr(Address(StackPointer, 0), cxReg);
            masm.loadPtr(Address(cxReg, ForkJoinContext::offsetOfWorker()), worker);

            // Perform the xorshift to get a random number in eax, using edx
            // as a temp.
            Address rngState(worker, ThreadPoolWorker::offsetOfSchedulerRNGState());
            masm.load32(rngState, eax);
            masm.move32(eax, edx);
            masm.shll(Imm32(ThreadPoolWorker::XORSHIFT_A), eax);
            masm.xor32(edx, eax);
            masm.move32(eax, edx);
            masm.shrl(Imm32(ThreadPoolWorker::XORSHIFT_B), eax);
            masm.xor32(edx, eax);
            masm.move32(eax, edx);
            masm.shll(Imm32(ThreadPoolWorker::XORSHIFT_C), eax);
            masm.xor32(edx, eax);
            // Persist the new RNG state for the next draw.
            masm.store32(eax, rngState);

            // Compute the random worker id by computing % numWorkers. Reuse
            // output as a temp. udiv divides edx:eax and leaves the
            // remainder -- the victim worker id -- in edx.
            masm.move32(Imm32(0), edx);
            masm.move32(Imm32(cx->runtime()->threadPool.numWorkers()), output);
            masm.udiv(output);
        }

        // Load the worker from the workers array, indexed by the id in edx.
        masm.loadPtr(Address(pool, ThreadPool::offsetOfWorkers()), worker);
        masm.loadPtr(BaseIndex(worker, edx, ScalePointer), worker);

        // Try to get a slice from the designated victim worker.
        // NOTE(review): workerSliceBounds names the |worker| register, which
        // now holds the victim, so the CAS below targets the victim's bounds.
        Label stealSliceFromWorkerLoopHead;
        masm.bind(&stealSliceFromWorkerLoopHead);

        // Load the slice bounds and decompose for the victim worker.
        masm.loadSliceBounds(worker, bounds);
        masm.move32(bounds, eax);
        masm.shrl(Imm32(16), eax);

        // If the victim worker has no more slices left, find another worker.
        masm.branch16(Assembler::Equal, eax, bounds, &stealWorkLoopHead);

        // If the victim worker still has work, try to CAS [ from, to-1 ]:
        // steal from the opposite end of the range than the owner takes.
        masm.move32(bounds, output);
        masm.sub32(Imm32(1), output);
        masm.move32(bounds, eax);
        masm.atomic_cmpxchg32(output, workerSliceBounds, eax);
        masm.j(Assembler::NonZero, &stealSliceFromWorkerLoopHead);

        // If the CAS succeeded, return |to-1| in output.
#ifdef DEBUG
        masm.atomic_inc32(Operand(Address(pool, ThreadPool::offsetOfStolenSlices())));
#endif
        // Copies lower 16 bits only, zero-extending the stolen slice id.
        masm.movzwl(output, output);
    }

    // If we successfully got a slice, decrement pool->pendingSlices_ and
    // return the slice.
    masm.bind(&gotSlice);
    masm.atomic_dec32(Operand(Address(pool, ThreadPool::offsetOfPendingSlices())));
    masm.pop(cxReg);
    masm.ret();

    // There's no more slices to give out, return a sentinel value.
    masm.bind(&noMoreWork);
    masm.move32(Imm32(ThreadPool::MAX_SLICE_ID), output);
    masm.pop(cxReg);
    masm.ret();

    Linker linker(masm);
    JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(code, "ForkJoinGetSliceStub");
#endif

    return code;
#else
    return nullptr;
#endif // JS_THREADSAFE
}
michael@0 2165
michael@0 2166 } // namespace jit
michael@0 2167 } // namespace js

mercurial