js/src/jit/arm/CodeGenerator-arm.cpp

author:      Michael Schloh von Bennewitz <michael@schloh.com>
date:        Sat, 03 Jan 2015 20:18:00 +0100
branch:      TOR_BUG_3246
changeset:   7:129ffea94266
permissions: -rw-r--r--

Conditionally enable double-key logic according to private browsing mode
or the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts — with some
reservations about how to convince FindCookie callers to test the
condition and pass a nullptr when double-key logic is disabled.

michael@0 1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
michael@0 2 * vim: set ts=8 sts=4 et sw=4 tw=99:
michael@0 3 * This Source Code Form is subject to the terms of the Mozilla Public
michael@0 4 * License, v. 2.0. If a copy of the MPL was not distributed with this
michael@0 5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
michael@0 6
michael@0 7 #include "jit/arm/CodeGenerator-arm.h"
michael@0 8
michael@0 9 #include "mozilla/MathAlgorithms.h"
michael@0 10
michael@0 11 #include "jscntxt.h"
michael@0 12 #include "jscompartment.h"
michael@0 13 #include "jsnum.h"
michael@0 14
michael@0 15 #include "jit/CodeGenerator.h"
michael@0 16 #include "jit/IonFrames.h"
michael@0 17 #include "jit/JitCompartment.h"
michael@0 18 #include "jit/MIR.h"
michael@0 19 #include "jit/MIRGraph.h"
michael@0 20 #include "vm/Shape.h"
michael@0 21 #include "vm/TraceLogging.h"
michael@0 22
michael@0 23 #include "jsscriptinlines.h"
michael@0 24
michael@0 25 #include "jit/shared/CodeGenerator-shared-inl.h"
michael@0 26
michael@0 27 using namespace js;
michael@0 28 using namespace js::jit;
michael@0 29
michael@0 30 using mozilla::FloorLog2;
michael@0 31 using mozilla::NegativeInfinity;
michael@0 32 using JS::GenericNaN;
michael@0 33
// shared
// Thin constructor: all state lives in CodeGeneratorShared; the ARM backend
// adds no members of its own here.
CodeGeneratorARM::CodeGeneratorARM(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm)
  : CodeGeneratorShared(gen, graph, masm)
{
}
michael@0 39
// Emit the frame setup for ordinary (non-asm.js) Ion code: reserve the
// static frame and verify ABI stack alignment. asm.js code uses
// generateAsmJSPrologue instead.
bool
CodeGeneratorARM::generatePrologue()
{
    JS_ASSERT(!gen->compilingAsmJS());

    // Note that this automatically sets MacroAssembler::framePushed().
    masm.reserveStack(frameSize());
    masm.checkStackAlignment();
    return true;
}
michael@0 50
// Emit the asm.js entry prologue: push the return address, run the stack
// overflow check (unless elided), then reserve the frame.
bool
CodeGeneratorARM::generateAsmJSPrologue(Label *stackOverflowLabel)
{
    JS_ASSERT(gen->compilingAsmJS());

    masm.Push(lr);

    // The asm.js over-recursed handler wants to be able to assume that SP
    // points to the return address, so perform the check after pushing lr but
    // before pushing frameDepth.
    if (!omitOverRecursedCheck()) {
        masm.branchPtr(Assembler::AboveOrEqual,
                       AsmJSAbsoluteAddress(AsmJSImm_StackLimit),
                       StackPointer,
                       stackOverflowLabel);
    }

    // Note that this automatically sets MacroAssembler::framePushed().
    masm.reserveStack(frameDepth_);
    masm.checkStackAlignment();
    return true;
}
michael@0 73
// Emit the common function exit: bind the shared return label, free the
// frame, and return by popping the saved return address into pc.
bool
CodeGeneratorARM::generateEpilogue()
{
    masm.bind(&returnLabel_);

#ifdef JS_TRACE_LOGGING
    if (!gen->compilingAsmJS() && gen->info().executionMode() == SequentialExecution) {
        if (!emitTracelogStopEvent(TraceLogger::IonMonkey))
            return false;
        if (!emitTracelogScriptStop())
            return false;
    }
#endif

    if (gen->compilingAsmJS()) {
        // Pop the stack we allocated at the start of the function.
        masm.freeStack(frameDepth_);
        // Popping into pc performs the return.
        masm.Pop(pc);
        JS_ASSERT(masm.framePushed() == 0);
        //masm.as_bkpt();
    } else {
        // Pop the stack we allocated at the start of the function.
        masm.freeStack(frameSize());
        JS_ASSERT(masm.framePushed() == 0);
        masm.ma_pop(pc);
    }
    // Flush any pending constant-pool data before the code ends.
    masm.dumpPool();
    return true;
}
michael@0 103
michael@0 104 void
michael@0 105 CodeGeneratorARM::emitBranch(Assembler::Condition cond, MBasicBlock *mirTrue, MBasicBlock *mirFalse)
michael@0 106 {
michael@0 107 if (isNextBlock(mirFalse->lir())) {
michael@0 108 jumpToBlock(mirTrue, cond);
michael@0 109 } else {
michael@0 110 jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
michael@0 111 jumpToBlock(mirTrue);
michael@0 112 }
michael@0 113 }
michael@0 114
michael@0 115
// Visitor dispatch for the out-of-line bailout path: forward to the
// codegen's handler, which emits the lazy-bailout stub body.
bool
OutOfLineBailout::accept(CodeGeneratorARM *codegen)
{
    return codegen->visitOutOfLineBailout(this);
}
michael@0 121
// Branch on the truthiness of an int32 operand (nonzero == true), using
// fall-through to the successor block when possible.
bool
CodeGeneratorARM::visitTestIAndBranch(LTestIAndBranch *test)
{
    const LAllocation *opd = test->getOperand(0);
    MBasicBlock *ifTrue = test->ifTrue();
    MBasicBlock *ifFalse = test->ifFalse();

    // Test the operand
    masm.ma_cmp(ToRegister(opd), Imm32(0));

    if (isNextBlock(ifFalse->lir())) {
        // False target falls through; branch only when nonzero.
        jumpToBlock(ifTrue, Assembler::NonZero);
    } else if (isNextBlock(ifTrue->lir())) {
        // True target falls through; branch only when zero.
        jumpToBlock(ifFalse, Assembler::Zero);
    } else {
        // Neither falls through: conditional jump plus unconditional jump.
        jumpToBlock(ifFalse, Assembler::Zero);
        jumpToBlock(ifTrue);
    }
    return true;
}
michael@0 142
michael@0 143 bool
michael@0 144 CodeGeneratorARM::visitCompare(LCompare *comp)
michael@0 145 {
michael@0 146 Assembler::Condition cond = JSOpToCondition(comp->mir()->compareType(), comp->jsop());
michael@0 147 const LAllocation *left = comp->getOperand(0);
michael@0 148 const LAllocation *right = comp->getOperand(1);
michael@0 149 const LDefinition *def = comp->getDef(0);
michael@0 150
michael@0 151 if (right->isConstant())
michael@0 152 masm.ma_cmp(ToRegister(left), Imm32(ToInt32(right)));
michael@0 153 else
michael@0 154 masm.ma_cmp(ToRegister(left), ToOperand(right));
michael@0 155 masm.ma_mov(Imm32(0), ToRegister(def));
michael@0 156 masm.ma_mov(Imm32(1), ToRegister(def), NoSetCond, cond);
michael@0 157 return true;
michael@0 158 }
michael@0 159
michael@0 160 bool
michael@0 161 CodeGeneratorARM::visitCompareAndBranch(LCompareAndBranch *comp)
michael@0 162 {
michael@0 163 Assembler::Condition cond = JSOpToCondition(comp->cmpMir()->compareType(), comp->jsop());
michael@0 164 if (comp->right()->isConstant())
michael@0 165 masm.ma_cmp(ToRegister(comp->left()), Imm32(ToInt32(comp->right())));
michael@0 166 else
michael@0 167 masm.ma_cmp(ToRegister(comp->left()), ToOperand(comp->right()));
michael@0 168 emitBranch(cond, comp->ifTrue(), comp->ifFalse());
michael@0 169 return true;
michael@0 170
michael@0 171 }
michael@0 172
// Emit the shared out-of-line paths, then — if any bailout was emitted —
// append the common deopt tail that every non-table bailout jumps to.
bool
CodeGeneratorARM::generateOutOfLineCode()
{
    if (!CodeGeneratorShared::generateOutOfLineCode())
        return false;

    if (deoptLabel_.used()) {
        // All non-table-based bailouts will go here.
        masm.bind(&deoptLabel_);

        // Push the frame size, so the handler can recover the IonScript.
        // (It is stored in lr rather than pushed on the stack.)
        masm.ma_mov(Imm32(frameSize()), lr);

        JitCode *handler = gen->jitRuntime()->getGenericBailoutHandler();
        masm.branch(handler);
    }

    return true;
}
michael@0 192
// Emit a conditional bailout for 'snapshot', taken when 'condition' holds.
// Sequential mode prefers a bailout-table entry and falls back to an
// out-of-line lazy bailout; parallel mode just branches to an abort stub.
// Returns false on OOM/encoding failure.
bool
CodeGeneratorARM::bailoutIf(Assembler::Condition condition, LSnapshot *snapshot)
{
    CompileInfo &info = snapshot->mir()->block()->info();
    switch (info.executionMode()) {

      case ParallelExecution: {
        // in parallel mode, make no attempt to recover, just signal an error.
        OutOfLineAbortPar *ool = oolAbortPar(ParallelBailoutUnsupported,
                                             snapshot->mir()->block(),
                                             snapshot->mir()->pc());
        masm.ma_b(ool->entry(), condition);
        return true;
      }
      case SequentialExecution:
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("No such execution mode");
    }
    if (!encode(snapshot))
        return false;

    // Though the assembler doesn't track all frame pushes, at least make sure
    // the known value makes sense. We can't use bailout tables if the stack
    // isn't properly aligned to the static frame size.
    JS_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
                 frameClass_.frameSize() == masm.framePushed());

    if (assignBailoutId(snapshot)) {
        // Branch straight into the per-id slot of the prebuilt bailout table.
        uint8_t *code = deoptTable_->raw() + snapshot->bailoutId() * BAILOUT_TABLE_ENTRY_SIZE;
        masm.ma_b(code, Relocation::HARDCODED, condition);
        return true;
    }

    // We could not use a jump table, either because all bailout IDs were
    // reserved, or a jump table is not optimal for this frame size or
    // platform. Whatever, we will generate a lazy bailout.
    OutOfLineBailout *ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed());
    if (!addOutOfLineCode(ool))
        return false;

    masm.ma_b(ool->entry(), condition);

    return true;
}
// Retarget an already-emitted (used, unbound) branch label at a bailout
// path for 'snapshot'. Unlike bailoutIf, this never uses the bailout table.
bool
CodeGeneratorARM::bailoutFrom(Label *label, LSnapshot *snapshot)
{
    // If the assembler already OOM'd/failed, give up early.
    if (masm.bailed())
        return false;
    JS_ASSERT(label->used());
    JS_ASSERT(!label->bound());

    CompileInfo &info = snapshot->mir()->block()->info();
    switch (info.executionMode()) {

      case ParallelExecution: {
        // in parallel mode, make no attempt to recover, just signal an error.
        OutOfLineAbortPar *ool = oolAbortPar(ParallelBailoutUnsupported,
                                             snapshot->mir()->block(),
                                             snapshot->mir()->pc());
        masm.retarget(label, ool->entry());
        return true;
      }
      case SequentialExecution:
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("No such execution mode");
    }

    if (!encode(snapshot))
        return false;

    // Though the assembler doesn't track all frame pushes, at least make sure
    // the known value makes sense. We can't use bailout tables if the stack
    // isn't properly aligned to the static frame size.
    JS_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
                 frameClass_.frameSize() == masm.framePushed());

    // On ARM we don't use a bailout table.
    OutOfLineBailout *ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed());
    if (!addOutOfLineCode(ool)) {
        return false;
    }

    masm.retarget(label, ool->entry());

    return true;
}
michael@0 282
michael@0 283 bool
michael@0 284 CodeGeneratorARM::bailout(LSnapshot *snapshot)
michael@0 285 {
michael@0 286 Label label;
michael@0 287 masm.ma_b(&label);
michael@0 288 return bailoutFrom(&label, snapshot);
michael@0 289 }
michael@0 290
// Emit the lazy-bailout stub body: push the snapshot offset (twice, to
// fill both BailoutStack fields) and jump to the common deopt tail.
bool
CodeGeneratorARM::visitOutOfLineBailout(OutOfLineBailout *ool)
{
    masm.ma_mov(Imm32(ool->snapshot()->snapshotOffset()), ScratchRegister);
    masm.ma_push(ScratchRegister); // BailoutStack::padding_
    masm.ma_push(ScratchRegister); // BailoutStack::snapshotOffset_
    masm.ma_b(&deoptLabel_);
    return true;
}
michael@0 300
// Double min/max with JS semantics: NaN-propagating, and distinguishing
// -0 from +0 (min(-0, 0) == -0, max(-0, 0) == 0). Operates in place on
// 'first', which the register allocator pins to the output.
bool
CodeGeneratorARM::visitMinMaxD(LMinMaxD *ins)
{
    FloatRegister first = ToFloatRegister(ins->first());
    FloatRegister second = ToFloatRegister(ins->second());
    FloatRegister output = ToFloatRegister(ins->output());

    JS_ASSERT(first == output);

    // Condition under which 'second' is the answer.
    Assembler::Condition cond = ins->mir()->isMax()
                                ? Assembler::VFP_LessThanOrEqual
                                : Assembler::VFP_GreaterThanOrEqual;
    Label nan, equal, returnSecond, done;

    masm.compareDouble(first, second);
    masm.ma_b(&nan, Assembler::VFP_Unordered); // first or second is NaN, result is NaN.
    masm.ma_b(&equal, Assembler::VFP_Equal); // make sure we handle -0 and 0 right.
    masm.ma_b(&returnSecond, cond);
    masm.ma_b(&done);

    // Check for zero.
    masm.bind(&equal);
    masm.compareDouble(first, InvalidFloatReg);
    masm.ma_b(&done, Assembler::VFP_NotEqualOrUnordered); // first wasn't 0 or -0, so just return it.
    // So now both operands are either -0 or 0.
    if (ins->mir()->isMax()) {
        masm.ma_vadd(second, first, first); // -0 + -0 = -0 and -0 + 0 = 0.
    } else {
        // min: compute -(-first - second), which is -0 iff either is -0.
        masm.ma_vneg(first, first);
        masm.ma_vsub(first, second, first);
        masm.ma_vneg(first, first);
    }
    masm.ma_b(&done);

    masm.bind(&nan);
    masm.loadConstantDouble(GenericNaN(), output);
    masm.ma_b(&done);

    masm.bind(&returnSecond);
    masm.ma_vmov(second, output);

    masm.bind(&done);
    return true;
}
michael@0 345
michael@0 346 bool
michael@0 347 CodeGeneratorARM::visitAbsD(LAbsD *ins)
michael@0 348 {
michael@0 349 FloatRegister input = ToFloatRegister(ins->input());
michael@0 350 JS_ASSERT(input == ToFloatRegister(ins->output()));
michael@0 351 masm.ma_vabs(input, input);
michael@0 352 return true;
michael@0 353 }
michael@0 354
michael@0 355 bool
michael@0 356 CodeGeneratorARM::visitAbsF(LAbsF *ins)
michael@0 357 {
michael@0 358 FloatRegister input = ToFloatRegister(ins->input());
michael@0 359 JS_ASSERT(input == ToFloatRegister(ins->output()));
michael@0 360 masm.ma_vabs_f32(input, input);
michael@0 361 return true;
michael@0 362 }
michael@0 363
michael@0 364 bool
michael@0 365 CodeGeneratorARM::visitSqrtD(LSqrtD *ins)
michael@0 366 {
michael@0 367 FloatRegister input = ToFloatRegister(ins->input());
michael@0 368 FloatRegister output = ToFloatRegister(ins->output());
michael@0 369 masm.ma_vsqrt(input, output);
michael@0 370 return true;
michael@0 371 }
michael@0 372
michael@0 373 bool
michael@0 374 CodeGeneratorARM::visitSqrtF(LSqrtF *ins)
michael@0 375 {
michael@0 376 FloatRegister input = ToFloatRegister(ins->input());
michael@0 377 FloatRegister output = ToFloatRegister(ins->output());
michael@0 378 masm.ma_vsqrt_f32(input, output);
michael@0 379 return true;
michael@0 380 }
michael@0 381
michael@0 382 bool
michael@0 383 CodeGeneratorARM::visitAddI(LAddI *ins)
michael@0 384 {
michael@0 385 const LAllocation *lhs = ins->getOperand(0);
michael@0 386 const LAllocation *rhs = ins->getOperand(1);
michael@0 387 const LDefinition *dest = ins->getDef(0);
michael@0 388
michael@0 389 if (rhs->isConstant())
michael@0 390 masm.ma_add(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCond);
michael@0 391 else
michael@0 392 masm.ma_add(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCond);
michael@0 393
michael@0 394 if (ins->snapshot() && !bailoutIf(Assembler::Overflow, ins->snapshot()))
michael@0 395 return false;
michael@0 396
michael@0 397 return true;
michael@0 398 }
michael@0 399
michael@0 400 bool
michael@0 401 CodeGeneratorARM::visitSubI(LSubI *ins)
michael@0 402 {
michael@0 403 const LAllocation *lhs = ins->getOperand(0);
michael@0 404 const LAllocation *rhs = ins->getOperand(1);
michael@0 405 const LDefinition *dest = ins->getDef(0);
michael@0 406
michael@0 407 if (rhs->isConstant())
michael@0 408 masm.ma_sub(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCond);
michael@0 409 else
michael@0 410 masm.ma_sub(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCond);
michael@0 411
michael@0 412 if (ins->snapshot() && !bailoutIf(Assembler::Overflow, ins->snapshot()))
michael@0 413 return false;
michael@0 414 return true;
michael@0 415 }
michael@0 416
// Int32 multiplication. Constant right-hand sides are strength-reduced
// (negate, clear, move, add, shift, shift+add) where possible; otherwise a
// real multiply is emitted, with overflow and negative-zero bailouts as
// required by the MIR node.
bool
CodeGeneratorARM::visitMulI(LMulI *ins)
{
    const LAllocation *lhs = ins->getOperand(0);
    const LAllocation *rhs = ins->getOperand(1);
    const LDefinition *dest = ins->getDef(0);
    MMul *mul = ins->mir();
    JS_ASSERT_IF(mul->mode() == MMul::Integer, !mul->canBeNegativeZero() && !mul->canOverflow());

    if (rhs->isConstant()) {
        // Bailout when this condition is met.
        Assembler::Condition c = Assembler::Overflow;
        // Bailout on -0.0
        int32_t constant = ToInt32(rhs);
        if (mul->canBeNegativeZero() && constant <= 0) {
            // constant == 0: result is -0 iff lhs < 0.
            // constant < 0: result is -0 iff lhs == 0.
            Assembler::Condition bailoutCond = (constant == 0) ? Assembler::LessThan : Assembler::Equal;
            masm.ma_cmp(ToRegister(lhs), Imm32(0));
            if (!bailoutIf(bailoutCond, ins->snapshot()))
                return false;
        }
        // TODO: move these to ma_mul.
        switch (constant) {
          case -1:
            masm.ma_rsb(ToRegister(lhs), Imm32(0), ToRegister(dest), SetCond);
            break;
          case 0:
            masm.ma_mov(Imm32(0), ToRegister(dest));
            return true; // escape overflow check;
          case 1:
            // nop
            masm.ma_mov(ToRegister(lhs), ToRegister(dest));
            return true; // escape overflow check;
          case 2:
            masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest), SetCond);
            // Overflow is handled later.
            break;
          default: {
            bool handled = false;
            if (constant > 0) {
                // Try shift and add sequences for a positive constant.
                if (!mul->canOverflow()) {
                    // If it cannot overflow, we can do lots of optimizations
                    Register src = ToRegister(lhs);
                    uint32_t shift = FloorLog2(constant);
                    uint32_t rest = constant - (1 << shift);
                    // See if the constant has one bit set, meaning it can be encoded as a bitshift
                    if ((1 << shift) == constant) {
                        masm.ma_lsl(Imm32(shift), src, ToRegister(dest));
                        handled = true;
                    } else {
                        // If the constant cannot be encoded as (1<<C1), see if it can be encoded as
                        // (1<<C1) | (1<<C2), which can be computed using an add and a shift
                        uint32_t shift_rest = FloorLog2(rest);
                        if ((1u << shift_rest) == rest) {
                            masm.as_add(ToRegister(dest), src, lsl(src, shift-shift_rest));
                            if (shift_rest != 0)
                                masm.ma_lsl(Imm32(shift_rest), ToRegister(dest), ToRegister(dest));
                            handled = true;
                        }
                    }
                } else if (ToRegister(lhs) != ToRegister(dest)) {
                    // To stay on the safe side, only optimize things that are a
                    // power of 2.

                    uint32_t shift = FloorLog2(constant);
                    if ((1 << shift) == constant) {
                        // dest = lhs * pow(2,shift)
                        masm.ma_lsl(Imm32(shift), ToRegister(lhs), ToRegister(dest));
                        // At runtime, check (lhs == dest >> shift), if this does not hold,
                        // some bits were lost due to overflow, and the computation should
                        // be resumed as a double.
                        masm.as_cmp(ToRegister(lhs), asr(ToRegister(dest), shift));
                        c = Assembler::NotEqual;
                        handled = true;
                    }
                }
            }

            if (!handled) {
                // No strength reduction applied; emit a real multiply,
                // with an overflow check when required.
                if (mul->canOverflow())
                    c = masm.ma_check_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), c);
                else
                    masm.ma_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest));
            }
          }
        }
        // Bailout on overflow
        if (mul->canOverflow() && !bailoutIf(c, ins->snapshot()))
            return false;
    } else {
        Assembler::Condition c = Assembler::Overflow;

        //masm.imull(ToOperand(rhs), ToRegister(lhs));
        if (mul->canOverflow())
            c = masm.ma_check_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), c);
        else
            masm.ma_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest));

        // Bailout on overflow
        if (mul->canOverflow() && !bailoutIf(c, ins->snapshot()))
            return false;

        if (mul->canBeNegativeZero()) {
            // A zero product is -0 (a double, not an int32) if either
            // operand was negative.
            Label done;
            masm.ma_cmp(ToRegister(dest), Imm32(0));
            masm.ma_b(&done, Assembler::NotEqual);

            // Result is -0 if lhs or rhs is negative.
            masm.ma_cmn(ToRegister(lhs), ToRegister(rhs));
            if (!bailoutIf(Assembler::Signed, ins->snapshot()))
                return false;

            masm.bind(&done);
        }
    }

    return true;
}
michael@0 535
// Shared guard code for integer division (hardware and software paths).
// Emits the INT32_MIN / -1 overflow, divide-by-zero, and negative-zero
// checks, either truncating the result or bailing out depending on what
// the MIR node permits. Jumps to 'done' when a truncated result has
// already been written to 'output'.
bool
CodeGeneratorARM::divICommon(MDiv *mir, Register lhs, Register rhs, Register output,
                             LSnapshot *snapshot, Label &done)
{
    if (mir->canBeNegativeOverflow()) {
        // Handle INT32_MIN / -1;
        // The integer division will give INT32_MIN, but we want -(double)INT32_MIN.
        masm.ma_cmp(lhs, Imm32(INT32_MIN)); // sets EQ if lhs == INT32_MIN
        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal); // if EQ (LHS == INT32_MIN), sets EQ if rhs == -1
        if (mir->canTruncateOverflow()) {
            // (-INT32_MIN)|0 = INT32_MIN
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(INT32_MIN), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, snapshot))
                return false;
        }
    }

    // Handle divide by zero.
    if (mir->canBeDivideByZero()) {
        masm.ma_cmp(rhs, Imm32(0));
        if (mir->canTruncateInfinities()) {
            // Infinity|0 == 0
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, snapshot))
                return false;
        }
    }

    // Handle negative 0. (0 / negative yields -0.0, which is not an int32.)
    if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
        Label nonzero;
        masm.ma_cmp(lhs, Imm32(0));
        masm.ma_b(&nonzero, Assembler::NotEqual);
        masm.ma_cmp(rhs, Imm32(0));
        JS_ASSERT(mir->fallible());
        if (!bailoutIf(Assembler::LessThan, snapshot))
            return false;
        masm.bind(&nonzero);
    }

    return true;
}
michael@0 590
// Signed 32-bit division using the hardware SDIV instruction.
bool
CodeGeneratorARM::visitDivI(LDivI *ins)
{
    // Extract the registers from this instruction
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register temp = ToRegister(ins->getTemp(0));
    Register output = ToRegister(ins->output());
    MDiv *mir = ins->mir();

    Label done;
    if (!divICommon(mir, lhs, rhs, output, ins->snapshot(), done))
        return false;

    if (mir->canTruncateRemainder()) {
        masm.ma_sdiv(lhs, rhs, output);
    } else {
        // SDIV truncates; rebuild lhs from the quotient and bail out if a
        // nonzero remainder was discarded (true result is not an int32).
        masm.ma_sdiv(lhs, rhs, ScratchRegister);
        masm.ma_mul(ScratchRegister, rhs, temp);
        masm.ma_cmp(lhs, temp);
        if (!bailoutIf(Assembler::NotEqual, ins->snapshot()))
            return false;
        masm.ma_mov(ScratchRegister, output);
    }

    masm.bind(&done);

    return true;
}
michael@0 620
// ARM EABI runtime helpers used by the software division paths
// (visitSoftDivI / visitSoftModI) on targets without hardware division.
// Per the call sites below, the quotient comes back in r0 and the
// remainder in r1, declared here as one 64-bit return value.
extern "C" {
    extern int64_t __aeabi_idivmod(int,int);
    extern int64_t __aeabi_uidivmod(int,int);
}
michael@0 625
// Signed 32-bit division via the __aeabi_idivmod runtime call, for targets
// without hardware integer division.
bool
CodeGeneratorARM::visitSoftDivI(LSoftDivI *ins)
{
    // Extract the registers from this instruction
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
    MDiv *mir = ins->mir();

    Label done;
    if (!divICommon(mir, lhs, rhs, output, ins->snapshot(), done))
        return false;

    masm.setupAlignedABICall(2);
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    if (gen->compilingAsmJS())
        masm.callWithABI(AsmJSImm_aeabi_idivmod);
    else
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_idivmod));
    // idivmod returns the quotient in r0, and the remainder in r1.
    if (!mir->canTruncateRemainder()) {
        JS_ASSERT(mir->fallible());
        // A nonzero remainder means the true result is not an int32: bail.
        masm.ma_cmp(r1, Imm32(0));
        if (!bailoutIf(Assembler::NonZero, ins->snapshot()))
            return false;
    }

    masm.bind(&done);

    return true;
}
michael@0 658
// Signed division by a known power of two, lowered to an arithmetic shift
// with a rounding adjustment for negative numerators.
bool
CodeGeneratorARM::visitDivPowTwoI(LDivPowTwoI *ins)
{
    Register lhs = ToRegister(ins->numerator());
    Register output = ToRegister(ins->output());
    int32_t shift = ins->shift();

    if (shift != 0) {
        MDiv *mir = ins->mir();
        if (!mir->isTruncated()) {
            // If the remainder is != 0, bailout since this must be a double.
            // (The low 'shift' bits, shifted to the top, are the remainder.)
            masm.as_mov(ScratchRegister, lsl(lhs, 32 - shift), SetCond);
            if (!bailoutIf(Assembler::NonZero, ins->snapshot()))
                return false;
        }

        if (!mir->canBeNegativeDividend()) {
            // Numerator is unsigned, so needs no adjusting. Do the shift.
            masm.as_mov(output, asr(lhs, shift));
            return true;
        }

        // Adjust the value so that shifting produces a correctly rounded result
        // when the numerator is negative. See 10-1 "Signed Division by a Known
        // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
        if (shift > 1) {
            masm.as_mov(ScratchRegister, asr(lhs, 31));
            masm.as_add(ScratchRegister, lhs, lsr(ScratchRegister, 32 - shift));
        } else
            masm.as_add(ScratchRegister, lhs, lsr(lhs, 32 - shift));

        // Do the shift.
        masm.as_mov(output, asr(ScratchRegister, shift));
    } else {
        // Division by 1: just copy the numerator.
        masm.ma_mov(lhs, output);
    }

    return true;
}
michael@0 698
// Shared guard code for integer modulus (hardware and software paths).
// Checks the divide-by-zero and 0 % negative cases, truncating to 0 or
// bailing out per the MIR node. Jumps to 'done' when a result has already
// been written to 'output'.
bool
CodeGeneratorARM::modICommon(MMod *mir, Register lhs, Register rhs, Register output,
                             LSnapshot *snapshot, Label &done)
{
    // 0/X (with X < 0) is bad because both of these values *should* be doubles, and
    // the result should be -0.0, which cannot be represented in integers.
    // X/0 is bad because it will give garbage (or abort), when it should give
    // either \infty, -\infty or NAN.

    // Prevent 0 / X (with X < 0) and X / 0
    // testing X / Y. Compare Y with 0.
    // There are three cases: (Y < 0), (Y == 0) and (Y > 0)
    // If (Y < 0), then we compare X with 0, and bail if X == 0
    // If (Y == 0), then we simply want to bail. Since this does not set
    // the flags necessary for LT to trigger, we don't test X, and take the
    // bailout because the EQ flag is set.
    // if (Y > 0), we don't set EQ, and we don't trigger LT, so we don't take the bailout.
    if (mir->canBeDivideByZero() || mir->canBeNegativeDividend()) {
        masm.ma_cmp(rhs, Imm32(0));
        masm.ma_cmp(lhs, Imm32(0), Assembler::LessThan);
        if (mir->isTruncated()) {
            // NaN|0 == 0 and (0 % -X)|0 == 0
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, snapshot))
                return false;
        }
    }

    return true;
}
michael@0 735
// Signed 32-bit modulus using hardware division (ma_smod), with a
// negative-zero check: a zero result from a negative dividend is really
// -0.0 and must bail out unless truncated.
bool
CodeGeneratorARM::visitModI(LModI *ins)
{
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
    Register callTemp = ToRegister(ins->callTemp());
    MMod *mir = ins->mir();

    // save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
    masm.ma_mov(lhs, callTemp);

    Label done;
    if (!modICommon(mir, lhs, rhs, output, ins->snapshot(), done))
        return false;

    masm.ma_smod(lhs, rhs, output);

    // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
    if (mir->canBeNegativeDividend()) {
        if (mir->isTruncated()) {
            // -0.0|0 == 0
        } else {
            JS_ASSERT(mir->fallible());
            // See if X < 0
            masm.ma_cmp(output, Imm32(0));
            masm.ma_b(&done, Assembler::NotEqual);
            // Result is zero: bail if the saved dividend was negative.
            masm.ma_cmp(callTemp, Imm32(0));
            if (!bailoutIf(Assembler::Signed, ins->snapshot()))
                return false;
        }
    }

    masm.bind(&done);
    return true;
}
michael@0 772
bool
CodeGeneratorARM::visitSoftModI(LSoftModI *ins)
{
    // Software int32 modulus (no hardware divide): calls the
    // __aeabi_idivmod runtime helper, then patches up the JS edge cases.
    // Extract the registers from this instruction.
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
    Register callTemp = ToRegister(ins->callTemp());
    MMod *mir = ins->mir();
    Label done;

    // Save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
    // The temp must be in r4..r11 so the value survives the ABI call below.
    JS_ASSERT(callTemp.code() > r3.code() && callTemp.code() < r12.code());
    masm.ma_mov(lhs, callTemp);

    // Prevent INT_MIN % -1: the integer division would give INT_MIN, but we
    // want -(double)INT_MIN, which the helper cannot produce.
    if (mir->canBeNegativeDividend()) {
        masm.ma_cmp(lhs, Imm32(INT_MIN)); // sets EQ if lhs == INT_MIN
        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal); // if EQ (lhs == INT_MIN), sets EQ if rhs == -1
        if (mir->isTruncated()) {
            // (INT_MIN % -1)|0 == 0, so produce 0 directly and skip the call.
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, ins->snapshot()))
                return false;
        }
    }

    // Handles the special-case divisors; may jump to 'done' or bail out.
    if (!modICommon(mir, lhs, rhs, output, ins->snapshot(), done))
        return false;

    masm.setupAlignedABICall(2);
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    if (gen->compilingAsmJS())
        masm.callWithABI(AsmJSImm_aeabi_idivmod);
    else
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_idivmod));

    // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0.
    if (mir->canBeNegativeDividend()) {
        if (mir->isTruncated()) {
            // (-0.0)|0 == 0, nothing to do.
        } else {
            JS_ASSERT(mir->fallible());
            // r1 holds the remainder after the call (presumably the output
            // register is fixed to r1 by lowering — confirm against LIR-arm).
            // Bail if the remainder is 0 and the saved dividend was negative.
            masm.ma_cmp(r1, Imm32(0));
            masm.ma_b(&done, Assembler::NotEqual);
            masm.ma_cmp(callTemp, Imm32(0));
            if (!bailoutIf(Assembler::Signed, ins->snapshot()))
                return false;
        }
    }
    masm.bind(&done);
    return true;
}
michael@0 835
bool
CodeGeneratorARM::visitModPowTwoI(LModPowTwoI *ins)
{
    // in % (1 << shift), computed as sign(in) * (|in| & mask).
    // Bug 739870: jbramley has a different sequence that may help with speed here.
    Register in = ToRegister(ins->getOperand(0));
    Register out = ToRegister(ins->getDef(0));
    MMod *mir = ins->mir();
    Label fin;
    masm.ma_mov(in, out, SetCond);      // copy input, setting N/Z from its value
    masm.ma_b(&fin, Assembler::Zero);   // 0 % x == 0; done
    // If the input was negative (N still set from the mov — assumes the
    // intervening ops leave the flags alone), negate before masking...
    masm.ma_rsb(Imm32(0), out, NoSetCond, Assembler::Signed);
    masm.ma_and(Imm32((1<<ins->shift())-1), out);
    // ...and negate back afterwards, this time setting Z on a zero result.
    masm.ma_rsb(Imm32(0), out, SetCond, Assembler::Signed);
    if (mir->canBeNegativeDividend()) {
        if (!mir->isTruncated()) {
            JS_ASSERT(mir->fallible());
            // Zero result from a negative dividend is really -0: bail out.
            if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                return false;
        } else {
            // -0|0 == 0, so truncated consumers accept plain 0.
        }
    }
    masm.bind(&fin);
    return true;
}
michael@0 861
michael@0 862 bool
michael@0 863 CodeGeneratorARM::visitModMaskI(LModMaskI *ins)
michael@0 864 {
michael@0 865 Register src = ToRegister(ins->getOperand(0));
michael@0 866 Register dest = ToRegister(ins->getDef(0));
michael@0 867 Register tmp1 = ToRegister(ins->getTemp(0));
michael@0 868 Register tmp2 = ToRegister(ins->getTemp(1));
michael@0 869 MMod *mir = ins->mir();
michael@0 870 masm.ma_mod_mask(src, dest, tmp1, tmp2, ins->shift());
michael@0 871 if (mir->canBeNegativeDividend()) {
michael@0 872 if (!mir->isTruncated()) {
michael@0 873 JS_ASSERT(mir->fallible());
michael@0 874 if (!bailoutIf(Assembler::Zero, ins->snapshot()))
michael@0 875 return false;
michael@0 876 } else {
michael@0 877 // -0|0 == 0
michael@0 878 }
michael@0 879 }
michael@0 880 return true;
michael@0 881 }
michael@0 882 bool
michael@0 883 CodeGeneratorARM::visitBitNotI(LBitNotI *ins)
michael@0 884 {
michael@0 885 const LAllocation *input = ins->getOperand(0);
michael@0 886 const LDefinition *dest = ins->getDef(0);
michael@0 887 // this will not actually be true on arm.
michael@0 888 // We can not an imm8m in order to get a wider range
michael@0 889 // of numbers
michael@0 890 JS_ASSERT(!input->isConstant());
michael@0 891
michael@0 892 masm.ma_mvn(ToRegister(input), ToRegister(dest));
michael@0 893 return true;
michael@0 894 }
michael@0 895
michael@0 896 bool
michael@0 897 CodeGeneratorARM::visitBitOpI(LBitOpI *ins)
michael@0 898 {
michael@0 899 const LAllocation *lhs = ins->getOperand(0);
michael@0 900 const LAllocation *rhs = ins->getOperand(1);
michael@0 901 const LDefinition *dest = ins->getDef(0);
michael@0 902 // all of these bitops should be either imm32's, or integer registers.
michael@0 903 switch (ins->bitop()) {
michael@0 904 case JSOP_BITOR:
michael@0 905 if (rhs->isConstant())
michael@0 906 masm.ma_orr(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
michael@0 907 else
michael@0 908 masm.ma_orr(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
michael@0 909 break;
michael@0 910 case JSOP_BITXOR:
michael@0 911 if (rhs->isConstant())
michael@0 912 masm.ma_eor(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
michael@0 913 else
michael@0 914 masm.ma_eor(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
michael@0 915 break;
michael@0 916 case JSOP_BITAND:
michael@0 917 if (rhs->isConstant())
michael@0 918 masm.ma_and(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
michael@0 919 else
michael@0 920 masm.ma_and(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
michael@0 921 break;
michael@0 922 default:
michael@0 923 MOZ_ASSUME_UNREACHABLE("unexpected binary opcode");
michael@0 924 }
michael@0 925
michael@0 926 return true;
michael@0 927 }
michael@0 928
bool
CodeGeneratorARM::visitShiftI(LShiftI *ins)
{
    // Emit <<, >> or >>> on int32 operands. The only fallible case is >>>
    // when the unsigned result does not fit in a signed int32.
    Register lhs = ToRegister(ins->lhs());
    const LAllocation *rhs = ins->rhs();
    Register dest = ToRegister(ins->output());

    if (rhs->isConstant()) {
        // Constant shift counts are taken mod 32, per JS semantics.
        int32_t shift = ToInt32(rhs) & 0x1F;
        switch (ins->bitop()) {
        case JSOP_LSH:
            if (shift)
                masm.ma_lsl(Imm32(shift), lhs, dest);
            else
                masm.ma_mov(lhs, dest);
            break;
        case JSOP_RSH:
            if (shift)
                masm.ma_asr(Imm32(shift), lhs, dest);
            else
                masm.ma_mov(lhs, dest);
            break;
        case JSOP_URSH:
            if (shift) {
                masm.ma_lsr(Imm32(shift), lhs, dest);
            } else {
                // x >>> 0 can overflow: a negative int32 reinterpreted as
                // uint32 is not representable as int32, so bail out.
                masm.ma_mov(lhs, dest);
                if (ins->mir()->toUrsh()->fallible()) {
                    masm.ma_cmp(dest, Imm32(0));
                    if (!bailoutIf(Assembler::LessThan, ins->snapshot()))
                        return false;
                }
            }
            break;
        default:
            MOZ_ASSUME_UNREACHABLE("Unexpected shift op");
        }
    } else {
        // The shift amounts should be AND'ed into the 0-31 range since arm
        // shifts by the lower byte of the register (it will attempt to shift
        // by 250 if you ask it to).
        masm.ma_and(Imm32(0x1F), ToRegister(rhs), dest);

        switch (ins->bitop()) {
        case JSOP_LSH:
            masm.ma_lsl(dest, lhs, dest);
            break;
        case JSOP_RSH:
            masm.ma_asr(dest, lhs, dest);
            break;
        case JSOP_URSH:
            masm.ma_lsr(dest, lhs, dest);
            if (ins->mir()->toUrsh()->fallible()) {
                // x >>> 0 can overflow: bail if the result looks negative.
                masm.ma_cmp(dest, Imm32(0));
                if (!bailoutIf(Assembler::LessThan, ins->snapshot()))
                    return false;
            }
            break;
        default:
            MOZ_ASSUME_UNREACHABLE("Unexpected shift op");
        }
    }

    return true;
}
michael@0 996
michael@0 997 bool
michael@0 998 CodeGeneratorARM::visitUrshD(LUrshD *ins)
michael@0 999 {
michael@0 1000 Register lhs = ToRegister(ins->lhs());
michael@0 1001 Register temp = ToRegister(ins->temp());
michael@0 1002
michael@0 1003 const LAllocation *rhs = ins->rhs();
michael@0 1004 FloatRegister out = ToFloatRegister(ins->output());
michael@0 1005
michael@0 1006 if (rhs->isConstant()) {
michael@0 1007 int32_t shift = ToInt32(rhs) & 0x1F;
michael@0 1008 if (shift)
michael@0 1009 masm.ma_lsr(Imm32(shift), lhs, temp);
michael@0 1010 else
michael@0 1011 masm.ma_mov(lhs, temp);
michael@0 1012 } else {
michael@0 1013 masm.ma_and(Imm32(0x1F), ToRegister(rhs), temp);
michael@0 1014 masm.ma_lsr(temp, lhs, temp);
michael@0 1015 }
michael@0 1016
michael@0 1017 masm.convertUInt32ToDouble(temp, out);
michael@0 1018 return true;
michael@0 1019 }
michael@0 1020
bool
CodeGeneratorARM::visitPowHalfD(LPowHalfD *ins)
{
    // Inline Math.pow(x, 0.5): sqrt(x), except for two IEEE edge cases
    // handled explicitly below.
    FloatRegister input = ToFloatRegister(ins->input());
    FloatRegister output = ToFloatRegister(ins->output());

    Label done;

    // Math.pow(-Infinity, 0.5) == Infinity, but sqrt(-Infinity) would give
    // NaN: detect -Infinity and negate it into the output instead.
    masm.ma_vimm(NegativeInfinity<double>(), ScratchFloatReg);
    masm.compareDouble(input, ScratchFloatReg);
    masm.ma_vneg(ScratchFloatReg, output, Assembler::Equal);
    masm.ma_b(&done, Assembler::Equal);

    // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5). Adding 0 converts any -0 to 0.
    masm.ma_vimm(0.0, ScratchFloatReg);
    masm.ma_vadd(ScratchFloatReg, input, output);
    masm.ma_vsqrt(output, output);

    masm.bind(&done);
    return true;
}
michael@0 1043
michael@0 1044 MoveOperand
michael@0 1045 CodeGeneratorARM::toMoveOperand(const LAllocation *a) const
michael@0 1046 {
michael@0 1047 if (a->isGeneralReg())
michael@0 1048 return MoveOperand(ToRegister(a));
michael@0 1049 if (a->isFloatReg())
michael@0 1050 return MoveOperand(ToFloatRegister(a));
michael@0 1051 JS_ASSERT((ToStackOffset(a) & 3) == 0);
michael@0 1052 int32_t offset = ToStackOffset(a);
michael@0 1053
michael@0 1054 // The way the stack slots work, we assume that everything from depth == 0 downwards is writable
michael@0 1055 // however, since our frame is included in this, ensure that the frame gets skipped
michael@0 1056 if (gen->compilingAsmJS())
michael@0 1057 offset -= AlignmentMidPrologue;
michael@0 1058
michael@0 1059 return MoveOperand(StackPointer, offset);
michael@0 1060 }
michael@0 1061
// Out-of-line path that fills in the absolute-address entries of a table
// switch's jump table once all case labels have known offsets.
class js::jit::OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorARM>
{
    MTableSwitch *mir_;
    // One CodeLabel per jump-table entry, in emission order.
    Vector<CodeLabel, 8, IonAllocPolicy> codeLabels_;

    // OutOfLineCodeBase hook: dispatch back to the code generator.
    bool accept(CodeGeneratorARM *codegen) {
        return codegen->visitOutOfLineTableSwitch(this);
    }

  public:
    OutOfLineTableSwitch(TempAllocator &alloc, MTableSwitch *mir)
      : mir_(mir),
        codeLabels_(alloc)
    {}

    MTableSwitch *mir() const {
        return mir_;
    }

    // Record a label for a table entry; returns false on OOM.
    bool addCodeLabel(CodeLabel label) {
        return codeLabels_.append(label);
    }
    CodeLabel codeLabel(unsigned i) {
        return codeLabels_[i];
    }
};
michael@0 1088
michael@0 1089 bool
michael@0 1090 CodeGeneratorARM::visitOutOfLineTableSwitch(OutOfLineTableSwitch *ool)
michael@0 1091 {
michael@0 1092 MTableSwitch *mir = ool->mir();
michael@0 1093
michael@0 1094 size_t numCases = mir->numCases();
michael@0 1095 for (size_t i = 0; i < numCases; i++) {
michael@0 1096 LBlock *caseblock = mir->getCase(numCases - 1 - i)->lir();
michael@0 1097 Label *caseheader = caseblock->label();
michael@0 1098 uint32_t caseoffset = caseheader->offset();
michael@0 1099
michael@0 1100 // The entries of the jump table need to be absolute addresses and thus
michael@0 1101 // must be patched after codegen is finished.
michael@0 1102 CodeLabel cl = ool->codeLabel(i);
michael@0 1103 cl.src()->bind(caseoffset);
michael@0 1104 if (!masm.addCodeLabel(cl))
michael@0 1105 return false;
michael@0 1106 }
michael@0 1107
michael@0 1108 return true;
michael@0 1109 }
michael@0 1110
bool
CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch *mir, const Register &index,
                                          const Register &base)
{
    // The code generated by this is utter hax. The end result looks
    // something like:
    //     SUBS   index, input, #base
    //     RSBSPL index, index, #max
    //     LDRPL  pc, pc, index lsl 2
    //     B      default
    //
    // If the range of targets is N through M, we first subtract off the
    // lowest case (N), which both shifts the argument into the range
    // 0 to (M-N) and sets the Minus flag if the argument was out of range
    // on the low end.
    //
    // Then we do a reverse subtract with the size of the jump table, which
    // reverses the order of the range (it becomes size through 0 rather
    // than 0 through size). The main purpose of this is that the upper
    // bound check sets the same flag as the lower bound check; the rsb is
    // executed conditionally on the previous check succeeding.
    //
    // Then we conditionally load the pc, offset by the (reversed) index
    // times the address size, into the pc, which branches to the correct
    // case. NOTE: reading the pc yields the address of the current
    // instruction *PLUS 8*, i.e. ldr foo, [pc, +0] reads $pc+8, so there is
    // an empty word after the branch into the switch table before the table
    // actually starts. Since the only remaining unhandled case is the
    // default case (out of range high and low), a branch to the default
    // case is placed in that extra slot, ensuring we never fall into the
    // address table itself.
    Label *defaultcase = mir->getDefault()->lir()->label();

    int32_t cases = mir->numCases();
    // Lower value with low value.
    masm.ma_sub(index, Imm32(mir->low()), index, SetCond);
    masm.ma_rsb(index, Imm32(cases - 1), index, SetCond, Assembler::NotSigned);
    // No constant pool may be emitted between the pc-relative load and the
    // table it indexes, or the fixed +8 offset would be wrong.
    AutoForbidPools afp(&masm);
    masm.ma_ldr(DTRAddr(pc, DtrRegImmShift(index, LSL, 2)), pc, Offset, Assembler::NotSigned);
    masm.ma_b(defaultcase);

    // To fill in the CodeLabels for the case entries, we need to first
    // generate the case entries (we don't yet know their offsets in the
    // instruction stream).
    OutOfLineTableSwitch *ool = new(alloc()) OutOfLineTableSwitch(alloc(), mir);
    for (int32_t i = 0; i < cases; i++) {
        CodeLabel cl;
        masm.writeCodePointer(cl.dest());
        if (!ool->addCodeLabel(cl))
            return false;
    }
    if (!addOutOfLineCode(ool))
        return false;

    return true;
}
michael@0 1166
michael@0 1167 bool
michael@0 1168 CodeGeneratorARM::visitMathD(LMathD *math)
michael@0 1169 {
michael@0 1170 const LAllocation *src1 = math->getOperand(0);
michael@0 1171 const LAllocation *src2 = math->getOperand(1);
michael@0 1172 const LDefinition *output = math->getDef(0);
michael@0 1173
michael@0 1174 switch (math->jsop()) {
michael@0 1175 case JSOP_ADD:
michael@0 1176 masm.ma_vadd(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
michael@0 1177 break;
michael@0 1178 case JSOP_SUB:
michael@0 1179 masm.ma_vsub(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
michael@0 1180 break;
michael@0 1181 case JSOP_MUL:
michael@0 1182 masm.ma_vmul(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
michael@0 1183 break;
michael@0 1184 case JSOP_DIV:
michael@0 1185 masm.ma_vdiv(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
michael@0 1186 break;
michael@0 1187 default:
michael@0 1188 MOZ_ASSUME_UNREACHABLE("unexpected opcode");
michael@0 1189 }
michael@0 1190 return true;
michael@0 1191 }
michael@0 1192
michael@0 1193 bool
michael@0 1194 CodeGeneratorARM::visitMathF(LMathF *math)
michael@0 1195 {
michael@0 1196 const LAllocation *src1 = math->getOperand(0);
michael@0 1197 const LAllocation *src2 = math->getOperand(1);
michael@0 1198 const LDefinition *output = math->getDef(0);
michael@0 1199
michael@0 1200 switch (math->jsop()) {
michael@0 1201 case JSOP_ADD:
michael@0 1202 masm.ma_vadd_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
michael@0 1203 break;
michael@0 1204 case JSOP_SUB:
michael@0 1205 masm.ma_vsub_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
michael@0 1206 break;
michael@0 1207 case JSOP_MUL:
michael@0 1208 masm.ma_vmul_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
michael@0 1209 break;
michael@0 1210 case JSOP_DIV:
michael@0 1211 masm.ma_vdiv_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
michael@0 1212 break;
michael@0 1213 default:
michael@0 1214 MOZ_ASSUME_UNREACHABLE("unexpected opcode");
michael@0 1215 }
michael@0 1216 return true;
michael@0 1217 }
michael@0 1218
michael@0 1219 bool
michael@0 1220 CodeGeneratorARM::visitFloor(LFloor *lir)
michael@0 1221 {
michael@0 1222 FloatRegister input = ToFloatRegister(lir->input());
michael@0 1223 Register output = ToRegister(lir->output());
michael@0 1224 Label bail;
michael@0 1225 masm.floor(input, output, &bail);
michael@0 1226 if (!bailoutFrom(&bail, lir->snapshot()))
michael@0 1227 return false;
michael@0 1228 return true;
michael@0 1229 }
michael@0 1230
michael@0 1231 bool
michael@0 1232 CodeGeneratorARM::visitFloorF(LFloorF *lir)
michael@0 1233 {
michael@0 1234 FloatRegister input = ToFloatRegister(lir->input());
michael@0 1235 Register output = ToRegister(lir->output());
michael@0 1236 Label bail;
michael@0 1237 masm.floorf(input, output, &bail);
michael@0 1238 if (!bailoutFrom(&bail, lir->snapshot()))
michael@0 1239 return false;
michael@0 1240 return true;
michael@0 1241 }
michael@0 1242
michael@0 1243 bool
michael@0 1244 CodeGeneratorARM::visitRound(LRound *lir)
michael@0 1245 {
michael@0 1246 FloatRegister input = ToFloatRegister(lir->input());
michael@0 1247 Register output = ToRegister(lir->output());
michael@0 1248 FloatRegister tmp = ToFloatRegister(lir->temp());
michael@0 1249 Label bail;
michael@0 1250 // Output is either correct, or clamped. All -0 cases have been translated to a clamped
michael@0 1251 // case.a
michael@0 1252 masm.round(input, output, &bail, tmp);
michael@0 1253 if (!bailoutFrom(&bail, lir->snapshot()))
michael@0 1254 return false;
michael@0 1255 return true;
michael@0 1256 }
michael@0 1257
michael@0 1258 bool
michael@0 1259 CodeGeneratorARM::visitRoundF(LRoundF *lir)
michael@0 1260 {
michael@0 1261 FloatRegister input = ToFloatRegister(lir->input());
michael@0 1262 Register output = ToRegister(lir->output());
michael@0 1263 FloatRegister tmp = ToFloatRegister(lir->temp());
michael@0 1264 Label bail;
michael@0 1265 // Output is either correct, or clamped. All -0 cases have been translated to a clamped
michael@0 1266 // case.a
michael@0 1267 masm.roundf(input, output, &bail, tmp);
michael@0 1268 if (!bailoutFrom(&bail, lir->snapshot()))
michael@0 1269 return false;
michael@0 1270 return true;
michael@0 1271 }
michael@0 1272
void
CodeGeneratorARM::emitRoundDouble(const FloatRegister &src, const Register &dest, Label *fail)
{
    // Convert src to an int32 and jump to 'fail' if the conversion was not
    // exact/in-range.
    masm.ma_vcvt_F64_I32(src, ScratchFloatReg);
    masm.ma_vxfer(ScratchFloatReg, dest);
    // INT_MAX / INT_MIN in dest are treated as conversion failures
    // (presumably because the VFP conversion saturates out-of-range inputs
    // to these values — confirm against the vcvt semantics).
    masm.ma_cmp(dest, Imm32(0x7fffffff));
    masm.ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual);
    masm.ma_b(fail, Assembler::Equal);
}
michael@0 1282
michael@0 1283 bool
michael@0 1284 CodeGeneratorARM::visitTruncateDToInt32(LTruncateDToInt32 *ins)
michael@0 1285 {
michael@0 1286 return emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()));
michael@0 1287 }
michael@0 1288
michael@0 1289 bool
michael@0 1290 CodeGeneratorARM::visitTruncateFToInt32(LTruncateFToInt32 *ins)
michael@0 1291 {
michael@0 1292 return emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()));
michael@0 1293 }
michael@0 1294
// Frame size buckets (in bytes) that define the bailout frame size classes.
static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 };
michael@0 1296
michael@0 1297 FrameSizeClass
michael@0 1298 FrameSizeClass::FromDepth(uint32_t frameDepth)
michael@0 1299 {
michael@0 1300 for (uint32_t i = 0; i < JS_ARRAY_LENGTH(FrameSizes); i++) {
michael@0 1301 if (frameDepth < FrameSizes[i])
michael@0 1302 return FrameSizeClass(i);
michael@0 1303 }
michael@0 1304
michael@0 1305 return FrameSizeClass::None();
michael@0 1306 }
michael@0 1307
michael@0 1308 FrameSizeClass
michael@0 1309 FrameSizeClass::ClassLimit()
michael@0 1310 {
michael@0 1311 return FrameSizeClass(JS_ARRAY_LENGTH(FrameSizes));
michael@0 1312 }
michael@0 1313
uint32_t
FrameSizeClass::frameSize() const
{
    // Translate a frame size class back into its byte size. Only valid for
    // real classes produced by FromDepth, not None()/ClassLimit().
    JS_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
    JS_ASSERT(class_ < JS_ARRAY_LENGTH(FrameSizes));

    return FrameSizes[class_];
}
michael@0 1322
michael@0 1323 ValueOperand
michael@0 1324 CodeGeneratorARM::ToValue(LInstruction *ins, size_t pos)
michael@0 1325 {
michael@0 1326 Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
michael@0 1327 Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
michael@0 1328 return ValueOperand(typeReg, payloadReg);
michael@0 1329 }
michael@0 1330
michael@0 1331 ValueOperand
michael@0 1332 CodeGeneratorARM::ToOutValue(LInstruction *ins)
michael@0 1333 {
michael@0 1334 Register typeReg = ToRegister(ins->getDef(TYPE_INDEX));
michael@0 1335 Register payloadReg = ToRegister(ins->getDef(PAYLOAD_INDEX));
michael@0 1336 return ValueOperand(typeReg, payloadReg);
michael@0 1337 }
michael@0 1338
michael@0 1339 ValueOperand
michael@0 1340 CodeGeneratorARM::ToTempValue(LInstruction *ins, size_t pos)
michael@0 1341 {
michael@0 1342 Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
michael@0 1343 Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
michael@0 1344 return ValueOperand(typeReg, payloadReg);
michael@0 1345 }
michael@0 1346
michael@0 1347 bool
michael@0 1348 CodeGeneratorARM::visitValue(LValue *value)
michael@0 1349 {
michael@0 1350 const ValueOperand out = ToOutValue(value);
michael@0 1351
michael@0 1352 masm.moveValue(value->value(), out);
michael@0 1353 return true;
michael@0 1354 }
michael@0 1355
bool
CodeGeneratorARM::visitBox(LBox *box)
{
    // Box a non-floating-point value into a type/payload register pair.
    const LDefinition *type = box->getDef(TYPE_INDEX);

    JS_ASSERT(!box->getOperand(0)->isConstant());

    // The input operand and the output payload have the same virtual
    // register, so the payload needs no move; all that needs to be written
    // is the type tag for the type definition. (The original comment said
    // "On x86" — evidently copied from the x86 backend; the same scheme
    // applies here.)
    masm.ma_mov(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
    return true;
}
michael@0 1369
bool
CodeGeneratorARM::visitBoxFloatingPoint(LBoxFloatingPoint *box)
{
    // Box a double/float32: transfer the 64 floating-point bits into the
    // payload and type core registers.
    const LDefinition *payload = box->getDef(PAYLOAD_INDEX);
    const LDefinition *type = box->getDef(TYPE_INDEX);
    const LAllocation *in = box->getOperand(0);

    FloatRegister reg = ToFloatRegister(in);
    // Float32 values are boxed as doubles: widen via the scratch register.
    if (box->type() == MIRType_Float32) {
        masm.convertFloat32ToDouble(reg, ScratchFloatReg);
        reg = ScratchFloatReg;
    }

    // Move both halves of the double out of the VFP register in one vxfer
    // (presumably low word -> payload, high word -> type, matching the
    // nunbox layout — confirm against ma_vxfer).
    masm.ma_vxfer(VFPRegister(reg), ToRegister(payload), ToRegister(type));
    return true;
}
michael@0 1388
michael@0 1389 bool
michael@0 1390 CodeGeneratorARM::visitUnbox(LUnbox *unbox)
michael@0 1391 {
michael@0 1392 // Note that for unbox, the type and payload indexes are switched on the
michael@0 1393 // inputs.
michael@0 1394 MUnbox *mir = unbox->mir();
michael@0 1395 Register type = ToRegister(unbox->type());
michael@0 1396
michael@0 1397 if (mir->fallible()) {
michael@0 1398 masm.ma_cmp(type, Imm32(MIRTypeToTag(mir->type())));
michael@0 1399 if (!bailoutIf(Assembler::NotEqual, unbox->snapshot()))
michael@0 1400 return false;
michael@0 1401 }
michael@0 1402 return true;
michael@0 1403 }
michael@0 1404
michael@0 1405 bool
michael@0 1406 CodeGeneratorARM::visitDouble(LDouble *ins)
michael@0 1407 {
michael@0 1408
michael@0 1409 const LDefinition *out = ins->getDef(0);
michael@0 1410
michael@0 1411 masm.ma_vimm(ins->getDouble(), ToFloatRegister(out));
michael@0 1412 return true;
michael@0 1413 }
michael@0 1414
michael@0 1415 bool
michael@0 1416 CodeGeneratorARM::visitFloat32(LFloat32 *ins)
michael@0 1417 {
michael@0 1418 const LDefinition *out = ins->getDef(0);
michael@0 1419 masm.loadConstantFloat32(ins->getFloat(), ToFloatRegister(out));
michael@0 1420 return true;
michael@0 1421 }
michael@0 1422
michael@0 1423 Register
michael@0 1424 CodeGeneratorARM::splitTagForTest(const ValueOperand &value)
michael@0 1425 {
michael@0 1426 return value.typeReg();
michael@0 1427 }
michael@0 1428
bool
CodeGeneratorARM::visitTestDAndBranch(LTestDAndBranch *test)
{
    // Branch on the truthiness of a double: false for 0, -0 and NaN.
    const LAllocation *opd = test->input();
    masm.ma_vcmpz(ToFloatRegister(opd));
    // Copy the VFP status flags into the core CPSR so we can branch on them.
    masm.as_vmrs(pc);

    MBasicBlock *ifTrue = test->ifTrue();
    MBasicBlock *ifFalse = test->ifFalse();
    // If the compare set the zero flag, then the result is definitely false.
    jumpToBlock(ifFalse, Assembler::Zero);
    // It is also false if the operand is NaN, which an unordered compare
    // reports via the overflow (V) flag.
    jumpToBlock(ifFalse, Assembler::Overflow);
    jumpToBlock(ifTrue);
    return true;
}
michael@0 1447
bool
CodeGeneratorARM::visitTestFAndBranch(LTestFAndBranch *test)
{
    // Branch on the truthiness of a float32: false for 0, -0 and NaN.
    const LAllocation *opd = test->input();
    masm.ma_vcmpz_f32(ToFloatRegister(opd));
    // Copy the VFP status flags into the core CPSR so we can branch on them.
    masm.as_vmrs(pc);

    MBasicBlock *ifTrue = test->ifTrue();
    MBasicBlock *ifFalse = test->ifFalse();
    // If the compare set the zero flag, then the result is definitely false.
    jumpToBlock(ifFalse, Assembler::Zero);
    // It is also false if the operand is NaN, which an unordered compare
    // reports via the overflow (V) flag.
    jumpToBlock(ifFalse, Assembler::Overflow);
    jumpToBlock(ifTrue);
    return true;
}
michael@0 1466
michael@0 1467 bool
michael@0 1468 CodeGeneratorARM::visitCompareD(LCompareD *comp)
michael@0 1469 {
michael@0 1470 FloatRegister lhs = ToFloatRegister(comp->left());
michael@0 1471 FloatRegister rhs = ToFloatRegister(comp->right());
michael@0 1472
michael@0 1473 Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
michael@0 1474 masm.compareDouble(lhs, rhs);
michael@0 1475 masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()));
michael@0 1476 return true;
michael@0 1477 }
michael@0 1478
michael@0 1479 bool
michael@0 1480 CodeGeneratorARM::visitCompareF(LCompareF *comp)
michael@0 1481 {
michael@0 1482 FloatRegister lhs = ToFloatRegister(comp->left());
michael@0 1483 FloatRegister rhs = ToFloatRegister(comp->right());
michael@0 1484
michael@0 1485 Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
michael@0 1486 masm.compareFloat(lhs, rhs);
michael@0 1487 masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()));
michael@0 1488 return true;
michael@0 1489 }
michael@0 1490
michael@0 1491 bool
michael@0 1492 CodeGeneratorARM::visitCompareDAndBranch(LCompareDAndBranch *comp)
michael@0 1493 {
michael@0 1494 FloatRegister lhs = ToFloatRegister(comp->left());
michael@0 1495 FloatRegister rhs = ToFloatRegister(comp->right());
michael@0 1496
michael@0 1497 Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
michael@0 1498 masm.compareDouble(lhs, rhs);
michael@0 1499 emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse());
michael@0 1500 return true;
michael@0 1501 }
michael@0 1502
michael@0 1503 bool
michael@0 1504 CodeGeneratorARM::visitCompareFAndBranch(LCompareFAndBranch *comp)
michael@0 1505 {
michael@0 1506 FloatRegister lhs = ToFloatRegister(comp->left());
michael@0 1507 FloatRegister rhs = ToFloatRegister(comp->right());
michael@0 1508
michael@0 1509 Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
michael@0 1510 masm.compareFloat(lhs, rhs);
michael@0 1511 emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse());
michael@0 1512 return true;
michael@0 1513 }
michael@0 1514
bool
CodeGeneratorARM::visitCompareB(LCompareB *lir)
{
    // Strict equality of a boxed Value against a boolean (constant or
    // register), materializing the boolean result.
    MCompare *mir = lir->mir();

    const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
    const LAllocation *rhs = lir->rhs();
    const Register output = ToRegister(lir->output());

    // Only strict (non-coercing) comparisons are handled here.
    JS_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

    Label notBoolean, done;
    masm.branchTestBoolean(Assembler::NotEqual, lhs, &notBoolean);
    {
        // lhs is a boolean: the payload comparison decides the result.
        if (rhs->isConstant())
            masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
        else
            masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
        masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
        masm.jump(&done);
    }

    masm.bind(&notBoolean);
    {
        // Different types: strict equality is false, so STRICTNE is true.
        masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output);
    }

    masm.bind(&done);
    return true;
}
michael@0 1545
bool
CodeGeneratorARM::visitCompareBAndBranch(LCompareBAndBranch *lir)
{
    // Branching form of CompareB: strict-compare a boxed Value against a
    // boolean and jump to the matching successor.
    MCompare *mir = lir->cmpMir();
    const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
    const LAllocation *rhs = lir->rhs();

    JS_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

    // If lhs is not even a boolean, the strict comparison is decided
    // outright: false for STRICTEQ (branch to ifFalse), true for STRICTNE.
    Assembler::Condition cond = masm.testBoolean(Assembler::NotEqual, lhs);
    jumpToBlock((mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue(), cond);

    // Otherwise compare the boolean payloads and branch on the result.
    if (rhs->isConstant())
        masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
    else
        masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
    emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
    return true;
}
michael@0 1565
// (Strict) (in)equality of two boxed Values. With nunbox32, two Values are
// equal only if both the type tags and the payload words are equal; when
// the tags differ the result follows from the comparison's polarity alone.
bool
CodeGeneratorARM::visitCompareV(LCompareV *lir)
{
    MCompare *mir = lir->mir();
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
    const ValueOperand lhs = ToValue(lir, LCompareV::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareV::RhsInput);
    const Register output = ToRegister(lir->output());

    JS_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
              mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);

    Label notEqual, done;
    masm.cmp32(lhs.typeReg(), rhs.typeReg());
    masm.j(Assembler::NotEqual, &notEqual);
    {
        // Same type tags: the payload comparison decides the result.
        masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
        masm.emitSet(cond, output);
        masm.jump(&done);
    }
    masm.bind(&notEqual);
    {
        // Different type tags: equality is false, inequality is true.
        masm.move32(Imm32(cond == Assembler::NotEqual), output);
    }

    masm.bind(&done);
    return true;
}
michael@0 1594
// Branching variant of visitCompareV: compare two boxed Values and jump to
// the matching successor block.
bool
CodeGeneratorARM::visitCompareVAndBranch(LCompareVAndBranch *lir)
{
    MCompare *mir = lir->cmpMir();
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
    const ValueOperand lhs = ToValue(lir, LCompareVAndBranch::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareVAndBranch::RhsInput);

    JS_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
              mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);

    // Differing type tags mean the Values cannot be equal, so choose the
    // successor an unequal comparison would take.
    MBasicBlock *notEqual = (cond == Assembler::Equal) ? lir->ifFalse() : lir->ifTrue();

    masm.cmp32(lhs.typeReg(), rhs.typeReg());
    jumpToBlock(notEqual, Assembler::NotEqual);
    // Same type tags: branch on the payload comparison.
    masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
    emitBranch(cond, lir->ifTrue(), lir->ifFalse());

    return true;
}
michael@0 1615
michael@0 1616 bool
michael@0 1617 CodeGeneratorARM::visitBitAndAndBranch(LBitAndAndBranch *baab)
michael@0 1618 {
michael@0 1619 if (baab->right()->isConstant())
michael@0 1620 masm.ma_tst(ToRegister(baab->left()), Imm32(ToInt32(baab->right())));
michael@0 1621 else
michael@0 1622 masm.ma_tst(ToRegister(baab->left()), ToRegister(baab->right()));
michael@0 1623 emitBranch(Assembler::NonZero, baab->ifTrue(), baab->ifFalse());
michael@0 1624 return true;
michael@0 1625 }
michael@0 1626
michael@0 1627 bool
michael@0 1628 CodeGeneratorARM::visitAsmJSUInt32ToDouble(LAsmJSUInt32ToDouble *lir)
michael@0 1629 {
michael@0 1630 masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output()));
michael@0 1631 return true;
michael@0 1632 }
michael@0 1633
michael@0 1634 bool
michael@0 1635 CodeGeneratorARM::visitAsmJSUInt32ToFloat32(LAsmJSUInt32ToFloat32 *lir)
michael@0 1636 {
michael@0 1637 masm.convertUInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output()));
michael@0 1638 return true;
michael@0 1639 }
michael@0 1640
michael@0 1641 bool
michael@0 1642 CodeGeneratorARM::visitNotI(LNotI *ins)
michael@0 1643 {
michael@0 1644 // It is hard to optimize !x, so just do it the basic way for now.
michael@0 1645 masm.ma_cmp(ToRegister(ins->input()), Imm32(0));
michael@0 1646 masm.emitSet(Assembler::Equal, ToRegister(ins->output()));
michael@0 1647 return true;
michael@0 1648 }
michael@0 1649
michael@0 1650 bool
michael@0 1651 CodeGeneratorARM::visitNotD(LNotD *ins)
michael@0 1652 {
michael@0 1653 // Since this operation is not, we want to set a bit if
michael@0 1654 // the double is falsey, which means 0.0, -0.0 or NaN.
michael@0 1655 // when comparing with 0, an input of 0 will set the Z bit (30)
michael@0 1656 // and NaN will set the V bit (28) of the APSR.
michael@0 1657 FloatRegister opd = ToFloatRegister(ins->input());
michael@0 1658 Register dest = ToRegister(ins->output());
michael@0 1659
michael@0 1660 // Do the compare
michael@0 1661 masm.ma_vcmpz(opd);
michael@0 1662 // TODO There are three variations here to compare performance-wise.
michael@0 1663 bool nocond = true;
michael@0 1664 if (nocond) {
michael@0 1665 // Load the value into the dest register
michael@0 1666 masm.as_vmrs(dest);
michael@0 1667 masm.ma_lsr(Imm32(28), dest, dest);
michael@0 1668 masm.ma_alu(dest, lsr(dest, 2), dest, op_orr); // 28 + 2 = 30
michael@0 1669 masm.ma_and(Imm32(1), dest);
michael@0 1670 } else {
michael@0 1671 masm.as_vmrs(pc);
michael@0 1672 masm.ma_mov(Imm32(0), dest);
michael@0 1673 masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Equal);
michael@0 1674 masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Overflow);
michael@0 1675 }
michael@0 1676 return true;
michael@0 1677 }
michael@0 1678
michael@0 1679 bool
michael@0 1680 CodeGeneratorARM::visitNotF(LNotF *ins)
michael@0 1681 {
michael@0 1682 // Since this operation is not, we want to set a bit if
michael@0 1683 // the double is falsey, which means 0.0, -0.0 or NaN.
michael@0 1684 // when comparing with 0, an input of 0 will set the Z bit (30)
michael@0 1685 // and NaN will set the V bit (28) of the APSR.
michael@0 1686 FloatRegister opd = ToFloatRegister(ins->input());
michael@0 1687 Register dest = ToRegister(ins->output());
michael@0 1688
michael@0 1689 // Do the compare
michael@0 1690 masm.ma_vcmpz_f32(opd);
michael@0 1691 // TODO There are three variations here to compare performance-wise.
michael@0 1692 bool nocond = true;
michael@0 1693 if (nocond) {
michael@0 1694 // Load the value into the dest register
michael@0 1695 masm.as_vmrs(dest);
michael@0 1696 masm.ma_lsr(Imm32(28), dest, dest);
michael@0 1697 masm.ma_alu(dest, lsr(dest, 2), dest, op_orr); // 28 + 2 = 30
michael@0 1698 masm.ma_and(Imm32(1), dest);
michael@0 1699 } else {
michael@0 1700 masm.as_vmrs(pc);
michael@0 1701 masm.ma_mov(Imm32(0), dest);
michael@0 1702 masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Equal);
michael@0 1703 masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Overflow);
michael@0 1704 }
michael@0 1705 return true;
michael@0 1706 }
michael@0 1707
michael@0 1708 bool
michael@0 1709 CodeGeneratorARM::visitLoadSlotV(LLoadSlotV *load)
michael@0 1710 {
michael@0 1711 const ValueOperand out = ToOutValue(load);
michael@0 1712 Register base = ToRegister(load->input());
michael@0 1713 int32_t offset = load->mir()->slot() * sizeof(js::Value);
michael@0 1714
michael@0 1715 masm.loadValue(Address(base, offset), out);
michael@0 1716 return true;
michael@0 1717 }
michael@0 1718
michael@0 1719 bool
michael@0 1720 CodeGeneratorARM::visitLoadSlotT(LLoadSlotT *load)
michael@0 1721 {
michael@0 1722 Register base = ToRegister(load->input());
michael@0 1723 int32_t offset = load->mir()->slot() * sizeof(js::Value);
michael@0 1724
michael@0 1725 if (load->mir()->type() == MIRType_Double)
michael@0 1726 masm.loadInt32OrDouble(Operand(base, offset), ToFloatRegister(load->output()));
michael@0 1727 else
michael@0 1728 masm.ma_ldr(Operand(base, offset + NUNBOX32_PAYLOAD_OFFSET), ToRegister(load->output()));
michael@0 1729 return true;
michael@0 1730 }
michael@0 1731
// Store a typed (unboxed) value into a slot. The GC pre-barrier, the type
// tag, and the payload must be emitted in this order.
bool
CodeGeneratorARM::visitStoreSlotT(LStoreSlotT *store)
{
    Register base = ToRegister(store->slots());
    int32_t offset = store->mir()->slot() * sizeof(js::Value);

    const LAllocation *value = store->value();
    MIRType valueType = store->mir()->value()->type();

    // The pre-barrier must observe the old slot contents before they are
    // overwritten.
    if (store->mir()->needsBarrier())
        emitPreBarrier(Address(base, offset), store->mir()->slotType());

    if (valueType == MIRType_Double) {
        // A double overwrites the entire Value, tag included.
        masm.ma_vstr(ToFloatRegister(value), Operand(base, offset));
        return true;
    }

    // Store the type tag if needed.
    if (valueType != store->mir()->slotType())
        masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), Operand(base, offset));

    // Store the payload.
    if (value->isConstant())
        masm.storePayload(*value->toConstant(), Operand(base, offset));
    else
        masm.storePayload(ToRegister(value), Operand(base, offset));

    return true;
}
michael@0 1762
// Load a typed element from a dense-element store. The index may be a
// constant or a register; elements are Value-sized (8 bytes) apart.
bool
CodeGeneratorARM::visitLoadElementT(LLoadElementT *load)
{
    Register base = ToRegister(load->elements());
    if (load->mir()->type() == MIRType_Double) {
        FloatRegister fpreg = ToFloatRegister(load->output());
        if (load->index()->isConstant()) {
            Address source(base, ToInt32(load->index()) * sizeof(Value));
            // loadDoubles() means the elements were already converted to
            // doubles in place, so a plain double load suffices.
            if (load->mir()->loadDoubles())
                masm.loadDouble(source, fpreg);
            else
                masm.loadInt32OrDouble(source, fpreg);
        } else {
            Register index = ToRegister(load->index());
            if (load->mir()->loadDoubles())
                masm.loadDouble(BaseIndex(base, index, TimesEight), fpreg);
            else
                masm.loadInt32OrDouble(base, index, fpreg);
        }
    } else {
        if (load->index()->isConstant()) {
            Address source(base, ToInt32(load->index()) * sizeof(Value));
            masm.load32(source, ToRegister(load->output()));
        } else {
            // Scale the index by 8 (LSL #3) to address the payload word.
            masm.ma_ldr(DTRAddr(base, DtrRegImmShift(ToRegister(load->index()), LSL, 3)),
                        ToRegister(load->output()));
        }
    }
    // Hole checks never reach this typed path.
    JS_ASSERT(!load->mir()->needsHoleCheck());
    return true;
}
michael@0 1794
// Store a typed value into a dense-element store at |index| (constant or
// register). Mirrors visitStoreSlotT's tag/payload protocol, but addresses
// elements rather than fixed slots.
void
CodeGeneratorARM::storeElementTyped(const LAllocation *value, MIRType valueType, MIRType elementType,
                                    const Register &elements, const LAllocation *index)
{
    if (index->isConstant()) {
        Address dest = Address(elements, ToInt32(index) * sizeof(Value));
        if (valueType == MIRType_Double) {
            // A double overwrites the entire Value, tag included.
            masm.ma_vstr(ToFloatRegister(value), Operand(dest));
            return;
        }

        // Store the type tag if needed.
        if (valueType != elementType)
            masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest);

        // Store the payload.
        if (value->isConstant())
            masm.storePayload(*value->toConstant(), dest);
        else
            masm.storePayload(ToRegister(value), dest);
    } else {
        Register indexReg = ToRegister(index);
        if (valueType == MIRType_Double) {
            masm.ma_vstr(ToFloatRegister(value), elements, indexReg);
            return;
        }

        // Store the type tag if needed.
        if (valueType != elementType)
            masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), elements, indexReg);

        // Store the payload.
        if (value->isConstant())
            masm.storePayload(*value->toConstant(), elements, indexReg);
        else
            masm.storePayload(ToRegister(value), elements, indexReg);
    }
}
michael@0 1833
michael@0 1834 bool
michael@0 1835 CodeGeneratorARM::visitGuardShape(LGuardShape *guard)
michael@0 1836 {
michael@0 1837 Register obj = ToRegister(guard->input());
michael@0 1838 Register tmp = ToRegister(guard->tempInt());
michael@0 1839
michael@0 1840 masm.ma_ldr(DTRAddr(obj, DtrOffImm(JSObject::offsetOfShape())), tmp);
michael@0 1841 masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->shape()));
michael@0 1842
michael@0 1843 return bailoutIf(Assembler::NotEqual, guard->snapshot());
michael@0 1844 }
michael@0 1845
michael@0 1846 bool
michael@0 1847 CodeGeneratorARM::visitGuardObjectType(LGuardObjectType *guard)
michael@0 1848 {
michael@0 1849 Register obj = ToRegister(guard->input());
michael@0 1850 Register tmp = ToRegister(guard->tempInt());
michael@0 1851
michael@0 1852 masm.ma_ldr(DTRAddr(obj, DtrOffImm(JSObject::offsetOfType())), tmp);
michael@0 1853 masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->typeObject()));
michael@0 1854
michael@0 1855 Assembler::Condition cond =
michael@0 1856 guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
michael@0 1857 return bailoutIf(cond, guard->snapshot());
michael@0 1858 }
michael@0 1859
michael@0 1860 bool
michael@0 1861 CodeGeneratorARM::visitGuardClass(LGuardClass *guard)
michael@0 1862 {
michael@0 1863 Register obj = ToRegister(guard->input());
michael@0 1864 Register tmp = ToRegister(guard->tempInt());
michael@0 1865
michael@0 1866 masm.loadObjClass(obj, tmp);
michael@0 1867 masm.ma_cmp(tmp, Imm32((uint32_t)guard->mir()->getClass()));
michael@0 1868 if (!bailoutIf(Assembler::NotEqual, guard->snapshot()))
michael@0 1869 return false;
michael@0 1870 return true;
michael@0 1871 }
michael@0 1872
// Compute the implicit |this| for a call to |callee|, bailing out when the
// callee's environment is not the current global.
bool
CodeGeneratorARM::visitImplicitThis(LImplicitThis *lir)
{
    Register callee = ToRegister(lir->callee());
    const ValueOperand out = ToOutValue(lir);

    // The implicit |this| is always |undefined| if the function's environment
    // is the current global. out.typeReg() is borrowed as a scratch for the
    // environment pointer; it is fully overwritten by moveValue below.
    masm.ma_ldr(DTRAddr(callee, DtrOffImm(JSFunction::offsetOfEnvironment())), out.typeReg());
    masm.ma_cmp(out.typeReg(), ImmGCPtr(&gen->info().script()->global()));

    // TODO: OOL stub path.
    if (!bailoutIf(Assembler::NotEqual, lir->snapshot()))
        return false;

    masm.moveValue(UndefinedValue(), out);
    return true;
}
michael@0 1891
// Poll the runtime's interrupt flag and, when it is set, take an
// out-of-line path that calls into the VM.
bool
CodeGeneratorARM::visitInterruptCheck(LInterruptCheck *lir)
{
    OutOfLineCode *ool = oolCallVM(InterruptCheckInfo, lir, (ArgList()), StoreNothing());
    if (!ool)
        return false;

    void *interrupt = (void*)GetIonContext()->runtime->addressOfInterrupt();
    // NOTE(review): lr is used as a scratch register for the flag load —
    // presumably it is dead at interrupt-check sites; confirm.
    masm.load32(AbsoluteAddress(interrupt), lr);
    masm.ma_cmp(lr, Imm32(0));
    masm.ma_b(ool->entry(), Assembler::NonZero);
    masm.bind(ool->rejoin());
    return true;
}
michael@0 1906
// Emit the epilogue jumped to when this script is invalidated: it pushes
// the return address and the (patched-in) IonScript pointer, then jumps to
// the shared invalidation thunk.
bool
CodeGeneratorARM::generateInvalidateEpilogue()
{
    // Ensure that there is enough space in the buffer for the OsiPoint
    // patching to occur. Otherwise, we could overwrite the invalidation
    // epilogue.
    for (size_t i = 0; i < sizeof(void *); i += Assembler::nopSize())
        masm.nop();

    masm.bind(&invalidate_);

    // Push the return address of the point that we bailed out at onto the stack.
    masm.Push(lr);

    // Push the Ion script onto the stack (when we determine what that pointer is).
    invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
    JitCode *thunk = gen->jitRuntime()->getInvalidationThunk();

    masm.branch(thunk);

    // We should never reach this point in JIT code -- the invalidation thunk
    // should pop the invalidated JS frame and return directly to its caller.
    masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
    return true;
}
michael@0 1932
// Pick the scratch register a dispatch-style IC uses when appending a new
// stub. |ins| is unused on ARM.
void
DispatchIonCache::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
{
    // Can always use the scratch register on ARM.
    addState->dispatchScratch = ScratchRegister;
}
michael@0 1939
michael@0 1940 template <class U>
michael@0 1941 Register
michael@0 1942 getBase(U *mir)
michael@0 1943 {
michael@0 1944 switch (mir->base()) {
michael@0 1945 case U::Heap: return HeapReg;
michael@0 1946 case U::Global: return GlobalReg;
michael@0 1947 }
michael@0 1948 return InvalidReg;
michael@0 1949 }
michael@0 1950
// Static typed-array element loads are not yet implemented on ARM; LIR
// lowering must not produce this node for this target.
bool
CodeGeneratorARM::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins)
{
    MOZ_ASSUME_UNREACHABLE("NYI");
}
michael@0 1956
// Static typed-array element stores are not yet implemented on ARM; LIR
// lowering must not produce this node for this target.
bool
CodeGeneratorARM::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins)
{
    MOZ_ASSUME_UNREACHABLE("NYI");
}
michael@0 1962
// Load from the asm.js heap. Out-of-bounds accesses do not fault: when a
// bounds check is required, the load is predicated on the pointer being
// below the heap length, and the output is otherwise set to 0 (integers)
// or NaN (floats).
bool
CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
{
    const MAsmJSLoadHeap *mir = ins->mir();
    // Decode the view type into transfer size, signedness, and whether the
    // access goes through the VFP unit.
    bool isSigned;
    int size;
    bool isFloat = false;
    switch (mir->viewType()) {
      case ArrayBufferView::TYPE_INT8:    isSigned = true;  size = 8;  break;
      case ArrayBufferView::TYPE_UINT8:   isSigned = false; size = 8;  break;
      case ArrayBufferView::TYPE_INT16:   isSigned = true;  size = 16; break;
      case ArrayBufferView::TYPE_UINT16:  isSigned = false; size = 16; break;
      case ArrayBufferView::TYPE_INT32:
      case ArrayBufferView::TYPE_UINT32:  isSigned = true;  size = 32; break;
      case ArrayBufferView::TYPE_FLOAT64: isFloat = true;   size = 64; break;
      case ArrayBufferView::TYPE_FLOAT32: isFloat = true;   size = 32; break;
      default: MOZ_ASSUME_UNREACHABLE("unexpected array type");
    }

    const LAllocation *ptr = ins->ptr();

    if (ptr->isConstant()) {
        // A constant pointer has already been proven in range.
        JS_ASSERT(mir->skipBoundsCheck());
        int32_t ptrImm = ptr->toConstant()->toInt32();
        JS_ASSERT(ptrImm >= 0);
        if (isFloat) {
            VFPRegister vd(ToFloatRegister(ins->output()));
            if (size == 32)
                masm.ma_vldr(Operand(HeapReg, ptrImm), vd.singleOverlay(), Assembler::Always);
            else
                masm.ma_vldr(Operand(HeapReg, ptrImm), vd, Assembler::Always);
        } else {
            masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, Imm32(ptrImm),
                                  ToRegister(ins->output()), Offset, Assembler::Always);
        }
        return true;
    }

    Register ptrReg = ToRegister(ptr);

    if (mir->skipBoundsCheck()) {
        // No bounds check needed: emit an unconditional load.
        if (isFloat) {
            VFPRegister vd(ToFloatRegister(ins->output()));
            if (size == 32)
                masm.ma_vldr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Always);
            else
                masm.ma_vldr(vd, HeapReg, ptrReg, 0, Assembler::Always);
        } else {
            masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg,
                                  ToRegister(ins->output()), Offset, Assembler::Always);
        }
        return true;
    }

    // Compare the pointer against the heap length (patched in later via the
    // recorded AsmJSHeapAccess); the load and the out-of-bounds default are
    // both conditionally executed on the result.
    BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
    if (isFloat) {
        FloatRegister dst = ToFloatRegister(ins->output());
        VFPRegister vd(dst);
        if (size == 32) {
            // Out of bounds: NaN (narrowed to float32); in bounds: the load.
            masm.convertDoubleToFloat32(NANReg, dst, Assembler::AboveOrEqual);
            masm.ma_vldr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Below);
        } else {
            masm.ma_vmov(NANReg, dst, Assembler::AboveOrEqual);
            masm.ma_vldr(vd, HeapReg, ptrReg, 0, Assembler::Below);
        }
    } else {
        // Out of bounds: 0; in bounds: the load.
        Register d = ToRegister(ins->output());
        masm.ma_mov(Imm32(0), d, NoSetCond, Assembler::AboveOrEqual);
        masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, d, Offset, Assembler::Below);
    }
    // Record the bounds-check site so the heap length can be patched.
    return masm.append(AsmJSHeapAccess(bo.getOffset()));
}
michael@0 2035
michael@0 2036 bool
michael@0 2037 CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
michael@0 2038 {
michael@0 2039 const MAsmJSStoreHeap *mir = ins->mir();
michael@0 2040 bool isSigned;
michael@0 2041 int size;
michael@0 2042 bool isFloat = false;
michael@0 2043 switch (mir->viewType()) {
michael@0 2044 case ArrayBufferView::TYPE_INT8:
michael@0 2045 case ArrayBufferView::TYPE_UINT8: isSigned = false; size = 8; break;
michael@0 2046 case ArrayBufferView::TYPE_INT16:
michael@0 2047 case ArrayBufferView::TYPE_UINT16: isSigned = false; size = 16; break;
michael@0 2048 case ArrayBufferView::TYPE_INT32:
michael@0 2049 case ArrayBufferView::TYPE_UINT32: isSigned = true; size = 32; break;
michael@0 2050 case ArrayBufferView::TYPE_FLOAT64: isFloat = true; size = 64; break;
michael@0 2051 case ArrayBufferView::TYPE_FLOAT32: isFloat = true; size = 32; break;
michael@0 2052 default: MOZ_ASSUME_UNREACHABLE("unexpected array type");
michael@0 2053 }
michael@0 2054 const LAllocation *ptr = ins->ptr();
michael@0 2055 if (ptr->isConstant()) {
michael@0 2056 JS_ASSERT(mir->skipBoundsCheck());
michael@0 2057 int32_t ptrImm = ptr->toConstant()->toInt32();
michael@0 2058 JS_ASSERT(ptrImm >= 0);
michael@0 2059 if (isFloat) {
michael@0 2060 VFPRegister vd(ToFloatRegister(ins->value()));
michael@0 2061 if (size == 32)
michael@0 2062 masm.ma_vstr(vd.singleOverlay(), Operand(HeapReg, ptrImm), Assembler::Always);
michael@0 2063 else
michael@0 2064 masm.ma_vstr(vd, Operand(HeapReg, ptrImm), Assembler::Always);
michael@0 2065 } else {
michael@0 2066 masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, Imm32(ptrImm),
michael@0 2067 ToRegister(ins->value()), Offset, Assembler::Always);
michael@0 2068 }
michael@0 2069 return true;
michael@0 2070 }
michael@0 2071
michael@0 2072 Register ptrReg = ToRegister(ptr);
michael@0 2073
michael@0 2074 if (mir->skipBoundsCheck()) {
michael@0 2075 Register ptrReg = ToRegister(ptr);
michael@0 2076 if (isFloat) {
michael@0 2077 VFPRegister vd(ToFloatRegister(ins->value()));
michael@0 2078 if (size == 32)
michael@0 2079 masm.ma_vstr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Always);
michael@0 2080 else
michael@0 2081 masm.ma_vstr(vd, HeapReg, ptrReg, 0, Assembler::Always);
michael@0 2082 } else {
michael@0 2083 masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg,
michael@0 2084 ToRegister(ins->value()), Offset, Assembler::Always);
michael@0 2085 }
michael@0 2086 return true;
michael@0 2087 }
michael@0 2088
michael@0 2089 BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
michael@0 2090 if (isFloat) {
michael@0 2091 VFPRegister vd(ToFloatRegister(ins->value()));
michael@0 2092 if (size == 32)
michael@0 2093 masm.ma_vstr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Below);
michael@0 2094 else
michael@0 2095 masm.ma_vstr(vd, HeapReg, ptrReg, 0, Assembler::Below);
michael@0 2096 } else {
michael@0 2097 masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg,
michael@0 2098 ToRegister(ins->value()), Offset, Assembler::Below);
michael@0 2099 }
michael@0 2100 return masm.append(AsmJSHeapAccess(bo.getOffset()));
michael@0 2101 }
michael@0 2102
michael@0 2103 bool
michael@0 2104 CodeGeneratorARM::visitAsmJSPassStackArg(LAsmJSPassStackArg *ins)
michael@0 2105 {
michael@0 2106 const MAsmJSPassStackArg *mir = ins->mir();
michael@0 2107 Operand dst(StackPointer, mir->spOffset());
michael@0 2108 if (ins->arg()->isConstant()) {
michael@0 2109 //masm.as_bkpt();
michael@0 2110 masm.ma_storeImm(Imm32(ToInt32(ins->arg())), dst);
michael@0 2111 } else {
michael@0 2112 if (ins->arg()->isGeneralReg())
michael@0 2113 masm.ma_str(ToRegister(ins->arg()), dst);
michael@0 2114 else
michael@0 2115 masm.ma_vstr(ToFloatRegister(ins->arg()), dst);
michael@0 2116 }
michael@0 2117
michael@0 2118 return true;
michael@0 2119 }
michael@0 2120
// Unsigned 32-bit division using the hardware udiv instruction.
bool
CodeGeneratorARM::visitUDiv(LUDiv *ins)
{
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());

    Label done;
    if (ins->mir()->canBeDivideByZero()) {
        masm.ma_cmp(rhs, Imm32(0));
        if (ins->mir()->isTruncated()) {
            // Truncated semantics: x / 0 | 0 == 0, so skip the divide and
            // produce 0 directly. (Infinity|0 == 0.)
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            // Non-truncated: a zero divisor must bail out.
            JS_ASSERT(ins->mir()->fallible());
            if (!bailoutIf(Assembler::Equal, ins->snapshot()))
                return false;
        }
    }

    masm.ma_udiv(lhs, rhs, output);

    // A non-truncated result must be representable as an int32; bail out
    // when the unsigned quotient has the sign bit set.
    if (!ins->mir()->isTruncated()) {
        masm.ma_cmp(output, Imm32(0));
        if (!bailoutIf(Assembler::LessThan, ins->snapshot()))
            return false;
    }

    masm.bind(&done);
    return true;
}
michael@0 2156
// Unsigned 32-bit modulus using the hardware divide-based umod sequence.
bool
CodeGeneratorARM::visitUMod(LUMod *ins)
{
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
    Label done;

    if (ins->mir()->canBeDivideByZero()) {
        masm.ma_cmp(rhs, Imm32(0));
        if (ins->mir()->isTruncated()) {
            // Truncated semantics: x % 0 | 0 == 0, so skip the operation and
            // produce 0 directly. (Infinity|0 == 0.)
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            // Non-truncated: a zero divisor must bail out.
            JS_ASSERT(ins->mir()->fallible());
            if (!bailoutIf(Assembler::Equal, ins->snapshot()))
                return false;
        }
    }

    masm.ma_umod(lhs, rhs, output);

    // A non-truncated result must be representable as an int32; bail out
    // when the unsigned remainder has the sign bit set.
    if (!ins->mir()->isTruncated()) {
        masm.ma_cmp(output, Imm32(0));
        if (!bailoutIf(Assembler::LessThan, ins->snapshot()))
            return false;
    }

    masm.bind(&done);
    return true;
}
michael@0 2192
// Unsigned division/modulus for targets without hardware udiv: call the
// ARM EABI __aeabi_uidivmod helper, which leaves the quotient in r0 and
// the remainder in r1.
bool
CodeGeneratorARM::visitSoftUDivOrMod(LSoftUDivOrMod *ins)
{
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());

    // The EABI call fixes the operand and result registers; the register
    // allocator must have pinned them accordingly.
    JS_ASSERT(lhs == r0);
    JS_ASSERT(rhs == r1);
    JS_ASSERT(ins->mirRaw()->isDiv() || ins->mirRaw()->isMod());
    JS_ASSERT_IF(ins->mirRaw()->isDiv(), output == r0);
    JS_ASSERT_IF(ins->mirRaw()->isMod(), output == r1);

    Label afterDiv;

    // Skip the helper for a zero divisor and produce 0 instead.
    masm.ma_cmp(rhs, Imm32(0));
    Label notzero;
    masm.ma_b(&notzero, Assembler::NonZero);
    masm.ma_mov(Imm32(0), output);
    masm.ma_b(&afterDiv);
    masm.bind(&notzero);

    masm.setupAlignedABICall(2);
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    // asm.js code must go through the patchable AsmJSImm call mechanism.
    if (gen->compilingAsmJS())
        masm.callWithABI(AsmJSImm_aeabi_uidivmod);
    else
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_uidivmod));

    masm.bind(&afterDiv);
    return true;
}
michael@0 2226
michael@0 2227 bool
michael@0 2228 CodeGeneratorARM::visitEffectiveAddress(LEffectiveAddress *ins)
michael@0 2229 {
michael@0 2230 const MEffectiveAddress *mir = ins->mir();
michael@0 2231 Register base = ToRegister(ins->base());
michael@0 2232 Register index = ToRegister(ins->index());
michael@0 2233 Register output = ToRegister(ins->output());
michael@0 2234 masm.as_add(output, base, lsl(index, mir->scale()));
michael@0 2235 masm.ma_add(Imm32(mir->displacement()), output);
michael@0 2236 return true;
michael@0 2237 }
michael@0 2238
// Load an asm.js global variable at a fixed offset from GlobalReg.
bool
CodeGeneratorARM::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
{
    const MAsmJSLoadGlobalVar *mir = ins->mir();
    unsigned addr = mir->globalDataOffset();
    if (mir->type() == MIRType_Int32) {
        masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr), ToRegister(ins->output()));
    } else if (mir->type() == MIRType_Float32) {
        // A float32 occupies the single-precision half of its VFP register.
        VFPRegister vd(ToFloatRegister(ins->output()));
        masm.ma_vldr(Operand(GlobalReg, addr), vd.singleOverlay());
    } else {
        masm.ma_vldr(Operand(GlobalReg, addr), ToFloatRegister(ins->output()));
    }
    return true;
}
michael@0 2254
michael@0 2255 bool
michael@0 2256 CodeGeneratorARM::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins)
michael@0 2257 {
michael@0 2258 const MAsmJSStoreGlobalVar *mir = ins->mir();
michael@0 2259
michael@0 2260 MIRType type = mir->value()->type();
michael@0 2261 JS_ASSERT(IsNumberType(type));
michael@0 2262 unsigned addr = mir->globalDataOffset();
michael@0 2263 if (mir->value()->type() == MIRType_Int32) {
michael@0 2264 masm.ma_dtr(IsStore, GlobalReg, Imm32(addr), ToRegister(ins->value()));
michael@0 2265 } else if (mir->value()->type() == MIRType_Float32) {
michael@0 2266 VFPRegister vd(ToFloatRegister(ins->value()));
michael@0 2267 masm.ma_vstr(vd.singleOverlay(), Operand(GlobalReg, addr));
michael@0 2268 } else {
michael@0 2269 masm.ma_vstr(ToFloatRegister(ins->value()), Operand(GlobalReg, addr));
michael@0 2270 }
michael@0 2271 return true;
michael@0 2272 }
michael@0 2273
// Load an entry from an asm.js function-pointer table:
// out = *(GlobalReg + globalDataOffset + (index << 2)).
bool
CodeGeneratorARM::visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins)
{
    const MAsmJSLoadFuncPtr *mir = ins->mir();

    Register index = ToRegister(ins->index());
    Register tmp = ToRegister(ins->temp());
    Register out = ToRegister(ins->output());
    unsigned addr = mir->globalDataOffset();
    // tmp = globalDataOffset + index*4, then load through GlobalReg.
    masm.ma_mov(Imm32(addr), tmp);
    masm.as_add(tmp, tmp, lsl(index, 2));
    masm.ma_ldr(DTRAddr(GlobalReg, DtrRegImmShift(tmp, LSL, 0)), out);

    return true;
}
michael@0 2289
michael@0 2290 bool
michael@0 2291 CodeGeneratorARM::visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins)
michael@0 2292 {
michael@0 2293 const MAsmJSLoadFFIFunc *mir = ins->mir();
michael@0 2294
michael@0 2295 masm.ma_ldr(Operand(GlobalReg, mir->globalDataOffset()), ToRegister(ins->output()));
michael@0 2296
michael@0 2297 return true;
michael@0 2298 }
michael@0 2299
michael@0 2300 bool
michael@0 2301 CodeGeneratorARM::visitNegI(LNegI *ins)
michael@0 2302 {
michael@0 2303 Register input = ToRegister(ins->input());
michael@0 2304 masm.ma_neg(input, ToRegister(ins->output()));
michael@0 2305 return true;
michael@0 2306 }
michael@0 2307
michael@0 2308 bool
michael@0 2309 CodeGeneratorARM::visitNegD(LNegD *ins)
michael@0 2310 {
michael@0 2311 FloatRegister input = ToFloatRegister(ins->input());
michael@0 2312 masm.ma_vneg(input, ToFloatRegister(ins->output()));
michael@0 2313 return true;
michael@0 2314 }
michael@0 2315
michael@0 2316 bool
michael@0 2317 CodeGeneratorARM::visitNegF(LNegF *ins)
michael@0 2318 {
michael@0 2319 FloatRegister input = ToFloatRegister(ins->input());
michael@0 2320 masm.ma_vneg_f32(input, ToFloatRegister(ins->output()));
michael@0 2321 return true;
michael@0 2322 }
michael@0 2323
// Parallel-execution (ForkJoin) slice acquisition is not yet implemented on
// ARM; lowering must not produce this node for this target.
bool
CodeGeneratorARM::visitForkJoinGetSlice(LForkJoinGetSlice *ins)
{
    MOZ_ASSUME_UNREACHABLE("NYI");
}
michael@0 2329
// The ForkJoin get-slice stub is not yet implemented on ARM; callers must
// not request it on this target.
JitCode *
JitRuntime::generateForkJoinGetSliceStub(JSContext *cx)
{
    MOZ_ASSUME_UNREACHABLE("NYI");
}

mercurial