js/src/jit/arm/Lowering-arm.cpp

Sat, 03 Jan 2015 20:18:00 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Sat, 03 Jan 2015 20:18:00 +0100
branch
TOR_BUG_3246
changeset 7
129ffea94266
permissions
-rw-r--r--

Conditionally enable double-key logic based on private browsing mode or the
privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts.
One open question: how to convince FindCookie callers to test the
condition themselves and pass a nullptr when double-key logic is disabled.

     1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     2  * vim: set ts=8 sts=4 et sw=4 tw=99:
     3  * This Source Code Form is subject to the terms of the Mozilla Public
     4  * License, v. 2.0. If a copy of the MPL was not distributed with this
     5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     7 #include "mozilla/MathAlgorithms.h"
     9 #include "jit/arm/Assembler-arm.h"
    10 #include "jit/Lowering.h"
    11 #include "jit/MIR.h"
    13 #include "jit/shared/Lowering-shared-inl.h"
    15 using namespace js;
    16 using namespace js::jit;
    18 using mozilla::FloorLog2;
// Lower a boxed Value operand into the two consecutive LIR operands it
// occupies on 32-bit ARM: the type tag at index n and the payload at n + 1.
// Returns false on OOM.
bool
LIRGeneratorARM::useBox(LInstruction *lir, size_t n, MDefinition *mir,
                        LUse::Policy policy, bool useAtStart)
{
    JS_ASSERT(mir->type() == MIRType_Value);

    // Make sure the producing instruction has virtual registers assigned.
    if (!ensureDefined(mir))
        return false;
    // Type half uses the producer's own vreg; the payload half lives in the
    // register reported by VirtualRegisterOfPayload().
    lir->setOperand(n, LUse(mir->virtualRegister(), policy, useAtStart));
    lir->setOperand(n + 1, LUse(VirtualRegisterOfPayload(mir), policy, useAtStart));
    return true;
}
// Like useBox(), but pins the two halves of the Value to specific physical
// registers: reg1 receives the type tag, reg2 the payload. Returns false on
// OOM.
bool
LIRGeneratorARM::useBoxFixed(LInstruction *lir, size_t n, MDefinition *mir, Register reg1,
                             Register reg2)
{
    JS_ASSERT(mir->type() == MIRType_Value);
    // The two halves must not alias the same physical register.
    JS_ASSERT(reg1 != reg2);

    if (!ensureDefined(mir))
        return false;
    lir->setOperand(n, LUse(reg1, mir->virtualRegister()));
    lir->setOperand(n + 1, LUse(reg2, VirtualRegisterOfPayload(mir)));
    return true;
}
// On ARM any general-purpose register can be used for byte-sized operations,
// so no restricted register class is needed (unlike x86).
LAllocation
LIRGeneratorARM::useByteOpRegister(MDefinition *mir)
{
    return useRegister(mir);
}
// Byte-op variant that also admits non-double constants; on ARM this is just
// the generic policy since there is no byte-register restriction.
LAllocation
LIRGeneratorARM::useByteOpRegisterOrNonDoubleConstant(MDefinition *mir)
{
    return useRegisterOrNonDoubleConstant(mir);
}
// Lower a double constant to a dedicated LDouble node defining mir's result.
bool
LIRGeneratorARM::lowerConstantDouble(double d, MInstruction *mir)
{
    return define(new(alloc()) LDouble(d), mir);
}
// Lower a float32 constant to a dedicated LFloat32 node defining mir's
// result. The parameter is already narrowed to float at the call site.
bool
LIRGeneratorARM::lowerConstantFloat32(float d, MInstruction *mir)
{
    return define(new(alloc()) LFloat32(d), mir);
}
    70 bool
    71 LIRGeneratorARM::visitConstant(MConstant *ins)
    72 {
    73     if (ins->type() == MIRType_Double)
    74         return lowerConstantDouble(ins->value().toDouble(), ins);
    76     if (ins->type() == MIRType_Float32)
    77         return lowerConstantFloat32(ins->value().toDouble(), ins);
    79     // Emit non-double constants at their uses.
    80     if (ins->canEmitAtUses())
    81         return emitAtUses(ins);
    83     return LIRGeneratorShared::visitConstant(ins);
    84 }
// Lower an MBox. A boxed Value occupies two vregs on ARM (type + payload);
// for non-float inputs we can reuse the input's register as the payload and
// only allocate a fresh vreg for the type tag.
bool
LIRGeneratorARM::visitBox(MBox *box)
{
    MDefinition *inner = box->getOperand(0);

    // If the box wrapped a double, it needs a new register.
    if (IsFloatingPointType(inner->type()))
        return defineBox(new(alloc()) LBoxFloatingPoint(useRegisterAtStart(inner), tempCopy(inner, 0),
                                                        inner->type()), box);

    // If all uses can materialize the box themselves, emit nothing here.
    if (box->canEmitAtUses())
        return emitAtUses(box);

    // Boxing a constant just defines the full Value directly.
    if (inner->isConstant())
        return defineBox(new(alloc()) LValue(inner->toConstant()->value()), box);

    LBox *lir = new(alloc()) LBox(use(inner), inner->type());

    // Otherwise, we should not define a new register for the payload portion
    // of the output, so bypass defineBox().
    uint32_t vreg = getVirtualRegister();
    if (vreg >= MAX_VIRTUAL_REGISTERS)
        return false;

    // Note that because we're using PASSTHROUGH, we do not change the type of
    // the definition. We also do not define the first output as "TYPE",
    // because it has no corresponding payload at (vreg + 1). Also note that
    // although we copy the input's original type for the payload half of the
    // definition, this is only for clarity. PASSTHROUGH definitions are
    // ignored.
    lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
    lir->setDef(1, LDefinition(inner->virtualRegister(), LDefinition::TypeFrom(inner->type()),
                               LDefinition::PASSTHROUGH));
    box->setVirtualRegister(vreg);
    return add(lir);
}
bool
LIRGeneratorARM::visitUnbox(MUnbox *unbox)
{
    // An unbox on arm reads in a type tag (either in memory or a register) and
    // a payload. Unlike most instructions consuming a box, we ask for the type
    // second, so that the result can re-use the first input.
    MDefinition *inner = unbox->getOperand(0);

    if (!ensureDefined(inner))
        return false;

    if (IsFloatingPointType(unbox->type())) {
        // Unboxing to a float needs both halves and a fresh FP output.
        LUnboxFloatingPoint *lir = new(alloc()) LUnboxFloatingPoint(unbox->type());
        if (unbox->fallible() && !assignSnapshot(lir, unbox->bailoutKind()))
            return false;
        if (!useBox(lir, LUnboxFloatingPoint::Input, inner))
            return false;
        return define(lir, unbox);
    }

    // Swap the order we use the box pieces so we can re-use the payload register.
    LUnbox *lir = new(alloc()) LUnbox;
    lir->setOperand(0, usePayloadInRegisterAtStart(inner));
    lir->setOperand(1, useType(inner, LUse::REGISTER));

    // Fallible unboxes bail out on a type-tag mismatch.
    if (unbox->fallible() && !assignSnapshot(lir, unbox->bailoutKind()))
        return false;

    // Note that PASSTHROUGH here is illegal, since types and payloads form two
    // separate intervals. If the type becomes dead before the payload, it
    // could be used as a Value without the type being recoverable. Unbox's
    // purpose is to eagerly kill the definition of a type tag, so keeping both
    // alive (for the purpose of gcmaps) is unappealing. Instead, we create a
    // new virtual register.
    return defineReuseInput(lir, unbox, 0);
}
// Lower MReturn: the returned Value is pinned to the JS return register pair
// (type tag and payload) expected by the calling convention.
bool
LIRGeneratorARM::visitReturn(MReturn *ret)
{
    MDefinition *opd = ret->getOperand(0);
    JS_ASSERT(opd->type() == MIRType_Value);

    LReturn *ins = new(alloc()) LReturn;
    ins->setOperand(0, LUse(JSReturnReg_Type));
    ins->setOperand(1, LUse(JSReturnReg_Data));
    return fillBoxUses(ins, 0, opd) && add(ins);
}
// Unary ALU lowering (e.g. x = !y): input in a register, output gets a fresh
// register of the MIR result type.
bool
LIRGeneratorARM::lowerForALU(LInstructionHelper<1, 1, 0> *ins, MDefinition *mir, MDefinition *input)
{
    ins->setOperand(0, useRegister(input));
    return define(ins, mir,
                  LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::DEFAULT));
}
// Binary ALU lowering (e.g. z = x + y): lhs must be in a register, rhs may be
// an immediate (ARM data-processing instructions accept an immediate operand).
bool
LIRGeneratorARM::lowerForALU(LInstructionHelper<1, 2, 0> *ins, MDefinition *mir, MDefinition *lhs, MDefinition *rhs)
{
    ins->setOperand(0, useRegister(lhs));
    ins->setOperand(1, useRegisterOrConstant(rhs));
    return define(ins, mir,
                  LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::DEFAULT));
}
// Unary FPU lowering: input and output both live in registers.
bool
LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 1, 0> *ins, MDefinition *mir, MDefinition *input)
{
    ins->setOperand(0, useRegister(input));
    return define(ins, mir,
                  LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::DEFAULT));
}
// Binary FPU lowering: both operands in registers (VFP instructions take no
// immediate operands here, unlike the integer ALU path).
bool
LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 2, 0> *ins, MDefinition *mir, MDefinition *lhs, MDefinition *rhs)
{
    ins->setOperand(0, useRegister(lhs));
    ins->setOperand(1, useRegister(rhs));
    return define(ins, mir,
                  LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::DEFAULT));
}
// Lower a fused bit-and + branch: test (lhs & rhs) and branch on the result
// without defining a register for the and itself. rhs may be a constant.
bool
LIRGeneratorARM::lowerForBitAndAndBranch(LBitAndAndBranch *baab, MInstruction *mir,
                                         MDefinition *lhs, MDefinition *rhs)
{
    baab->setOperand(0, useRegisterAtStart(lhs));
    baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
    return add(baab, mir);
}
// Assign virtual registers to an untyped (boxed Value) phi. A Value occupies
// two LIR phis on ARM — one for the type tag, one for the payload — and their
// vregs must be consecutive (payload == type + 1), which the getVirtualRegister
// allocation order guarantees and the assert below checks.
bool
LIRGeneratorARM::defineUntypedPhi(MPhi *phi, size_t lirIndex)
{
    LPhi *type = current->getPhi(lirIndex + VREG_TYPE_OFFSET);
    LPhi *payload = current->getPhi(lirIndex + VREG_DATA_OFFSET);

    uint32_t typeVreg = getVirtualRegister();
    if (typeVreg >= MAX_VIRTUAL_REGISTERS)
        return false;

    // The MIR phi records only the type vreg; the payload half is implicitly
    // at typeVreg + 1 (see VirtualRegisterOfPayload users above).
    phi->setVirtualRegister(typeVreg);

    uint32_t payloadVreg = getVirtualRegister();
    if (payloadVreg >= MAX_VIRTUAL_REGISTERS)
        return false;
    JS_ASSERT(typeVreg + 1 == payloadVreg);

    type->setDef(0, LDefinition(typeVreg, LDefinition::TYPE));
    payload->setDef(0, LDefinition(payloadVreg, LDefinition::PAYLOAD));
    annotate(type);
    annotate(payload);
    return true;
}
// Wire up one incoming edge of an untyped phi: the operand's type-tag vreg
// feeds the type phi, and its payload vreg feeds the payload phi.
void
LIRGeneratorARM::lowerUntypedPhiInput(MPhi *phi, uint32_t inputPosition, LBlock *block, size_t lirIndex)
{
    MDefinition *operand = phi->getOperand(inputPosition);
    LPhi *type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
    LPhi *payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
    type->setOperand(inputPosition, LUse(operand->virtualRegister() + VREG_TYPE_OFFSET, LUse::ANY));
    payload->setOperand(inputPosition, LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
}
// Shift lowering: value in a register, shift amount in a register or as an
// immediate.
bool
LIRGeneratorARM::lowerForShift(LInstructionHelper<1, 2, 0> *ins, MDefinition *mir, MDefinition *lhs, MDefinition *rhs)
{
    ins->setOperand(0, useRegister(lhs));
    ins->setOperand(1, useRegisterOrConstant(rhs));
    return define(ins, mir);
}
// Lower signed integer division. Strategy, in order of preference:
// power-of-two constant divisor -> shift-based LDivPowTwoI; hardware IDIV
// available -> LDivI; otherwise a software division (LSoftDivI) that calls
// out through fixed registers. Fallible divisions attach a bailout snapshot.
bool
LIRGeneratorARM::lowerDivI(MDiv *div)
{
    if (div->isUnsigned())
        return lowerUDiv(div);

    // Division instructions are slow. Division by constant denominators can be
    // rewritten to use other instructions.
    if (div->rhs()->isConstant()) {
        int32_t rhs = div->rhs()->toConstant()->value().toInt32();
        // Check for division by a positive power of two, which is an easy and
        // important case to optimize. Note that other optimizations are also
        // possible; division by negative powers of two can be optimized in a
        // similar manner as positive powers of two, and division by other
        // constants can be optimized by a reciprocal multiplication technique.
        int32_t shift = FloorLog2(rhs);
        if (rhs > 0 && 1 << shift == rhs) {
            LDivPowTwoI *lir = new(alloc()) LDivPowTwoI(useRegisterAtStart(div->lhs()), shift);
            if (div->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
                return false;
            return define(lir, div);
        }
    }

    if (hasIDIV()) {
        LDivI *lir = new(alloc()) LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
        if (div->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
            return false;
        return define(lir, div);
    }

    // Software division: operands pinned to r0/r1, clobbers r1-r3, and the
    // quotient comes back in r0.
    LSoftDivI *lir = new(alloc()) LSoftDivI(useFixedAtStart(div->lhs(), r0), useFixedAtStart(div->rhs(), r1),
                                            tempFixed(r1), tempFixed(r2), tempFixed(r3));
    if (div->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
        return false;
    return defineFixed(lir, div, LAllocation(AnyRegister(r0)));
}
// Lower integer multiplication; a fallible multiply (overflow / -0 checks)
// gets a bailout snapshot before the generic ALU lowering.
bool
LIRGeneratorARM::lowerMulI(MMul *mul, MDefinition *lhs, MDefinition *rhs)
{
    LMulI *lir = new(alloc()) LMulI;
    if (mul->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
        return false;
    return lowerForALU(lir, mul, lhs, rhs);
}
// Lower signed integer modulus. Constant divisors of the form 2^k use a
// shift/mask (LModPowTwoI); divisors of the form 2^k - 1 use LModMaskI;
// hardware IDIV uses LModI; otherwise a software modulus through fixed
// registers.
bool
LIRGeneratorARM::lowerModI(MMod *mod)
{
    if (mod->isUnsigned())
        return lowerUMod(mod);

    if (mod->rhs()->isConstant()) {
        int32_t rhs = mod->rhs()->toConstant()->value().toInt32();
        int32_t shift = FloorLog2(rhs);
        if (rhs > 0 && 1 << shift == rhs) {
            // x % 2^k: keep the low k bits.
            LModPowTwoI *lir = new(alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
            if (mod->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
                return false;
            return define(lir, mod);
        } else if (shift < 31 && (1 << (shift+1)) - 1 == rhs) {
            // x % (2^k - 1): handled by a dedicated mask-based sequence.
            LModMaskI *lir = new(alloc()) LModMaskI(useRegister(mod->lhs()), temp(), temp(), shift+1);
            if (mod->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
                return false;
            return define(lir, mod);
        }
    }

    if (hasIDIV()) {
        LModI *lir = new(alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
        if (mod->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
            return false;
        return define(lir, mod);
    }

    // Software modulus: operands pinned to r0/r1, clobbers r0/r2/r3 plus one
    // free temp, and the remainder comes back in r1.
    LSoftModI *lir = new(alloc()) LSoftModI(useFixedAtStart(mod->lhs(), r0), useFixedAtStart(mod->rhs(), r1),
                                            tempFixed(r0), tempFixed(r2), tempFixed(r3),
                                            temp(LDefinition::GENERAL));
    if (mod->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
        return false;
    return defineFixed(lir, mod, LAllocation(AnyRegister(r1)));
}
// Lower Math.pow(x, 0.5): the double input's register is reused for the
// output.
bool
LIRGeneratorARM::visitPowHalf(MPowHalf *ins)
{
    MDefinition *input = ins->input();
    JS_ASSERT(input->type() == MIRType_Double);
    LPowHalfD *lir = new(alloc()) LPowHalfD(useRegisterAtStart(input));
    return defineReuseInput(lir, ins, 0);
}
// Factory for the integer-input table switch; inputCopy is a temp holding a
// mutable copy of the index.
LTableSwitch *
LIRGeneratorARM::newLTableSwitch(const LAllocation &in, const LDefinition &inputCopy,
                                       MTableSwitch *tableswitch)
{
    return new(alloc()) LTableSwitch(in, inputCopy, tableswitch);
}
// Factory for the Value-input table switch; needs a general temp and a double
// temp for unboxing the index.
LTableSwitchV *
LIRGeneratorARM::newLTableSwitchV(MTableSwitch *tableswitch)
{
    return new(alloc()) LTableSwitchV(temp(), tempDouble(), tableswitch);
}
   368 bool
   369 LIRGeneratorARM::visitGuardShape(MGuardShape *ins)
   370 {
   371     JS_ASSERT(ins->obj()->type() == MIRType_Object);
   373     LDefinition tempObj = temp(LDefinition::OBJECT);
   374     LGuardShape *guard = new(alloc()) LGuardShape(useRegister(ins->obj()), tempObj);
   375     if (!assignSnapshot(guard, ins->bailoutKind()))
   376         return false;
   377     if (!add(guard, ins))
   378         return false;
   379     return redefine(ins, ins->obj());
   380 }
   382 bool
   383 LIRGeneratorARM::visitGuardObjectType(MGuardObjectType *ins)
   384 {
   385     JS_ASSERT(ins->obj()->type() == MIRType_Object);
   387     LDefinition tempObj = temp(LDefinition::OBJECT);
   388     LGuardObjectType *guard = new(alloc()) LGuardObjectType(useRegister(ins->obj()), tempObj);
   389     if (!assignSnapshot(guard))
   390         return false;
   391     if (!add(guard, ins))
   392         return false;
   393     return redefine(ins, ins->obj());
   394 }
// Lower an unsigned right shift whose result is consumed as a double
// (int32 >>> amount, producing a value that may exceed INT32_MAX).
bool
LIRGeneratorARM::lowerUrshD(MUrsh *mir)
{
    MDefinition *lhs = mir->lhs();
    MDefinition *rhs = mir->rhs();

    JS_ASSERT(lhs->type() == MIRType_Int32);
    JS_ASSERT(rhs->type() == MIRType_Int32);

    LUrshD *lir = new(alloc()) LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
    return define(lir, mir);
}
   409 bool
   410 LIRGeneratorARM::visitAsmJSNeg(MAsmJSNeg *ins)
   411 {
   412     if (ins->type() == MIRType_Int32)
   413         return define(new(alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
   415     if(ins->type() == MIRType_Float32)
   416         return define(new(alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
   418     JS_ASSERT(ins->type() == MIRType_Double);
   419     return define(new(alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
   420 }
   422 bool
   423 LIRGeneratorARM::lowerUDiv(MDiv *div)
   424 {
   425     MDefinition *lhs = div->getOperand(0);
   426     MDefinition *rhs = div->getOperand(1);
   428     if (hasIDIV()) {
   429         LUDiv *lir = new(alloc()) LUDiv;
   430         lir->setOperand(0, useRegister(lhs));
   431         lir->setOperand(1, useRegister(rhs));
   432         if (div->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
   433             return false;
   434         return define(lir, div);
   435     } else {
   436         LSoftUDivOrMod *lir = new(alloc()) LSoftUDivOrMod(useFixedAtStart(lhs, r0), useFixedAtStart(rhs, r1),
   437                                                           tempFixed(r1), tempFixed(r2), tempFixed(r3));
   438         if (div->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
   439             return false;
   440         return defineFixed(lir, div, LAllocation(AnyRegister(r0)));
   441     }
   442 }
   444 bool
   445 LIRGeneratorARM::lowerUMod(MMod *mod)
   446 {
   447     MDefinition *lhs = mod->getOperand(0);
   448     MDefinition *rhs = mod->getOperand(1);
   450     if (hasIDIV()) {
   451         LUMod *lir = new(alloc()) LUMod;
   452         lir->setOperand(0, useRegister(lhs));
   453         lir->setOperand(1, useRegister(rhs));
   454         if (mod->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
   455             return false;
   456         return define(lir, mod);
   457     } else {
   458         LSoftUDivOrMod *lir = new(alloc()) LSoftUDivOrMod(useFixedAtStart(lhs, r0), useFixedAtStart(rhs, r1),
   459                                                           tempFixed(r0), tempFixed(r2), tempFixed(r3));
   460         if (mod->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
   461             return false;
   462         return defineFixed(lir, mod, LAllocation(AnyRegister(r1)));
   463     }
   464 }
// Lower the asm.js uint32 -> double conversion.
bool
LIRGeneratorARM::visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble *ins)
{
    JS_ASSERT(ins->input()->type() == MIRType_Int32);
    LAsmJSUInt32ToDouble *lir = new(alloc()) LAsmJSUInt32ToDouble(useRegisterAtStart(ins->input()));
    return define(lir, ins);
}
// Lower the asm.js uint32 -> float32 conversion.
bool
LIRGeneratorARM::visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32 *ins)
{
    JS_ASSERT(ins->input()->type() == MIRType_Int32);
    LAsmJSUInt32ToFloat32 *lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input()));
    return define(lir, ins);
}
// Lower an asm.js heap load. Constant pointers with the bounds check elided
// can be encoded directly; otherwise the pointer stays in a register.
bool
LIRGeneratorARM::visitAsmJSLoadHeap(MAsmJSLoadHeap *ins)
{
    MDefinition *ptr = ins->ptr();
    JS_ASSERT(ptr->type() == MIRType_Int32);
    LAllocation ptrAlloc;

    // For the ARM it is best to keep the 'ptr' in a register if a bounds check is needed.
    if (ptr->isConstant() && ins->skipBoundsCheck()) {
        int32_t ptrValue = ptr->toConstant()->value().toInt32();
        // A bounds check is only skipped for a positive index.
        JS_ASSERT(ptrValue >= 0);
        ptrAlloc = LAllocation(ptr->toConstant()->vp());
    } else
        ptrAlloc = useRegisterAtStart(ptr);

    return define(new(alloc()) LAsmJSLoadHeap(ptrAlloc), ins);
}
// Lower an asm.js heap store; pointer handling mirrors visitAsmJSLoadHeap.
bool
LIRGeneratorARM::visitAsmJSStoreHeap(MAsmJSStoreHeap *ins)
{
    MDefinition *ptr = ins->ptr();
    JS_ASSERT(ptr->type() == MIRType_Int32);
    LAllocation ptrAlloc;

    // A constant pointer may be encoded directly, but only when the bounds
    // check has been elided (which requires a non-negative index).
    if (ptr->isConstant() && ins->skipBoundsCheck()) {
        JS_ASSERT(ptr->toConstant()->value().toInt32() >= 0);
        ptrAlloc = LAllocation(ptr->toConstant()->vp());
    } else
        ptrAlloc = useRegisterAtStart(ptr);

    return add(new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value())), ins);
}
// Lower an indirect-call function-pointer load from the asm.js function
// table; the index stays in a register and one temp is needed.
bool
LIRGeneratorARM::visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins)
{
    return define(new(alloc()) LAsmJSLoadFuncPtr(useRegister(ins->index()), temp()), ins);
}
// Lower double -> int32 truncation; no temp register is required on ARM
// (hence BogusTemp).
bool
LIRGeneratorARM::lowerTruncateDToInt32(MTruncateToInt32 *ins)
{
    MDefinition *opd = ins->input();
    JS_ASSERT(opd->type() == MIRType_Double);

    return define(new(alloc()) LTruncateDToInt32(useRegister(opd), LDefinition::BogusTemp()), ins);
}
// Lower float32 -> int32 truncation; like the double variant, no temp is
// needed on ARM.
bool
LIRGeneratorARM::lowerTruncateFToInt32(MTruncateToInt32 *ins)
{
    MDefinition *opd = ins->input();
    JS_ASSERT(opd->type() == MIRType_Float32);

    return define(new(alloc()) LTruncateFToInt32(useRegister(opd), LDefinition::BogusTemp()), ins);
}
// Not yet implemented on ARM; this MIR node should never reach lowering here.
bool
LIRGeneratorARM::visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins)
{
    MOZ_ASSUME_UNREACHABLE("NYI");
}
// Not yet implemented on ARM; this MIR node should never reach lowering here.
bool
LIRGeneratorARM::visitForkJoinGetSlice(MForkJoinGetSlice *ins)
{
    MOZ_ASSUME_UNREACHABLE("NYI");
}
    553 // NOTE(review): the soft-division paths above presumably end up calling the
    554 // ARM EABI runtime helper __aeabi_uidiv (and friends) — confirm in the
    555 // code generator for LSoftDivI / LSoftUDivOrMod.

mercurial