js/src/jit/IonMacroAssembler.h

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/js/src/jit/IonMacroAssembler.h	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,1465 @@
     1.4 +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     1.5 + * vim: set ts=8 sts=4 et sw=4 tw=99:
     1.6 + * This Source Code Form is subject to the terms of the Mozilla Public
     1.7 + * License, v. 2.0. If a copy of the MPL was not distributed with this
     1.8 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.9 +
    1.10 +#ifndef jit_IonMacroAssembler_h
    1.11 +#define jit_IonMacroAssembler_h
    1.12 +
    1.13 +#ifdef JS_ION
    1.14 +
    1.15 +#include "jscompartment.h"
    1.16 +
    1.17 +#if defined(JS_CODEGEN_X86)
    1.18 +# include "jit/x86/MacroAssembler-x86.h"
    1.19 +#elif defined(JS_CODEGEN_X64)
    1.20 +# include "jit/x64/MacroAssembler-x64.h"
    1.21 +#elif defined(JS_CODEGEN_ARM)
    1.22 +# include "jit/arm/MacroAssembler-arm.h"
    1.23 +#elif defined(JS_CODEGEN_MIPS)
    1.24 +# include "jit/mips/MacroAssembler-mips.h"
    1.25 +#else
    1.26 +# error "Unknown architecture!"
    1.27 +#endif
    1.28 +#include "jit/IonInstrumentation.h"
    1.29 +#include "jit/JitCompartment.h"
    1.30 +#include "jit/VMFunctions.h"
    1.31 +#include "vm/ProxyObject.h"
    1.32 +#include "vm/Shape.h"
    1.33 +
    1.34 +namespace js {
    1.35 +namespace jit {
    1.36 +
    1.37 +// The public entrypoint for emitting assembly. Note that a MacroAssembler can
    1.38 +// use cx->lifoAlloc, so take care not to interleave masm use with other
    1.39 +// lifoAlloc use if one will be destroyed before the other.
    1.40 +class MacroAssembler : public MacroAssemblerSpecific
    1.41 +{
    1.42 +    MacroAssembler *thisFromCtor() {
    1.43 +        return this;
    1.44 +    }
    1.45 +
    1.46 +  public:
    1.47 +    class AutoRooter : public AutoGCRooter
    1.48 +    {
    1.49 +        MacroAssembler *masm_;
    1.50 +
    1.51 +      public:
    1.52 +        AutoRooter(JSContext *cx, MacroAssembler *masm)
    1.53 +          : AutoGCRooter(cx, IONMASM),
    1.54 +            masm_(masm)
    1.55 +        { }
    1.56 +
    1.57 +        MacroAssembler *masm() const {
    1.58 +            return masm_;
    1.59 +        }
    1.60 +    };
    1.61 +
    1.62 +    /*
    1.63 +     * Base class for creating a branch.
    1.64 +     */
    1.65 +    class Branch
    1.66 +    {
    1.67 +        bool init_;
    1.68 +        Condition cond_;
    1.69 +        Label *jump_;
    1.70 +        Register reg_;
    1.71 +
    1.72 +      public:
    1.73 +        Branch()
    1.74 +          : init_(false),
    1.75 +            cond_(Equal),
    1.76 +            jump_(nullptr),
    1.77 +            reg_(Register::FromCode(0))      // Quell compiler warnings.
    1.78 +        { }
    1.79 +
    1.80 +        Branch(Condition cond, Register reg, Label *jump)
    1.81 +          : init_(true),
    1.82 +            cond_(cond),
    1.83 +            jump_(jump),
    1.84 +            reg_(reg)
    1.85 +        { }
    1.86 +
    1.87 +        bool isInitialized() const {
    1.88 +            return init_;
    1.89 +        }
    1.90 +
    1.91 +        Condition cond() const {
    1.92 +            return cond_;
    1.93 +        }
    1.94 +
    1.95 +        Label *jump() const {
    1.96 +            return jump_;
    1.97 +        }
    1.98 +
    1.99 +        Register reg() const {
   1.100 +            return reg_;
   1.101 +        }
   1.102 +
   1.103 +        void invertCondition() {
   1.104 +            cond_ = InvertCondition(cond_);
   1.105 +        }
   1.106 +
   1.107 +        void relink(Label *jump) {
   1.108 +            jump_ = jump;
   1.109 +        }
   1.110 +
   1.111 +        virtual void emit(MacroAssembler &masm) = 0;
   1.112 +    };
   1.113 +
   1.114 +    /*
   1.115 +     * Creates a branch based on a specific types::Type.
   1.116 +     * Note: emits number test (int/double) for types::Type::DoubleType()
   1.117 +     */
   1.118 +    class BranchType : public Branch
   1.119 +    {
   1.120 +        types::Type type_;
   1.121 +
   1.122 +      public:
   1.123 +        BranchType()
   1.124 +          : Branch(),
   1.125 +            type_(types::Type::UnknownType())
   1.126 +        { }
   1.127 +
   1.128 +        BranchType(Condition cond, Register reg, types::Type type, Label *jump)
   1.129 +          : Branch(cond, reg, jump),
   1.130 +            type_(type)
   1.131 +        { }
   1.132 +
   1.133 +        void emit(MacroAssembler &masm) {
   1.134 +            JS_ASSERT(isInitialized());
   1.135 +            MIRType mirType = MIRType_None;
   1.136 +
   1.137 +            if (type_.isPrimitive()) {
   1.138 +                if (type_.isMagicArguments())
   1.139 +                    mirType = MIRType_MagicOptimizedArguments;
   1.140 +                else
   1.141 +                    mirType = MIRTypeFromValueType(type_.primitive());
   1.142 +            } else if (type_.isAnyObject()) {
   1.143 +                mirType = MIRType_Object;
   1.144 +            } else {
   1.145 +                MOZ_ASSUME_UNREACHABLE("Unknown conversion to mirtype");
   1.146 +            }
   1.147 +
   1.148 +            if (mirType == MIRType_Double)
   1.149 +                masm.branchTestNumber(cond(), reg(), jump());
   1.150 +            else
   1.151 +                masm.branchTestMIRType(cond(), reg(), mirType, jump());
   1.152 +        }
   1.153 +
   1.154 +    };
   1.155 +
   1.156 +    /*
   1.157 +     * Creates a branch based on a GCPtr.
   1.158 +     */
   1.159 +    class BranchGCPtr : public Branch
   1.160 +    {
   1.161 +        ImmGCPtr ptr_;
   1.162 +
   1.163 +      public:
   1.164 +        BranchGCPtr()
   1.165 +          : Branch(),
   1.166 +            ptr_(ImmGCPtr(nullptr))
   1.167 +        { }
   1.168 +
   1.169 +        BranchGCPtr(Condition cond, Register reg, ImmGCPtr ptr, Label *jump)
   1.170 +          : Branch(cond, reg, jump),
   1.171 +            ptr_(ptr)
   1.172 +        { }
   1.173 +
   1.174 +        void emit(MacroAssembler &masm) {
   1.175 +            JS_ASSERT(isInitialized());
   1.176 +            masm.branchPtr(cond(), reg(), ptr_, jump());
   1.177 +        }
   1.178 +    };
   1.179 +
   1.180 +    mozilla::Maybe<AutoRooter> autoRooter_;
   1.181 +    mozilla::Maybe<IonContext> ionContext_;
   1.182 +    mozilla::Maybe<AutoIonContextAlloc> alloc_;
   1.183 +    bool enoughMemory_;
   1.184 +    bool embedsNurseryPointers_;
   1.185 +
   1.186 +    // SPS instrumentation, only used for Ion caches.
   1.187 +    mozilla::Maybe<IonInstrumentation> spsInstrumentation_;
   1.188 +    jsbytecode *spsPc_;
   1.189 +
   1.190 +  private:
   1.191 +    // This field is used to manage profiling instrumentation output. If
   1.192 +    // provided and enabled, then instrumentation will be emitted around call
   1.193 +    // sites. The IonInstrumentation instance is hosted inside of
   1.194 +    // CodeGeneratorShared and is the manager of when instrumentation is
   1.195 +    // actually emitted or not. If nullptr, then no instrumentation is emitted.
   1.196 +    IonInstrumentation *sps_;
   1.197 +
   1.198 +    // Labels for handling exceptions and failures.
   1.199 +    NonAssertingLabel sequentialFailureLabel_;
   1.200 +    NonAssertingLabel parallelFailureLabel_;
   1.201 +
   1.202 +  public:
   1.203 +    // If instrumentation should be emitted, then the sps parameter should be
   1.204 +    // provided, but otherwise it can be safely omitted to prevent all
   1.205 +    // instrumentation from being emitted.
   1.206 +    MacroAssembler()
   1.207 +      : enoughMemory_(true),
   1.208 +        embedsNurseryPointers_(false),
   1.209 +        sps_(nullptr)
   1.210 +    {
   1.211 +        IonContext *icx = GetIonContext();
   1.212 +        JSContext *cx = icx->cx;
   1.213 +        if (cx)
   1.214 +            constructRoot(cx);
   1.215 +
   1.216 +        if (!icx->temp) {
   1.217 +            JS_ASSERT(cx);
   1.218 +            alloc_.construct(cx);
   1.219 +        }
   1.220 +
   1.221 +        moveResolver_.setAllocator(*icx->temp);
   1.222 +#ifdef JS_CODEGEN_ARM
   1.223 +        initWithAllocator();
   1.224 +        m_buffer.id = icx->getNextAssemblerId();
   1.225 +#endif
   1.226 +    }
   1.227 +
   1.228 +    // This constructor should only be used when there is no IonContext active
   1.229 +    // (for example, Trampoline-$(ARCH).cpp and IonCaches.cpp).
   1.230 +    MacroAssembler(JSContext *cx, IonScript *ion = nullptr,
   1.231 +                   JSScript *script = nullptr, jsbytecode *pc = nullptr)
   1.232 +      : enoughMemory_(true),
   1.233 +        embedsNurseryPointers_(false),
   1.234 +        sps_(nullptr)
   1.235 +    {
   1.236 +        constructRoot(cx);
   1.237 +        ionContext_.construct(cx, (js::jit::TempAllocator *)nullptr);
   1.238 +        alloc_.construct(cx);
   1.239 +        moveResolver_.setAllocator(*ionContext_.ref().temp);
   1.240 +#ifdef JS_CODEGEN_ARM
   1.241 +        initWithAllocator();
   1.242 +        m_buffer.id = GetIonContext()->getNextAssemblerId();
   1.243 +#endif
   1.244 +        if (ion) {
   1.245 +            setFramePushed(ion->frameSize());
   1.246 +            if (pc && cx->runtime()->spsProfiler.enabled()) {
   1.247 +                // We have to update the SPS pc when this IC stub calls into
   1.248 +                // the VM.
   1.249 +                spsPc_ = pc;
   1.250 +                spsInstrumentation_.construct(&cx->runtime()->spsProfiler, &spsPc_);
   1.251 +                sps_ = spsInstrumentation_.addr();
   1.252 +                sps_->setPushed(script);
   1.253 +            }
   1.254 +        }
   1.255 +    }
   1.256 +
    1.257 +    // asm.js compilation handles its own IonContext-pushing
   1.258 +    struct AsmJSToken {};
   1.259 +    MacroAssembler(AsmJSToken)
   1.260 +      : enoughMemory_(true),
   1.261 +        embedsNurseryPointers_(false),
   1.262 +        sps_(nullptr)
   1.263 +    {
   1.264 +#ifdef JS_CODEGEN_ARM
   1.265 +        initWithAllocator();
   1.266 +        m_buffer.id = 0;
   1.267 +#endif
   1.268 +    }
   1.269 +
   1.270 +    void setInstrumentation(IonInstrumentation *sps) {
   1.271 +        sps_ = sps;
   1.272 +    }
   1.273 +
   1.274 +    void resetForNewCodeGenerator(TempAllocator &alloc) {
   1.275 +        setFramePushed(0);
   1.276 +        moveResolver_.clearTempObjectPool();
   1.277 +        moveResolver_.setAllocator(alloc);
   1.278 +    }
   1.279 +
   1.280 +    void constructRoot(JSContext *cx) {
   1.281 +        autoRooter_.construct(cx, this);
   1.282 +    }
   1.283 +
   1.284 +    MoveResolver &moveResolver() {
   1.285 +        return moveResolver_;
   1.286 +    }
   1.287 +
   1.288 +    size_t instructionsSize() const {
   1.289 +        return size();
   1.290 +    }
   1.291 +
   1.292 +    void propagateOOM(bool success) {
   1.293 +        enoughMemory_ &= success;
   1.294 +    }
   1.295 +    bool oom() const {
   1.296 +        return !enoughMemory_ || MacroAssemblerSpecific::oom();
   1.297 +    }
   1.298 +
   1.299 +    bool embedsNurseryPointers() const {
   1.300 +        return embedsNurseryPointers_;
   1.301 +    }
   1.302 +
   1.303 +    // Emits a test of a value against all types in a TypeSet. A scratch
   1.304 +    // register is required.
   1.305 +    template <typename Source, typename TypeSet>
   1.306 +    void guardTypeSet(const Source &address, const TypeSet *types, Register scratch, Label *miss);
   1.307 +    template <typename TypeSet>
   1.308 +    void guardObjectType(Register obj, const TypeSet *types, Register scratch, Label *miss);
   1.309 +    template <typename Source>
   1.310 +    void guardType(const Source &address, types::Type type, Register scratch, Label *miss);
   1.311 +
   1.312 +    void loadObjShape(Register objReg, Register dest) {
   1.313 +        loadPtr(Address(objReg, JSObject::offsetOfShape()), dest);
   1.314 +    }
   1.315 +    void loadBaseShape(Register objReg, Register dest) {
   1.316 +        loadPtr(Address(objReg, JSObject::offsetOfShape()), dest);
   1.317 +
   1.318 +        loadPtr(Address(dest, Shape::offsetOfBase()), dest);
   1.319 +    }
   1.320 +    void loadObjClass(Register objReg, Register dest) {
   1.321 +        loadPtr(Address(objReg, JSObject::offsetOfType()), dest);
   1.322 +        loadPtr(Address(dest, types::TypeObject::offsetOfClasp()), dest);
   1.323 +    }
   1.324 +    void branchTestObjClass(Condition cond, Register obj, Register scratch, const js::Class *clasp,
   1.325 +                            Label *label) {
   1.326 +        loadPtr(Address(obj, JSObject::offsetOfType()), scratch);
   1.327 +        branchPtr(cond, Address(scratch, types::TypeObject::offsetOfClasp()), ImmPtr(clasp), label);
   1.328 +    }
   1.329 +    void branchTestObjShape(Condition cond, Register obj, const Shape *shape, Label *label) {
   1.330 +        branchPtr(cond, Address(obj, JSObject::offsetOfShape()), ImmGCPtr(shape), label);
   1.331 +    }
   1.332 +    void branchTestObjShape(Condition cond, Register obj, Register shape, Label *label) {
   1.333 +        branchPtr(cond, Address(obj, JSObject::offsetOfShape()), shape, label);
   1.334 +    }
   1.335 +    void branchTestProxyHandlerFamily(Condition cond, Register proxy, Register scratch,
   1.336 +                                      const void *handlerp, Label *label) {
   1.337 +        Address handlerAddr(proxy, ProxyObject::offsetOfHandler());
   1.338 +        loadPrivate(handlerAddr, scratch);
   1.339 +        Address familyAddr(scratch, BaseProxyHandler::offsetOfFamily());
   1.340 +        branchPtr(cond, familyAddr, ImmPtr(handlerp), label);
   1.341 +    }
   1.342 +
   1.343 +    template <typename Value>
   1.344 +    void branchTestMIRType(Condition cond, const Value &val, MIRType type, Label *label) {
   1.345 +        switch (type) {
   1.346 +          case MIRType_Null:      return branchTestNull(cond, val, label);
   1.347 +          case MIRType_Undefined: return branchTestUndefined(cond, val, label);
   1.348 +          case MIRType_Boolean:   return branchTestBoolean(cond, val, label);
   1.349 +          case MIRType_Int32:     return branchTestInt32(cond, val, label);
   1.350 +          case MIRType_String:    return branchTestString(cond, val, label);
   1.351 +          case MIRType_Object:    return branchTestObject(cond, val, label);
   1.352 +          case MIRType_Double:    return branchTestDouble(cond, val, label);
   1.353 +          case MIRType_MagicOptimizedArguments: // Fall through.
   1.354 +          case MIRType_MagicIsConstructing:
   1.355 +          case MIRType_MagicHole: return branchTestMagic(cond, val, label);
   1.356 +          default:
   1.357 +            MOZ_ASSUME_UNREACHABLE("Bad MIRType");
   1.358 +        }
   1.359 +    }
   1.360 +
   1.361 +    // Branches to |label| if |reg| is false. |reg| should be a C++ bool.
   1.362 +    void branchIfFalseBool(Register reg, Label *label) {
   1.363 +        // Note that C++ bool is only 1 byte, so ignore the higher-order bits.
   1.364 +        branchTest32(Assembler::Zero, reg, Imm32(0xFF), label);
   1.365 +    }
   1.366 +
   1.367 +    // Branches to |label| if |reg| is true. |reg| should be a C++ bool.
   1.368 +    void branchIfTrueBool(Register reg, Label *label) {
   1.369 +        // Note that C++ bool is only 1 byte, so ignore the higher-order bits.
   1.370 +        branchTest32(Assembler::NonZero, reg, Imm32(0xFF), label);
   1.371 +    }
   1.372 +
   1.373 +    void loadObjPrivate(Register obj, uint32_t nfixed, Register dest) {
   1.374 +        loadPtr(Address(obj, JSObject::getPrivateDataOffset(nfixed)), dest);
   1.375 +    }
   1.376 +
   1.377 +    void loadObjProto(Register obj, Register dest) {
   1.378 +        loadPtr(Address(obj, JSObject::offsetOfType()), dest);
   1.379 +        loadPtr(Address(dest, types::TypeObject::offsetOfProto()), dest);
   1.380 +    }
   1.381 +
   1.382 +    void loadStringLength(Register str, Register dest) {
   1.383 +        loadPtr(Address(str, JSString::offsetOfLengthAndFlags()), dest);
   1.384 +        rshiftPtr(Imm32(JSString::LENGTH_SHIFT), dest);
   1.385 +    }
   1.386 +
   1.387 +    void loadSliceBounds(Register worker, Register dest) {
   1.388 +        loadPtr(Address(worker, ThreadPoolWorker::offsetOfSliceBounds()), dest);
   1.389 +    }
   1.390 +
   1.391 +    void loadJSContext(Register dest) {
   1.392 +        loadPtr(AbsoluteAddress(GetIonContext()->runtime->addressOfJSContext()), dest);
   1.393 +    }
   1.394 +    void loadJitActivation(Register dest) {
   1.395 +        loadPtr(AbsoluteAddress(GetIonContext()->runtime->addressOfActivation()), dest);
   1.396 +    }
   1.397 +
   1.398 +    template<typename T>
   1.399 +    void loadTypedOrValue(const T &src, TypedOrValueRegister dest) {
   1.400 +        if (dest.hasValue())
   1.401 +            loadValue(src, dest.valueReg());
   1.402 +        else
   1.403 +            loadUnboxedValue(src, dest.type(), dest.typedReg());
   1.404 +    }
   1.405 +
   1.406 +    template<typename T>
   1.407 +    void loadElementTypedOrValue(const T &src, TypedOrValueRegister dest, bool holeCheck,
   1.408 +                                 Label *hole) {
   1.409 +        if (dest.hasValue()) {
   1.410 +            loadValue(src, dest.valueReg());
   1.411 +            if (holeCheck)
   1.412 +                branchTestMagic(Assembler::Equal, dest.valueReg(), hole);
   1.413 +        } else {
   1.414 +            if (holeCheck)
   1.415 +                branchTestMagic(Assembler::Equal, src, hole);
   1.416 +            loadUnboxedValue(src, dest.type(), dest.typedReg());
   1.417 +        }
   1.418 +    }
   1.419 +
   1.420 +    template <typename T>
   1.421 +    void storeTypedOrValue(TypedOrValueRegister src, const T &dest) {
   1.422 +        if (src.hasValue()) {
   1.423 +            storeValue(src.valueReg(), dest);
   1.424 +        } else if (IsFloatingPointType(src.type())) {
   1.425 +            FloatRegister reg = src.typedReg().fpu();
   1.426 +            if (src.type() == MIRType_Float32) {
   1.427 +                convertFloat32ToDouble(reg, ScratchFloatReg);
   1.428 +                reg = ScratchFloatReg;
   1.429 +            }
   1.430 +            storeDouble(reg, dest);
   1.431 +        } else {
   1.432 +            storeValue(ValueTypeFromMIRType(src.type()), src.typedReg().gpr(), dest);
   1.433 +        }
   1.434 +    }
   1.435 +
   1.436 +    template <typename T>
   1.437 +    void storeConstantOrRegister(ConstantOrRegister src, const T &dest) {
   1.438 +        if (src.constant())
   1.439 +            storeValue(src.value(), dest);
   1.440 +        else
   1.441 +            storeTypedOrValue(src.reg(), dest);
   1.442 +    }
   1.443 +
   1.444 +    void storeCallResult(Register reg) {
   1.445 +        if (reg != ReturnReg)
   1.446 +            mov(ReturnReg, reg);
   1.447 +    }
   1.448 +
   1.449 +    void storeCallFloatResult(const FloatRegister &reg) {
   1.450 +        if (reg != ReturnFloatReg)
   1.451 +            moveDouble(ReturnFloatReg, reg);
   1.452 +    }
   1.453 +
   1.454 +    void storeCallResultValue(AnyRegister dest) {
   1.455 +#if defined(JS_NUNBOX32)
   1.456 +        unboxValue(ValueOperand(JSReturnReg_Type, JSReturnReg_Data), dest);
   1.457 +#elif defined(JS_PUNBOX64)
   1.458 +        unboxValue(ValueOperand(JSReturnReg), dest);
   1.459 +#else
   1.460 +#error "Bad architecture"
   1.461 +#endif
   1.462 +    }
   1.463 +
   1.464 +    void storeCallResultValue(ValueOperand dest) {
   1.465 +#if defined(JS_NUNBOX32)
   1.466 +        // reshuffle the return registers used for a call result to store into
   1.467 +        // dest, using ReturnReg as a scratch register if necessary. This must
   1.468 +        // only be called after returning from a call, at a point when the
   1.469 +        // return register is not live. XXX would be better to allow wrappers
   1.470 +        // to store the return value to different places.
   1.471 +        if (dest.typeReg() == JSReturnReg_Data) {
   1.472 +            if (dest.payloadReg() == JSReturnReg_Type) {
   1.473 +                // swap the two registers.
   1.474 +                mov(JSReturnReg_Type, ReturnReg);
   1.475 +                mov(JSReturnReg_Data, JSReturnReg_Type);
   1.476 +                mov(ReturnReg, JSReturnReg_Data);
   1.477 +            } else {
   1.478 +                mov(JSReturnReg_Data, dest.payloadReg());
   1.479 +                mov(JSReturnReg_Type, dest.typeReg());
   1.480 +            }
   1.481 +        } else {
   1.482 +            mov(JSReturnReg_Type, dest.typeReg());
   1.483 +            mov(JSReturnReg_Data, dest.payloadReg());
   1.484 +        }
   1.485 +#elif defined(JS_PUNBOX64)
   1.486 +        if (dest.valueReg() != JSReturnReg)
   1.487 +            movq(JSReturnReg, dest.valueReg());
   1.488 +#else
   1.489 +#error "Bad architecture"
   1.490 +#endif
   1.491 +    }
   1.492 +
   1.493 +    void storeCallResultValue(TypedOrValueRegister dest) {
   1.494 +        if (dest.hasValue())
   1.495 +            storeCallResultValue(dest.valueReg());
   1.496 +        else
   1.497 +            storeCallResultValue(dest.typedReg());
   1.498 +    }
   1.499 +
   1.500 +    template <typename T>
   1.501 +    Register extractString(const T &source, Register scratch) {
   1.502 +        return extractObject(source, scratch);
   1.503 +    }
   1.504 +
   1.505 +    void PushRegsInMask(RegisterSet set);
   1.506 +    void PushRegsInMask(GeneralRegisterSet set) {
   1.507 +        PushRegsInMask(RegisterSet(set, FloatRegisterSet()));
   1.508 +    }
   1.509 +    void PopRegsInMask(RegisterSet set) {
   1.510 +        PopRegsInMaskIgnore(set, RegisterSet());
   1.511 +    }
   1.512 +    void PopRegsInMask(GeneralRegisterSet set) {
   1.513 +        PopRegsInMask(RegisterSet(set, FloatRegisterSet()));
   1.514 +    }
   1.515 +    void PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore);
   1.516 +
   1.517 +    void branchIfFunctionHasNoScript(Register fun, Label *label) {
    1.518 +        // 16-bit loads are slow and unaligned 32-bit loads may be too, so
   1.519 +        // perform an aligned 32-bit load and adjust the bitmask accordingly.
   1.520 +        JS_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
   1.521 +        JS_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
   1.522 +        JS_STATIC_ASSERT(IS_LITTLE_ENDIAN);
   1.523 +        Address address(fun, JSFunction::offsetOfNargs());
   1.524 +        uint32_t bit = JSFunction::INTERPRETED << 16;
   1.525 +        branchTest32(Assembler::Zero, address, Imm32(bit), label);
   1.526 +    }
   1.527 +    void branchIfInterpreted(Register fun, Label *label) {
    1.528 +        // 16-bit loads are slow and unaligned 32-bit loads may be too, so
   1.529 +        // perform an aligned 32-bit load and adjust the bitmask accordingly.
   1.530 +        JS_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
   1.531 +        JS_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
   1.532 +        JS_STATIC_ASSERT(IS_LITTLE_ENDIAN);
   1.533 +        Address address(fun, JSFunction::offsetOfNargs());
   1.534 +        uint32_t bit = JSFunction::INTERPRETED << 16;
   1.535 +        branchTest32(Assembler::NonZero, address, Imm32(bit), label);
   1.536 +    }
   1.537 +
   1.538 +    void branchIfNotInterpretedConstructor(Register fun, Register scratch, Label *label);
   1.539 +
   1.540 +    using MacroAssemblerSpecific::Push;
   1.541 +    using MacroAssemblerSpecific::Pop;
   1.542 +
   1.543 +    void Push(jsid id, Register scratchReg) {
   1.544 +        if (JSID_IS_GCTHING(id)) {
   1.545 +            // If we're pushing a gcthing, then we can't just push the tagged jsid
   1.546 +            // value since the GC won't have any idea that the push instruction
   1.547 +            // carries a reference to a gcthing.  Need to unpack the pointer,
   1.548 +            // push it using ImmGCPtr, and then rematerialize the id at runtime.
   1.549 +
   1.550 +            // double-checking this here to ensure we don't lose sync
   1.551 +            // with implementation of JSID_IS_GCTHING.
   1.552 +            if (JSID_IS_OBJECT(id)) {
   1.553 +                JSObject *obj = JSID_TO_OBJECT(id);
   1.554 +                movePtr(ImmGCPtr(obj), scratchReg);
   1.555 +                JS_ASSERT(((size_t)obj & JSID_TYPE_MASK) == 0);
   1.556 +                orPtr(Imm32(JSID_TYPE_OBJECT), scratchReg);
   1.557 +                Push(scratchReg);
   1.558 +            } else {
   1.559 +                JSString *str = JSID_TO_STRING(id);
   1.560 +                JS_ASSERT(((size_t)str & JSID_TYPE_MASK) == 0);
   1.561 +                JS_ASSERT(JSID_TYPE_STRING == 0x0);
   1.562 +                Push(ImmGCPtr(str));
   1.563 +            }
   1.564 +        } else {
   1.565 +            Push(ImmWord(JSID_BITS(id)));
   1.566 +        }
   1.567 +    }
   1.568 +
   1.569 +    void Push(TypedOrValueRegister v) {
   1.570 +        if (v.hasValue()) {
   1.571 +            Push(v.valueReg());
   1.572 +        } else if (IsFloatingPointType(v.type())) {
   1.573 +            FloatRegister reg = v.typedReg().fpu();
   1.574 +            if (v.type() == MIRType_Float32) {
   1.575 +                convertFloat32ToDouble(reg, ScratchFloatReg);
   1.576 +                reg = ScratchFloatReg;
   1.577 +            }
   1.578 +            Push(reg);
   1.579 +        } else {
   1.580 +            Push(ValueTypeFromMIRType(v.type()), v.typedReg().gpr());
   1.581 +        }
   1.582 +    }
   1.583 +
   1.584 +    void Push(ConstantOrRegister v) {
   1.585 +        if (v.constant())
   1.586 +            Push(v.value());
   1.587 +        else
   1.588 +            Push(v.reg());
   1.589 +    }
   1.590 +
   1.591 +    void Push(const ValueOperand &val) {
   1.592 +        pushValue(val);
   1.593 +        framePushed_ += sizeof(Value);
   1.594 +    }
   1.595 +
   1.596 +    void Push(const Value &val) {
   1.597 +        pushValue(val);
   1.598 +        framePushed_ += sizeof(Value);
   1.599 +    }
   1.600 +
   1.601 +    void Push(JSValueType type, Register reg) {
   1.602 +        pushValue(type, reg);
   1.603 +        framePushed_ += sizeof(Value);
   1.604 +    }
   1.605 +
   1.606 +    void PushValue(const Address &addr) {
   1.607 +        JS_ASSERT(addr.base != StackPointer);
   1.608 +        pushValue(addr);
   1.609 +        framePushed_ += sizeof(Value);
   1.610 +    }
   1.611 +
   1.612 +    void PushEmptyRooted(VMFunction::RootType rootType);
   1.613 +    void popRooted(VMFunction::RootType rootType, Register cellReg, const ValueOperand &valueReg);
   1.614 +
   1.615 +    void adjustStack(int amount) {
   1.616 +        if (amount > 0)
   1.617 +            freeStack(amount);
   1.618 +        else if (amount < 0)
   1.619 +            reserveStack(-amount);
   1.620 +    }
   1.621 +
   1.622 +    void bumpKey(Int32Key *key, int diff) {
   1.623 +        if (key->isRegister())
   1.624 +            add32(Imm32(diff), key->reg());
   1.625 +        else
   1.626 +            key->bumpConstant(diff);
   1.627 +    }
   1.628 +
   1.629 +    void storeKey(const Int32Key &key, const Address &dest) {
   1.630 +        if (key.isRegister())
   1.631 +            store32(key.reg(), dest);
   1.632 +        else
   1.633 +            store32(Imm32(key.constant()), dest);
   1.634 +    }
   1.635 +
   1.636 +    template<typename T>
   1.637 +    void branchKey(Condition cond, const T &length, const Int32Key &key, Label *label) {
   1.638 +        if (key.isRegister())
   1.639 +            branch32(cond, length, key.reg(), label);
   1.640 +        else
   1.641 +            branch32(cond, length, Imm32(key.constant()), label);
   1.642 +    }
   1.643 +
   1.644 +    void branchTestNeedsBarrier(Condition cond, Register scratch, Label *label) {
   1.645 +        JS_ASSERT(cond == Zero || cond == NonZero);
   1.646 +        CompileZone *zone = GetIonContext()->compartment->zone();
   1.647 +        movePtr(ImmPtr(zone->addressOfNeedsBarrier()), scratch);
   1.648 +        Address needsBarrierAddr(scratch, 0);
   1.649 +        branchTest32(cond, needsBarrierAddr, Imm32(0x1), label);
   1.650 +    }
   1.651 +
   1.652 +    template <typename T>
   1.653 +    void callPreBarrier(const T &address, MIRType type) {
   1.654 +        JS_ASSERT(type == MIRType_Value ||
   1.655 +                  type == MIRType_String ||
   1.656 +                  type == MIRType_Object ||
   1.657 +                  type == MIRType_Shape);
   1.658 +        Label done;
   1.659 +
   1.660 +        if (type == MIRType_Value)
   1.661 +            branchTestGCThing(Assembler::NotEqual, address, &done);
   1.662 +
   1.663 +        Push(PreBarrierReg);
   1.664 +        computeEffectiveAddress(address, PreBarrierReg);
   1.665 +
   1.666 +        const JitRuntime *rt = GetIonContext()->runtime->jitRuntime();
   1.667 +        JitCode *preBarrier = (type == MIRType_Shape)
   1.668 +                              ? rt->shapePreBarrier()
   1.669 +                              : rt->valuePreBarrier();
   1.670 +
   1.671 +        call(preBarrier);
   1.672 +        Pop(PreBarrierReg);
   1.673 +
   1.674 +        bind(&done);
   1.675 +    }
   1.676 +
   1.677 +    template <typename T>
   1.678 +    void patchableCallPreBarrier(const T &address, MIRType type) {
   1.679 +        JS_ASSERT(type == MIRType_Value ||
   1.680 +                  type == MIRType_String ||
   1.681 +                  type == MIRType_Object ||
   1.682 +                  type == MIRType_Shape);
   1.683 +
   1.684 +        Label done;
   1.685 +
   1.686 +        // All barriers are off by default.
   1.687 +        // They are enabled if necessary at the end of CodeGenerator::generate().
   1.688 +        CodeOffsetLabel nopJump = toggledJump(&done);
   1.689 +        writePrebarrierOffset(nopJump);
   1.690 +
   1.691 +        callPreBarrier(address, type);
   1.692 +        jump(&done);
   1.693 +
   1.694 +        align(8);
   1.695 +        bind(&done);
   1.696 +    }
   1.697 +
   1.698 +    void branchNurseryPtr(Condition cond, const Address &ptr1, const ImmMaybeNurseryPtr &ptr2,
   1.699 +                          Label *label);
   1.700 +    void moveNurseryPtr(const ImmMaybeNurseryPtr &ptr, Register reg);
   1.701 +
   1.702 +    void canonicalizeDouble(FloatRegister reg) {
   1.703 +        Label notNaN;
   1.704 +        branchDouble(DoubleOrdered, reg, reg, &notNaN);
   1.705 +        loadConstantDouble(JS::GenericNaN(), reg);
   1.706 +        bind(&notNaN);
   1.707 +    }
   1.708 +
   1.709 +    void canonicalizeFloat(FloatRegister reg) {
   1.710 +        Label notNaN;
   1.711 +        branchFloat(DoubleOrdered, reg, reg, &notNaN);
   1.712 +        loadConstantFloat32(float(JS::GenericNaN()), reg);
   1.713 +        bind(&notNaN);
   1.714 +    }
   1.715 +
   1.716 +    template<typename T>
   1.717 +    void loadFromTypedArray(int arrayType, const T &src, AnyRegister dest, Register temp, Label *fail);
   1.718 +
   1.719 +    template<typename T>
   1.720 +    void loadFromTypedArray(int arrayType, const T &src, const ValueOperand &dest, bool allowDouble,
   1.721 +                            Register temp, Label *fail);
   1.722 +
    // Emit the store whose width matches the element size of the integer
    // typed-array kind |arrayType|; |value| may be an immediate or register.
    template<typename S, typename T>
    void storeToTypedIntArray(int arrayType, const S &value, const T &dest) {
        switch (arrayType) {
          // 1-byte elements.
          case ScalarTypeDescr::TYPE_INT8:
          case ScalarTypeDescr::TYPE_UINT8:
          case ScalarTypeDescr::TYPE_UINT8_CLAMPED:
            store8(value, dest);
            break;
          // 2-byte elements.
          case ScalarTypeDescr::TYPE_INT16:
          case ScalarTypeDescr::TYPE_UINT16:
            store16(value, dest);
            break;
          // 4-byte elements.
          case ScalarTypeDescr::TYPE_INT32:
          case ScalarTypeDescr::TYPE_UINT32:
            store32(value, dest);
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Invalid typed array type");
        }
    }
   1.743 +
    // Float/double typed-array stores (defined out of line).
    void storeToTypedFloatArray(int arrayType, const FloatRegister &value, const BaseIndex &dest);
    void storeToTypedFloatArray(int arrayType, const FloatRegister &value, const Address &dest);

    // String payloads are unboxed exactly like object payloads, so these
    // simply delegate to the extractObject machinery.
    Register extractString(const Address &address, Register scratch) {
        return extractObject(address, scratch);
    }
    Register extractString(const ValueOperand &value, Register scratch) {
        return extractObject(value, scratch);
    }
   1.753 +
   1.754 +    using MacroAssemblerSpecific::extractTag;
   1.755 +    Register extractTag(const TypedOrValueRegister &reg, Register scratch) {
   1.756 +        if (reg.hasValue())
   1.757 +            return extractTag(reg.valueReg(), scratch);
   1.758 +        mov(ImmWord(MIRTypeToTag(reg.type())), scratch);
   1.759 +        return scratch;
   1.760 +    }
   1.761 +
   1.762 +    using MacroAssemblerSpecific::extractObject;
   1.763 +    Register extractObject(const TypedOrValueRegister &reg, Register scratch) {
   1.764 +        if (reg.hasValue())
   1.765 +            return extractObject(reg.valueReg(), scratch);
   1.766 +        JS_ASSERT(reg.type() == MIRType_Object);
   1.767 +        return reg.typedReg().gpr();
   1.768 +    }
   1.769 +
    // Inline version of js_TypedArray_uint8_clamp_double: clamp a double to
    // the uint8 range and store the result in |output|.
    // This function clobbers the input register.
    void clampDoubleToUint8(FloatRegister input, Register output);
   1.773 +
   1.774 +    using MacroAssemblerSpecific::ensureDouble;
   1.775 +
   1.776 +    template <typename S>
   1.777 +    void ensureDouble(const S &source, FloatRegister dest, Label *failure) {
   1.778 +        Label isDouble, done;
   1.779 +        branchTestDouble(Assembler::Equal, source, &isDouble);
   1.780 +        branchTestInt32(Assembler::NotEqual, source, failure);
   1.781 +
   1.782 +        convertInt32ToDouble(source, dest);
   1.783 +        jump(&done);
   1.784 +
   1.785 +        bind(&isDouble);
   1.786 +        unboxDouble(source, dest);
   1.787 +
   1.788 +        bind(&done);
   1.789 +    }
   1.790 +
    // Emit type case branch on tag matching if the type tag in the definition
    // might actually be that type.
    void branchEqualTypeIfNeeded(MIRType type, MDefinition *maybeDef, Register tag, Label *label);

    // Inline allocation (defined out of line); |fail| is taken when the
    // inline path cannot allocate.
    void newGCThing(Register result, Register temp, gc::AllocKind allocKind, Label *fail,
                    gc::InitialHeap initialHeap = gc::DefaultHeap);
    void newGCThing(Register result, Register temp, JSObject *templateObject, Label *fail,
                    gc::InitialHeap initialHeap);
    void newGCString(Register result, Register temp, Label *fail);
    void newGCFatInlineString(Register result, Register temp, Label *fail);

    // Parallel-execution variants of the inline allocators above; |cx| holds
    // the per-thread context register.
    void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                       gc::AllocKind allocKind, Label *fail);
    void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                       JSObject *templateObject, Label *fail);
    void newGCStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                        Label *fail);
    void newGCFatInlineStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                                 Label *fail);

    // Initialization of a freshly allocated object from a template object.
    void copySlotsFromTemplate(Register obj, Register temp, const JSObject *templateObj,
                               uint32_t start, uint32_t end);
    void fillSlotsWithUndefined(Register obj, Register temp, const JSObject *templateObj,
                                uint32_t start, uint32_t end);
    void initGCSlots(Register obj, Register temp, JSObject *templateObj);
    void initGCThing(Register obj, Register temp, JSObject *templateObj);

    // Compares two strings for equality based on the JSOP.
    // This checks for identical pointers, atoms and length and fails for everything else.
    void compareStrings(JSOp op, Register left, Register right, Register result,
                        Register temp, Label *fail);

    // Checks the flags that signal that parallel code may need to interrupt or
    // abort.  Branches to fail in that case.
    void checkInterruptFlagPar(Register tempReg, Label *fail);
   1.827 +
    // If the JitCode that created this assembler needs to transition into the VM,
    // we want to store the JitCode on the stack in order to mark it during a GC.
    // This is a reference to a patch location where the JitCode* will be written.
  private:
    CodeOffsetLabel exitCodePatch_;

  public:
    void enterExitFrame(const VMFunction *f = nullptr) {
        linkExitFrame();
        // Push the ioncode. (Bailout or VM wrapper)
        // The -1 placeholder is replaced with the real JitCode* in link().
        exitCodePatch_ = PushWithPatch(ImmWord(-1));
        // Push VMFunction pointer, to mark arguments.
        Push(ImmPtr(f));
    }
    // Fake exit frames push an explicit (possibly null) code pointer and a
    // null VMFunction*, so no later patching is required.
    void enterFakeExitFrame(JitCode *codeVal = nullptr) {
        linkExitFrame();
        Push(ImmPtr(codeVal));
        Push(ImmPtr(nullptr));
    }
   1.847 +
    void loadThreadPool(Register pool) {
        // JitRuntimes are tied to JSRuntimes and there is one ThreadPool per
        // JSRuntime, so we can hardcode the ThreadPool address here.
        movePtr(ImmPtr(GetIonContext()->runtime->addressOfThreadPool()), pool);
    }

    // Load the active context register for the given execution mode
    // (definitions out of line).
    void loadForkJoinContext(Register cx, Register scratch);
    void loadContext(Register cxReg, Register scratch, ExecutionMode executionMode);

    // Exit-frame entry combined with loading the appropriate context; the
    // parallel variants are used from PJS-compiled code.
    void enterParallelExitFrameAndLoadContext(const VMFunction *f, Register cx,
                                              Register scratch);

    void enterExitFrameAndLoadContext(const VMFunction *f, Register cxReg, Register scratch,
                                      ExecutionMode executionMode);

    void enterFakeParallelExitFrame(Register cx, Register scratch,
                                    JitCode *codeVal = nullptr);

    void enterFakeExitFrame(Register cxReg, Register scratch,
                            ExecutionMode executionMode,
                            JitCode *codeVal = nullptr);
   1.869 +
    void leaveExitFrame() {
        // Pop the footer words pushed by enterExitFrame/enterFakeExitFrame.
        freeStack(IonExitFooterFrame::Size());
    }

    bool hasEnteredExitFrame() const {
        // exitCodePatch_ is only assigned by enterExitFrame, so a zero offset
        // means this assembler never pushed a patchable exit frame.
        return exitCodePatch_.offset() != 0;
    }
   1.877 +
    void link(JitCode *code) {
        JS_ASSERT(!oom());
        // If this code can transition to C++ code and witness a GC, then we need to store
        // the JitCode onto the stack in order to GC it correctly.  exitCodePatch should
        // be unset if the code never needed to push its JitCode*.
        if (hasEnteredExitFrame()) {
            exitCodePatch_.fixup(this);
            // Replace the -1 placeholder pushed by enterExitFrame with the
            // real code pointer, verifying the placeholder is still there.
            patchDataWithValueCheck(CodeLocationLabel(code, exitCodePatch_),
                                    ImmPtr(code),
                                    ImmPtr((void*)-1));
        }

    }
   1.891 +
    // Generates code used to complete a bailout.
    void generateBailoutTail(Register scratch, Register bailoutInfo);

    // These functions exist as small wrappers around sites where execution can
    // leave the currently running stream of instructions. They exist so that
    // instrumentation may be put in place around them if necessary and the
    // instrumentation is enabled. For the functions that return a uint32_t,
    // they are returning the offset of the assembler just after the call has
    // been made so that a safepoint can be made at that location.

    // Raw ABI call with no SPS profiler bracketing.
    template <typename T>
    void callWithABINoProfiling(const T &fun, MoveOp::Type result = MoveOp::GENERAL) {
        MacroAssemblerSpecific::callWithABI(fun, result);
    }

    // ABI call bracketed by leave/reenter of the current SPS frame.
    template <typename T>
    void callWithABI(const T &fun, MoveOp::Type result = MoveOp::GENERAL) {
        leaveSPSFrame();
        callWithABINoProfiling(fun, result);
        reenterSPSFrame();
    }
   1.913 +
    // see above comment for what is returned
    uint32_t callIon(Register callee) {
        leaveSPSFrame();
        MacroAssemblerSpecific::callIon(callee);
        // Capture the offset immediately after the call — before the SPS
        // reenter code — so the safepoint lands at the return address.
        uint32_t ret = currentOffset();
        reenterSPSFrame();
        return ret;
    }

    // see above comment for what is returned
    uint32_t callWithExitFrame(JitCode *target) {
        leaveSPSFrame();
        MacroAssemblerSpecific::callWithExitFrame(target);
        uint32_t ret = currentOffset();
        reenterSPSFrame();
        return ret;
    }

    // see above comment for what is returned
    uint32_t callWithExitFrame(JitCode *target, Register dynStack) {
        leaveSPSFrame();
        MacroAssemblerSpecific::callWithExitFrame(target, dynStack);
        uint32_t ret = currentOffset();
        reenterSPSFrame();
        return ret;
    }
   1.940 +
   1.941 +    void branchTestObjectTruthy(bool truthy, Register objReg, Register scratch,
   1.942 +                                Label *slowCheck, Label *checked)
   1.943 +    {
   1.944 +        // The branches to out-of-line code here implement a conservative version
   1.945 +        // of the JSObject::isWrapper test performed in EmulatesUndefined.  If none
   1.946 +        // of the branches are taken, we can check class flags directly.
   1.947 +        loadObjClass(objReg, scratch);
   1.948 +        Address flags(scratch, Class::offsetOfFlags());
   1.949 +
   1.950 +        branchTest32(Assembler::NonZero, flags, Imm32(JSCLASS_IS_PROXY), slowCheck);
   1.951 +
   1.952 +        Condition cond = truthy ? Assembler::Zero : Assembler::NonZero;
   1.953 +        branchTest32(cond, flags, Imm32(JSCLASS_EMULATES_UNDEFINED), checked);
   1.954 +    }
   1.955 +
  private:
    // These two functions are helpers used around call sites throughout the
    // assembler. They are called from the above call wrappers to emit the
    // necessary instrumentation.
    void leaveSPSFrame() {
        // No-op unless SPS instrumentation is attached and enabled.
        if (!sps_ || !sps_->enabled())
            return;
        // No registers are guaranteed to be available, so push/pop a register
        // so we can use one
        push(CallTempReg0);
        sps_->leave(*this, CallTempReg0);
        pop(CallTempReg0);
    }
   1.969 +
   1.970 +    void reenterSPSFrame() {
   1.971 +        if (!sps_ || !sps_->enabled())
   1.972 +            return;
   1.973 +        // Attempt to use a now-free register within a given set, but if the
   1.974 +        // architecture being built doesn't have an available register, resort
   1.975 +        // to push/pop
   1.976 +        GeneralRegisterSet regs(Registers::TempMask & ~Registers::JSCallMask &
   1.977 +                                                      ~Registers::CallMask);
   1.978 +        if (regs.empty()) {
   1.979 +            push(CallTempReg0);
   1.980 +            sps_->reenter(*this, CallTempReg0);
   1.981 +            pop(CallTempReg0);
   1.982 +        } else {
   1.983 +            sps_->reenter(*this, regs.getAny());
   1.984 +        }
   1.985 +    }
   1.986 +
    // Compute in |temp| the address of the SPS stack entry at index
    // (current size + offset); branch to |full| when that index reaches the
    // profiler's capacity. The size pointer, max size and stack base are
    // baked in as immediates, so this is only valid while the profiler's
    // buffers are unchanged (the "unsafe" variant — see the comment below).
    void spsProfileEntryAddress(SPSProfiler *p, int offset, Register temp,
                                Label *full)
    {
        movePtr(ImmPtr(p->sizePointer()), temp);
        load32(Address(temp, 0), temp);
        if (offset != 0)
            add32(Imm32(offset), temp);
        branch32(Assembler::GreaterThanOrEqual, temp, Imm32(p->maxSize()), full);

        // 4 * sizeof(void*) * idx = idx << (2 + log(sizeof(void*)))
        JS_STATIC_ASSERT(sizeof(ProfileEntry) == 4 * sizeof(void*));
        lshiftPtr(Imm32(2 + (sizeof(void*) == 4 ? 2 : 3)), temp);
        addPtr(ImmPtr(p->stack()), temp);
    }
  1.1001 +
    // The safe version of the above method refrains from assuming that the fields
    // of the SPSProfiler class are going to stay the same across different runs of
    // the jitcode.  Ion can use the more efficient unsafe version because ion jitcode
    // will not survive changes to the profiler settings.  Baseline jitcode, however,
    // can span these changes, so any hardcoded field values will be incorrect afterwards.
    // All the sps-related methods used by baseline call |spsProfileEntryAddressSafe|.
    void spsProfileEntryAddressSafe(SPSProfiler *p, int offset, Register temp,
                                    Label *full)
    {
        // Load size pointer
        loadPtr(AbsoluteAddress(p->addressOfSizePointer()), temp);

        // Load size
        load32(Address(temp, 0), temp);
        if (offset != 0)
            add32(Imm32(offset), temp);

        // Test against max size.
        branch32(Assembler::LessThanOrEqual, AbsoluteAddress(p->addressOfMaxSize()), temp, full);

        // 4 * sizeof(void*) * idx = idx << (2 + log(sizeof(void*)))
        JS_STATIC_ASSERT(sizeof(ProfileEntry) == 4 * sizeof(void*));
        lshiftPtr(Imm32(2 + (sizeof(void*) == 4 ? 2 : 3)), temp);
        // Only one temp register is available, so park the scaled index on
        // the stack while |temp| is reused to load the stack base...
        push(temp);
        loadPtr(AbsoluteAddress(p->addressOfStack()), temp);
        // ...add the saved index back in from the stack slot...
        addPtr(Address(StackPointer, 0), temp);
        // ...and discard that slot without clobbering |temp|.
        addPtr(Imm32(sizeof(size_t)), StackPointer);
    }
  1.1030 +
  public:
    // These functions are needed by the IonInstrumentation interface defined in
    // vm/SPSProfiler.h.  They will modify the pseudostack provided to SPS to
    // perform the actual instrumentation.

    // Store |idx| into the pcIdx slot of the topmost entry (offset -1); the
    // store is skipped entirely when the profiler stack is full.
    void spsUpdatePCIdx(SPSProfiler *p, int32_t idx, Register temp) {
        Label stackFull;
        spsProfileEntryAddress(p, -1, temp, &stackFull);
        store32(Imm32(idx), Address(temp, ProfileEntry::offsetOfPCIdx()));
        bind(&stackFull);
    }

    // Register variant; uses the Safe address computation (see above), as
    // required by Baseline callers.
    void spsUpdatePCIdx(SPSProfiler *p, Register idx, Register temp) {
        Label stackFull;
        spsProfileEntryAddressSafe(p, -1, temp, &stackFull);
        store32(idx, Address(temp, ProfileEntry::offsetOfPCIdx()));
        bind(&stackFull);
    }
  1.1049 +
    // spsPushFrame variant for Ion-optimized scripts.
    void spsPushFrame(SPSProfiler *p, const char *str, JSScript *s, Register temp) {
        Label stackFull;
        spsProfileEntryAddress(p, 0, temp, &stackFull);

        storePtr(ImmPtr(str),  Address(temp, ProfileEntry::offsetOfString()));
        storePtr(ImmGCPtr(s),  Address(temp, ProfileEntry::offsetOfScript()));
        storePtr(ImmPtr((void*) ProfileEntry::SCRIPT_OPT_STACKPOINTER),
                 Address(temp, ProfileEntry::offsetOfStackAddress()));
        store32(Imm32(ProfileEntry::NullPCIndex), Address(temp, ProfileEntry::offsetOfPCIdx()));

        /* Always increment the stack size, whether or not we actually pushed. */
        bind(&stackFull);
        movePtr(ImmPtr(p->sizePointer()), temp);
        add32(Imm32(1), Address(temp, 0));
    }
  1.1066 +
    // spsPushFrame variant for Baseline-optimized scripts. Uses the Safe
    // address computation and loads |str|/|script| from memory rather than
    // baking them in as immediates.
    void spsPushFrame(SPSProfiler *p, const Address &str, const Address &script,
                      Register temp, Register temp2)
    {
        Label stackFull;
        spsProfileEntryAddressSafe(p, 0, temp, &stackFull);

        loadPtr(str, temp2);
        storePtr(temp2, Address(temp, ProfileEntry::offsetOfString()));

        loadPtr(script, temp2);
        storePtr(temp2, Address(temp, ProfileEntry::offsetOfScript()));

        storePtr(ImmPtr(nullptr), Address(temp, ProfileEntry::offsetOfStackAddress()));

        // Store 0 for PCIdx because that's what interpreter does.
        // (See probes::EnterScript, which calls spsProfiler.enter, which pushes an entry
        //  with 0 pcIdx).
        store32(Imm32(0), Address(temp, ProfileEntry::offsetOfPCIdx()));

        /* Always increment the stack size, whether or not we actually pushed. */
        bind(&stackFull);
        movePtr(ImmPtr(p->addressOfSizePointer()), temp);
        loadPtr(Address(temp, 0), temp);
        add32(Imm32(1), Address(temp, 0));
    }
  1.1093 +
    // Decrement the profiler stack size (pop the top entry); Ion-only, since
    // the size pointer is baked in as an immediate.
    void spsPopFrame(SPSProfiler *p, Register temp) {
        movePtr(ImmPtr(p->sizePointer()), temp);
        add32(Imm32(-1), Address(temp, 0));
    }

    // spsPopFrameSafe does not assume |profiler->sizePointer()| will stay constant.
    void spsPopFrameSafe(SPSProfiler *p, Register temp) {
        loadPtr(AbsoluteAddress(p->addressOfSizePointer()), temp);
        add32(Imm32(-1), Address(temp, 0));
    }
  1.1104 +
    // Label text and helpers for marking JIT entry in the SPS pseudostack
    // (definitions out of line).
    static const char enterJitLabel[];
    void spsMarkJit(SPSProfiler *p, Register framePtr, Register temp);
    void spsUnmarkJit(SPSProfiler *p, Register temp);

    void loadBaselineOrIonRaw(Register script, Register dest, ExecutionMode mode, Label *failure);
    void loadBaselineOrIonNoArgCheck(Register callee, Register dest, ExecutionMode mode, Label *failure);

    void loadBaselineFramePtr(Register framePtr, Register dest);

    // Load the baseline frame pointer into |scratch| and push it.
    void pushBaselineFramePtr(Register framePtr, Register scratch) {
        loadBaselineFramePtr(framePtr, scratch);
        push(scratch);
    }

  private:
    void handleFailure(ExecutionMode executionMode);
  1.1121 +
  public:
    Label *exceptionLabel() {
        // Exceptions are currently handled the same way as sequential failures.
        return &sequentialFailureLabel_;
    }
  1.1127 +
  1.1128 +    Label *failureLabel(ExecutionMode executionMode) {
  1.1129 +        switch (executionMode) {
  1.1130 +          case SequentialExecution: return &sequentialFailureLabel_;
  1.1131 +          case ParallelExecution: return &parallelFailureLabel_;
  1.1132 +          default: MOZ_ASSUME_UNREACHABLE("Unexpected execution mode");
  1.1133 +        }
  1.1134 +    }
  1.1135 +
    void finish();

    // Debugging/diagnostic emitters (definitions out of line).
    void assumeUnreachable(const char *output);
    void printf(const char *output);
    void printf(const char *output, Register value);

#ifdef JS_TRACE_LOGGING
    void tracelogStart(Register logger, uint32_t textId);
    void tracelogStart(Register logger, Register textId);
    void tracelogStop(Register logger, uint32_t textId);
    void tracelogStop(Register logger, Register textId);
    void tracelogStop(Register logger);
#endif
  1.1149 +
  1.1150 +#define DISPATCH_FLOATING_POINT_OP(method, type, arg1d, arg1f, arg2)    \
  1.1151 +    JS_ASSERT(IsFloatingPointType(type));                               \
  1.1152 +    if (type == MIRType_Double)                                         \
  1.1153 +        method##Double(arg1d, arg2);                                    \
  1.1154 +    else                                                                \
  1.1155 +        method##Float32(arg1f, arg2);                                   \
  1.1156 +
  1.1157 +    void loadConstantFloatingPoint(double d, float f, FloatRegister dest, MIRType destType) {
  1.1158 +        DISPATCH_FLOATING_POINT_OP(loadConstant, destType, d, f, dest);
  1.1159 +    }
  1.1160 +    void boolValueToFloatingPoint(ValueOperand value, FloatRegister dest, MIRType destType) {
  1.1161 +        DISPATCH_FLOATING_POINT_OP(boolValueTo, destType, value, value, dest);
  1.1162 +    }
  1.1163 +    void int32ValueToFloatingPoint(ValueOperand value, FloatRegister dest, MIRType destType) {
  1.1164 +        DISPATCH_FLOATING_POINT_OP(int32ValueTo, destType, value, value, dest);
  1.1165 +    }
  1.1166 +    void convertInt32ToFloatingPoint(Register src, FloatRegister dest, MIRType destType) {
  1.1167 +        DISPATCH_FLOATING_POINT_OP(convertInt32To, destType, src, src, dest);
  1.1168 +    }
  1.1169 +
  1.1170 +#undef DISPATCH_FLOATING_POINT_OP
  1.1171 +
    // Generic value-to-floating-point conversion (definitions out of line);
    // |outputType| selects double vs. float32 behavior.
    void convertValueToFloatingPoint(ValueOperand value, FloatRegister output, Label *fail,
                                     MIRType outputType);
    bool convertValueToFloatingPoint(JSContext *cx, const Value &v, FloatRegister output,
                                     Label *fail, MIRType outputType);
    bool convertConstantOrRegisterToFloatingPoint(JSContext *cx, ConstantOrRegister src,
                                                  FloatRegister output, Label *fail,
                                                  MIRType outputType);
    void convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, FloatRegister output,
                                            Label *fail, MIRType outputType);

    void convertInt32ValueToDouble(const Address &address, Register scratch, Label *done);

    // Double-typed convenience wrappers around the generic conversions above.
    void convertValueToDouble(ValueOperand value, FloatRegister output, Label *fail) {
        convertValueToFloatingPoint(value, output, fail, MIRType_Double);
    }
    bool convertValueToDouble(JSContext *cx, const Value &v, FloatRegister output, Label *fail) {
        return convertValueToFloatingPoint(cx, v, output, fail, MIRType_Double);
    }
    bool convertConstantOrRegisterToDouble(JSContext *cx, ConstantOrRegister src,
                                           FloatRegister output, Label *fail)
    {
        return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType_Double);
    }
    void convertTypedOrValueToDouble(TypedOrValueRegister src, FloatRegister output, Label *fail) {
        convertTypedOrValueToFloatingPoint(src, output, fail, MIRType_Double);
    }

    // Float32-typed convenience wrappers around the generic conversions above.
    void convertValueToFloat(ValueOperand value, FloatRegister output, Label *fail) {
        convertValueToFloatingPoint(value, output, fail, MIRType_Float32);
    }
    bool convertValueToFloat(JSContext *cx, const Value &v, FloatRegister output, Label *fail) {
        return convertValueToFloatingPoint(cx, v, output, fail, MIRType_Float32);
    }
    bool convertConstantOrRegisterToFloat(JSContext *cx, ConstantOrRegister src,
                                          FloatRegister output, Label *fail)
    {
        return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType_Float32);
    }
    void convertTypedOrValueToFloat(TypedOrValueRegister src, FloatRegister output, Label *fail) {
        convertTypedOrValueToFloatingPoint(src, output, fail, MIRType_Float32);
    }
  1.1212 +
    // How convertValueToInt (and the wrappers below) treat their input.
    enum IntConversionBehavior {
        // Only exactly-representable int32 values convert.
        IntConversion_Normal,
        // As Normal, but additionally rejects negative zero.
        IntConversion_NegativeZeroCheck,
        // Truncating conversion.
        IntConversion_Truncate,
        // Clamp to the uint8 range.
        IntConversion_ClampToUint8,
    };

    // Which input types the conversion accepts before failing.
    enum IntConversionInputKind {
        IntConversion_NumbersOnly,
        IntConversion_NumbersOrBoolsOnly,
        IntConversion_Any
    };
  1.1225 +
    //
    // Functions for converting values to int.
    //
    void convertDoubleToInt(FloatRegister src, Register output, FloatRegister temp,
                            Label *truncateFail, Label *fail, IntConversionBehavior behavior);

    // Strings may be handled by providing labels to jump to when the behavior
    // is truncation or clamping. The subroutine, usually an OOL call, is
    // passed the unboxed string in |stringReg| and should convert it to a
    // double stored into |temp|.
    void convertValueToInt(ValueOperand value, MDefinition *input,
                           Label *handleStringEntry, Label *handleStringRejoin,
                           Label *truncateDoubleSlow,
                           Register stringReg, FloatRegister temp, Register output,
                           Label *fail, IntConversionBehavior behavior,
                           IntConversionInputKind conversion = IntConversion_Any);
    // Convenience overload with no string handling and no MIR input info.
    void convertValueToInt(ValueOperand value, FloatRegister temp, Register output, Label *fail,
                           IntConversionBehavior behavior)
    {
        convertValueToInt(value, nullptr, nullptr, nullptr, nullptr, InvalidReg, temp, output,
                          fail, behavior);
    }
    bool convertValueToInt(JSContext *cx, const Value &v, Register output, Label *fail,
                           IntConversionBehavior behavior);
    bool convertConstantOrRegisterToInt(JSContext *cx, ConstantOrRegister src, FloatRegister temp,
                                        Register output, Label *fail, IntConversionBehavior behavior);
    void convertTypedOrValueToInt(TypedOrValueRegister src, FloatRegister temp, Register output,
                                  Label *fail, IntConversionBehavior behavior);
  1.1254 +
  1.1255 +    //
  1.1256 +    // Convenience functions for converting values to int32.
  1.1257 +    //
  1.1258 +    void convertValueToInt32(ValueOperand value, FloatRegister temp, Register output, Label *fail,
  1.1259 +                             bool negativeZeroCheck)
  1.1260 +    {
  1.1261 +        convertValueToInt(value, temp, output, fail, negativeZeroCheck
  1.1262 +                          ? IntConversion_NegativeZeroCheck
  1.1263 +                          : IntConversion_Normal);
  1.1264 +    }
  1.1265 +    void convertValueToInt32(ValueOperand value, MDefinition *input,
  1.1266 +                             FloatRegister temp, Register output, Label *fail,
  1.1267 +                             bool negativeZeroCheck, IntConversionInputKind conversion = IntConversion_Any)
  1.1268 +    {
  1.1269 +        convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
  1.1270 +                          negativeZeroCheck
  1.1271 +                          ? IntConversion_NegativeZeroCheck
  1.1272 +                          : IntConversion_Normal,
  1.1273 +                          conversion);
  1.1274 +    }
  1.1275 +    bool convertValueToInt32(JSContext *cx, const Value &v, Register output, Label *fail,
  1.1276 +                             bool negativeZeroCheck)
  1.1277 +    {
  1.1278 +        return convertValueToInt(cx, v, output, fail, negativeZeroCheck
  1.1279 +                                 ? IntConversion_NegativeZeroCheck
  1.1280 +                                 : IntConversion_Normal);
  1.1281 +    }
  1.1282 +    bool convertConstantOrRegisterToInt32(JSContext *cx, ConstantOrRegister src, FloatRegister temp,
  1.1283 +                                          Register output, Label *fail, bool negativeZeroCheck)
  1.1284 +    {
  1.1285 +        return convertConstantOrRegisterToInt(cx, src, temp, output, fail, negativeZeroCheck
  1.1286 +                                              ? IntConversion_NegativeZeroCheck
  1.1287 +                                              : IntConversion_Normal);
  1.1288 +    }
  1.1289 +    void convertTypedOrValueToInt32(TypedOrValueRegister src, FloatRegister temp, Register output,
  1.1290 +                                    Label *fail, bool negativeZeroCheck)
  1.1291 +    {
  1.1292 +        convertTypedOrValueToInt(src, temp, output, fail, negativeZeroCheck
  1.1293 +                                 ? IntConversion_NegativeZeroCheck
  1.1294 +                                 : IntConversion_Normal);
  1.1295 +    }
  1.1296 +
    //
    // Convenience functions for truncating values to int32. Each forwards to
    // the generic conversions with IntConversion_Truncate.
    //
    void truncateValueToInt32(ValueOperand value, FloatRegister temp, Register output, Label *fail) {
        convertValueToInt(value, temp, output, fail, IntConversion_Truncate);
    }
    // Full-control variant with string-handling and slow-path labels.
    void truncateValueToInt32(ValueOperand value, MDefinition *input,
                              Label *handleStringEntry, Label *handleStringRejoin,
                              Label *truncateDoubleSlow,
                              Register stringReg, FloatRegister temp, Register output, Label *fail)
    {
        convertValueToInt(value, input, handleStringEntry, handleStringRejoin, truncateDoubleSlow,
                          stringReg, temp, output, fail, IntConversion_Truncate);
    }
    void truncateValueToInt32(ValueOperand value, MDefinition *input,
                              FloatRegister temp, Register output, Label *fail)
    {
        convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
                          IntConversion_Truncate);
    }
    bool truncateValueToInt32(JSContext *cx, const Value &v, Register output, Label *fail) {
        return convertValueToInt(cx, v, output, fail, IntConversion_Truncate);
    }
    bool truncateConstantOrRegisterToInt32(JSContext *cx, ConstantOrRegister src, FloatRegister temp,
                                           Register output, Label *fail)
    {
        return convertConstantOrRegisterToInt(cx, src, temp, output, fail, IntConversion_Truncate);
    }
    void truncateTypedOrValueToInt32(TypedOrValueRegister src, FloatRegister temp, Register output,
                                     Label *fail)
    {
        convertTypedOrValueToInt(src, temp, output, fail, IntConversion_Truncate);
    }
  1.1330 +
  1.1331 +    // Convenience functions for clamping values to uint8.
  1.1332 +    void clampValueToUint8(ValueOperand value, FloatRegister temp, Register output, Label *fail) {
  1.1333 +        convertValueToInt(value, temp, output, fail, IntConversion_ClampToUint8);
  1.1334 +    }
  1.1335 +    void clampValueToUint8(ValueOperand value, MDefinition *input,
  1.1336 +                           Label *handleStringEntry, Label *handleStringRejoin,
  1.1337 +                           Register stringReg, FloatRegister temp, Register output, Label *fail)
  1.1338 +    {
  1.1339 +        convertValueToInt(value, input, handleStringEntry, handleStringRejoin, nullptr,
  1.1340 +                          stringReg, temp, output, fail, IntConversion_ClampToUint8);
  1.1341 +    }
  1.1342 +    void clampValueToUint8(ValueOperand value, MDefinition *input,
  1.1343 +                           FloatRegister temp, Register output, Label *fail)
  1.1344 +    {
  1.1345 +        convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
  1.1346 +                          IntConversion_ClampToUint8);
  1.1347 +    }
  1.1348 +    bool clampValueToUint8(JSContext *cx, const Value &v, Register output, Label *fail) {
  1.1349 +        return convertValueToInt(cx, v, output, fail, IntConversion_ClampToUint8);
  1.1350 +    }
  1.1351 +    bool clampConstantOrRegisterToUint8(JSContext *cx, ConstantOrRegister src, FloatRegister temp,
  1.1352 +                                        Register output, Label *fail)
  1.1353 +    {
  1.1354 +        return convertConstantOrRegisterToInt(cx, src, temp, output, fail,
  1.1355 +                                              IntConversion_ClampToUint8);
  1.1356 +    }
  1.1357 +    void clampTypedOrValueToUint8(TypedOrValueRegister src, FloatRegister temp, Register output,
  1.1358 +                                  Label *fail)
  1.1359 +    {
  1.1360 +        convertTypedOrValueToInt(src, temp, output, fail, IntConversion_ClampToUint8);
  1.1361 +    }
  1.1362 +
  1.1363 +  public:
  1.1364 +    class AfterICSaveLive {
  1.1365 +        friend class MacroAssembler;
  1.1366 +        AfterICSaveLive(uint32_t initialStack)
  1.1367 +#ifdef JS_DEBUG
  1.1368 +          : initialStack(initialStack)
  1.1369 +#endif
  1.1370 +        {}
  1.1371 +
  1.1372 +#ifdef JS_DEBUG
  1.1373 +      public:
  1.1374 +        uint32_t initialStack;
  1.1375 +#endif
  1.1376 +    };
  1.1377 +
  1.1378 +    AfterICSaveLive icSaveLive(RegisterSet &liveRegs) {
  1.1379 +        PushRegsInMask(liveRegs);
  1.1380 +        return AfterICSaveLive(framePushed());
  1.1381 +    }
  1.1382 +
  1.1383 +    bool icBuildOOLFakeExitFrame(void *fakeReturnAddr, AfterICSaveLive &aic) {
  1.1384 +        return buildOOLFakeExitFrame(fakeReturnAddr);
  1.1385 +    }
  1.1386 +
  1.1387 +    void icRestoreLive(RegisterSet &liveRegs, AfterICSaveLive &aic) {
  1.1388 +        JS_ASSERT(framePushed() == aic.initialStack);
  1.1389 +        PopRegsInMask(liveRegs);
  1.1390 +    }
  1.1391 +};
  1.1392 +
  1.1393 +static inline Assembler::DoubleCondition
  1.1394 +JSOpToDoubleCondition(JSOp op)
  1.1395 +{
  1.1396 +    switch (op) {
  1.1397 +      case JSOP_EQ:
  1.1398 +      case JSOP_STRICTEQ:
  1.1399 +        return Assembler::DoubleEqual;
  1.1400 +      case JSOP_NE:
  1.1401 +      case JSOP_STRICTNE:
  1.1402 +        return Assembler::DoubleNotEqualOrUnordered;
  1.1403 +      case JSOP_LT:
  1.1404 +        return Assembler::DoubleLessThan;
  1.1405 +      case JSOP_LE:
  1.1406 +        return Assembler::DoubleLessThanOrEqual;
  1.1407 +      case JSOP_GT:
  1.1408 +        return Assembler::DoubleGreaterThan;
  1.1409 +      case JSOP_GE:
  1.1410 +        return Assembler::DoubleGreaterThanOrEqual;
  1.1411 +      default:
  1.1412 +        MOZ_ASSUME_UNREACHABLE("Unexpected comparison operation");
  1.1413 +    }
  1.1414 +}
  1.1415 +
  1.1416 +// Note: the op may have been inverted during lowering (to put constants in a
  1.1417 +// position where they can be immediates), so it is important to use the
  1.1418 +// lir->jsop() instead of the mir->jsop() when it is present.
  1.1419 +static inline Assembler::Condition
  1.1420 +JSOpToCondition(JSOp op, bool isSigned)
  1.1421 +{
  1.1422 +    if (isSigned) {
  1.1423 +        switch (op) {
  1.1424 +          case JSOP_EQ:
  1.1425 +          case JSOP_STRICTEQ:
  1.1426 +            return Assembler::Equal;
  1.1427 +          case JSOP_NE:
  1.1428 +          case JSOP_STRICTNE:
  1.1429 +            return Assembler::NotEqual;
  1.1430 +          case JSOP_LT:
  1.1431 +            return Assembler::LessThan;
  1.1432 +          case JSOP_LE:
  1.1433 +            return Assembler::LessThanOrEqual;
  1.1434 +          case JSOP_GT:
  1.1435 +            return Assembler::GreaterThan;
  1.1436 +          case JSOP_GE:
  1.1437 +            return Assembler::GreaterThanOrEqual;
  1.1438 +          default:
  1.1439 +            MOZ_ASSUME_UNREACHABLE("Unrecognized comparison operation");
  1.1440 +        }
  1.1441 +    } else {
  1.1442 +        switch (op) {
  1.1443 +          case JSOP_EQ:
  1.1444 +          case JSOP_STRICTEQ:
  1.1445 +            return Assembler::Equal;
  1.1446 +          case JSOP_NE:
  1.1447 +          case JSOP_STRICTNE:
  1.1448 +            return Assembler::NotEqual;
  1.1449 +          case JSOP_LT:
  1.1450 +            return Assembler::Below;
  1.1451 +          case JSOP_LE:
  1.1452 +            return Assembler::BelowOrEqual;
  1.1453 +          case JSOP_GT:
  1.1454 +            return Assembler::Above;
  1.1455 +          case JSOP_GE:
  1.1456 +            return Assembler::AboveOrEqual;
  1.1457 +          default:
  1.1458 +            MOZ_ASSUME_UNREACHABLE("Unrecognized comparison operation");
  1.1459 +        }
  1.1460 +    }
  1.1461 +}
  1.1462 +
  1.1463 +} // namespace jit
  1.1464 +} // namespace js
  1.1465 +
  1.1466 +#endif // JS_ION
  1.1467 +
  1.1468 +#endif /* jit_IonMacroAssembler_h */

mercurial