js/src/jit/BacktrackingAllocator.h

changeset 0:6474c204b198
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/js/src/jit/BacktrackingAllocator.h	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,252 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BacktrackingAllocator_h
+#define jit_BacktrackingAllocator_h
+
+#include "mozilla/Array.h"
+
+#include "ds/PriorityQueue.h"
+#include "ds/SplayTree.h"
+#include "jit/LiveRangeAllocator.h"
+
+// Backtracking register allocator, driven by a priority queue and modeled on
+// the LLVM greedy register allocator described in the following blog post:
+//
+// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
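+//
+// Intervals are processed in priority order. An interval which cannot be
+// given a register may evict a previously allocated interval with a lower
+// spill weight (the backtracking step), be split into smaller intervals, or
+// be spilled to the stack.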
+
+namespace js {
+namespace jit {
+
+// Information about a group of registers. Registers may be grouped together
+// when (a) all of their lifetimes are disjoint, (b) they are of the same type
+// (double / non-double) and (c) it is desirable that they have the same
+// allocation.
+struct VirtualRegisterGroup : public TempObject
+{
+    // All virtual registers in the group.
+    Vector<uint32_t, 2, IonAllocPolicy> registers;
+
+    // Desired physical register to use for registers in the group.
+    LAllocation allocation;
+
+    // Spill location to be shared by registers in the group.
+    LAllocation spill;
+
+    // Both allocation and spill start out as dummy uses; an LUse value acts
+    // as a sentinel meaning "not yet chosen".
+    explicit VirtualRegisterGroup(TempAllocator &alloc)
+      : registers(alloc), allocation(LUse(0, LUse::ANY)), spill(LUse(0, LUse::ANY))
+    {}
+
+    // The group's representative: the lowest numbered virtual register.
+    uint32_t canonicalReg() {
+        uint32_t minimum = registers[0];
+        for (size_t i = 1; i < registers.length(); i++)
+            minimum = Min(minimum, registers[i]);
+        return minimum;
+    }
+};
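+
+// For example (illustrative only, with an assumed TempAllocator &alloc), two
+// virtual registers with disjoint lifetimes, say vregs 3 and 7, could be
+// grouped so that they share an allocation:
+//
+//   VirtualRegisterGroup *group = new(alloc) VirtualRegisterGroup(alloc);
+//   if (!group->registers.append(3) || !group->registers.append(7))
+//       return false;
+//   JS_ASSERT(group->canonicalReg() == 3);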
+
+class BacktrackingVirtualRegister : public VirtualRegister
+{
+    // If this register's definition is MUST_REUSE_INPUT, whether a copy must
+    // be introduced before the definition that relaxes the policy.
+    bool mustCopyInput_;
+
+    // Spill location to use for this register.
+    LAllocation canonicalSpill_;
+
+    // Code position above which the canonical spill cannot be used; the
+    // register's intervals above this position may overlap with other
+    // registers in the same group.
+    CodePosition canonicalSpillExclude_;
+
+    // If this register is associated with a group of other registers,
+    // information about the group. This structure is shared between all
+    // registers in the group.
+    VirtualRegisterGroup *group_;
+
+  public:
+    explicit BacktrackingVirtualRegister(TempAllocator &alloc)
+      : VirtualRegister(alloc), mustCopyInput_(false), group_(nullptr)
+    {}
+    void setMustCopyInput() {
+        mustCopyInput_ = true;
+    }
+    bool mustCopyInput() {
+        return mustCopyInput_;
+    }
+
+    // An LUse stored in canonicalSpill_ is a sentinel meaning "no canonical
+    // spill has been set"; actual spill locations are never uses.
+    void setCanonicalSpill(LAllocation alloc) {
+        JS_ASSERT(!alloc.isUse());
+        canonicalSpill_ = alloc;
+    }
+    const LAllocation *canonicalSpill() const {
+        return canonicalSpill_.isUse() ? nullptr : &canonicalSpill_;
+    }
+
+    void setCanonicalSpillExclude(CodePosition pos) {
+        canonicalSpillExclude_ = pos;
+    }
+    bool hasCanonicalSpillExclude() const {
+        return canonicalSpillExclude_.pos() != 0;
+    }
+    CodePosition canonicalSpillExclude() const {
+        JS_ASSERT(hasCanonicalSpillExclude());
+        return canonicalSpillExclude_;
+    }
+
+    void setGroup(VirtualRegisterGroup *group) {
+        group_ = group;
+    }
+    VirtualRegisterGroup *group() {
+        return group_;
+    }
+};
+
+// A sequence of code positions, for telling BacktrackingAllocator::splitAt
+// where to split.
+typedef js::Vector<CodePosition, 4, SystemAllocPolicy> SplitPositionVector;
+
+class BacktrackingAllocator
+  : private LiveRangeAllocator<BacktrackingVirtualRegister, /* forLSRA = */ false>
+{
+    // Priority queue element: either a live interval or a register group,
+    // together with the associated priority.
+    struct QueueItem
+    {
+        LiveInterval *interval;
+        VirtualRegisterGroup *group;
+
+        QueueItem(LiveInterval *interval, size_t priority)
+          : interval(interval), group(nullptr), priority_(priority)
+        {}
+
+        QueueItem(VirtualRegisterGroup *group, size_t priority)
+          : interval(nullptr), group(group), priority_(priority)
+        {}
+
+        static size_t priority(const QueueItem &v) {
+            return v.priority_;
+        }
+
+      private:
+        size_t priority_;
+    };
+
+    PriorityQueue<QueueItem, QueueItem, 0, SystemAllocPolicy> allocationQueue;
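+
+    // The allocation loop pops the highest-priority item and processes it,
+    // along these lines:
+    //
+    //   while (!allocationQueue.empty()) {
+    //       QueueItem item = allocationQueue.removeHighest();
+    //       if (item.interval ? !processInterval(item.interval)
+    //                         : !processGroup(item.group))
+    //           return false;
+    //   }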
+
+    // A subrange over which a physical register is allocated.
+    struct AllocatedRange {
+        LiveInterval *interval;
+        const LiveInterval::Range *range;
+
+        AllocatedRange()
+          : interval(nullptr), range(nullptr)
+        {}
+
+        AllocatedRange(LiveInterval *interval, const LiveInterval::Range *range)
+          : interval(interval), range(range)
+        {}
+
+        // Comparator for AllocatedRangeSet below: disjoint ranges order by
+        // position, and overlapping ranges compare as equal, so a tree
+        // lookup finds any existing range that conflicts with the probe.
+        static int compare(const AllocatedRange &v0, const AllocatedRange &v1) {
+            // LiveInterval::Range includes 'from' but excludes 'to'.
+            if (v0.range->to.pos() <= v1.range->from.pos())
+                return -1;
+            if (v0.range->from.pos() >= v1.range->to.pos())
+                return 1;
+            return 0;
+        }
+    };
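+
+    // For example, ranges [4,8) and [8,12) are disjoint and compare as -1,
+    // while [4,10) and [8,12) overlap at positions 8 and 9 and compare as 0.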
+
+    typedef SplayTree<AllocatedRange, AllocatedRange> AllocatedRangeSet;
+
+    // Each physical register is associated with the set of ranges over which
+    // that register is currently allocated.
+    struct PhysicalRegister {
+        bool allocatable;
+        AnyRegister reg;
+        AllocatedRangeSet allocations;
+
+        PhysicalRegister() : allocatable(false) {}
+    };
+    mozilla::Array<PhysicalRegister, AnyRegister::Total> registers;
+
+    // Ranges of code which are considered to be hot, for which good allocation
+    // should be prioritized.
+    AllocatedRangeSet hotcode;
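+
+    // Intervals spanning both hot and cold code are candidates for being
+    // split at the boundary so that the hot portion can keep a register;
+    // see trySplitAcrossHotcode below.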
+
+  public:
+    BacktrackingAllocator(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
+      : LiveRangeAllocator<BacktrackingVirtualRegister, /* forLSRA = */ false>(mir, lir, graph)
+    { }
+
+    // Entry point: run register allocation over the whole graph.
+    bool go();
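+
+    // For example, given the caller's MIRGenerator *mir, LIRGenerator lirgen
+    // and LIRGraph *lir:
+    //
+    //   BacktrackingAllocator regalloc(mir, &lirgen, *lir);
+    //   if (!regalloc.go())
+    //       return false;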
+
+  private:
+
+    typedef Vector<LiveInterval *, 4, SystemAllocPolicy> LiveIntervalVector;
+
+    bool init();
+    bool canAddToGroup(VirtualRegisterGroup *group, BacktrackingVirtualRegister *reg);
+    bool tryGroupRegisters(uint32_t vreg0, uint32_t vreg1);
+    bool tryGroupReusedRegister(uint32_t def, uint32_t use);
+    bool groupAndQueueRegisters();
+    bool tryAllocateFixed(LiveInterval *interval, bool *success, bool *pfixed, LiveInterval **pconflicting);
+    bool tryAllocateNonFixed(LiveInterval *interval, bool *success, bool *pfixed, LiveInterval **pconflicting);
+    bool processInterval(LiveInterval *interval);
+    bool processGroup(VirtualRegisterGroup *group);
+    bool setIntervalRequirement(LiveInterval *interval);
+    bool tryAllocateRegister(PhysicalRegister &r, LiveInterval *interval,
+                             bool *success, bool *pfixed, LiveInterval **pconflicting);
+    bool tryAllocateGroupRegister(PhysicalRegister &r, VirtualRegisterGroup *group,
+                                  bool *psuccess, bool *pfixed, LiveInterval **pconflicting);
+    bool evictInterval(LiveInterval *interval);
+    void distributeUses(LiveInterval *interval, const LiveIntervalVector &newIntervals);
+    bool split(LiveInterval *interval, const LiveIntervalVector &newIntervals);
+    bool requeueIntervals(const LiveIntervalVector &newIntervals);
+    void spill(LiveInterval *interval);
+
+    bool isReusedInput(LUse *use, LInstruction *ins, bool considerCopy);
+    bool isRegisterUse(LUse *use, LInstruction *ins, bool considerCopy = false);
+    bool isRegisterDefinition(LiveInterval *interval);
+    bool addLiveInterval(LiveIntervalVector &intervals, uint32_t vreg,
+                         LiveInterval *spillInterval,
+                         CodePosition from, CodePosition to);
+
+    bool resolveControlFlow();
+    bool reifyAllocations();
+    bool populateSafepoints();
+
+    void dumpRegisterGroups();
+    void dumpLiveness();
+    void dumpAllocations();
+
+    struct PrintLiveIntervalRange;
+
+    // Whether an interval is as small as it can usefully be; minimal
+    // intervals are not split any further.
+    bool minimalDef(const LiveInterval *interval, LInstruction *ins);
+    bool minimalUse(const LiveInterval *interval, LInstruction *ins);
+    bool minimalInterval(const LiveInterval *interval, bool *pfixed = nullptr);
+
+    // Heuristic methods.
+
+    size_t computePriority(const LiveInterval *interval);
+    size_t computeSpillWeight(const LiveInterval *interval);
+
+    size_t computePriority(const VirtualRegisterGroup *group);
+    size_t computeSpillWeight(const VirtualRegisterGroup *group);
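+
+    // Priority estimates how much code an interval or group covers, so that
+    // larger intervals are processed first; spill weight estimates how
+    // costly spilling would be, so that cheaper intervals are evicted or
+    // split in preference to expensive ones.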
+
+    bool chooseIntervalSplit(LiveInterval *interval, LiveInterval *conflict);
+
+    bool splitAt(LiveInterval *interval,
+                 const SplitPositionVector &splitPositions);
+    bool trySplitAcrossHotcode(LiveInterval *interval, bool *success);
+    bool trySplitAfterLastRegisterUse(LiveInterval *interval, LiveInterval *conflict, bool *success);
+    bool splitAtAllRegisterUses(LiveInterval *interval);
+    bool splitAcrossCalls(LiveInterval *interval);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_BacktrackingAllocator_h */
