js/src/jit/LiveRangeAllocator.h

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Wed, 31 Dec 2014 06:09:35 +0100
changeset    0:6474c204b198
permissions  -rw-r--r--

Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_LiveRangeAllocator_h
#define jit_LiveRangeAllocator_h

#include "mozilla/Array.h"
#include "mozilla/DebugOnly.h"

#include "jit/RegisterAllocator.h"
#include "jit/StackSlotAllocator.h"

// Common structures and functions used by register allocators that operate on
// virtual register live ranges.

namespace js {
namespace jit {

class Requirement
{
  public:
    enum Kind {
        NONE,
        REGISTER,
        FIXED,
        SAME_AS_OTHER
    };

    Requirement()
      : kind_(NONE)
    { }

    Requirement(Kind kind)
      : kind_(kind)
    {
        // These have dedicated constructors.
        JS_ASSERT(kind != FIXED && kind != SAME_AS_OTHER);
    }

    Requirement(Kind kind, CodePosition at)
      : kind_(kind),
        position_(at)
    {
        // These have dedicated constructors.
        JS_ASSERT(kind != FIXED && kind != SAME_AS_OTHER);
    }

    Requirement(LAllocation fixed)
      : kind_(FIXED),
        allocation_(fixed)
    {
        JS_ASSERT(fixed == LAllocation() || !fixed.isUse());
    }
    // Only useful as a hint: encodes where the fixed requirement is used, to
    // avoid allocating the fixed register earlier than necessary.
    Requirement(LAllocation fixed, CodePosition at)
      : kind_(FIXED),
        allocation_(fixed),
        position_(at)
    {
        JS_ASSERT(fixed == LAllocation() || !fixed.isUse());
    }

    Requirement(uint32_t vreg, CodePosition at)
      : kind_(SAME_AS_OTHER),
        allocation_(LUse(vreg, LUse::ANY)),
        position_(at)
    { }

    Kind kind() const {
        return kind_;
    }

    LAllocation allocation() const {
        JS_ASSERT(!allocation_.isUse());
        return allocation_;
    }

    uint32_t virtualRegister() const {
        JS_ASSERT(allocation_.isUse());
        JS_ASSERT(kind() == SAME_AS_OTHER);
        return allocation_.toUse()->virtualRegister();
    }

    CodePosition pos() const {
        return position_;
    }

    int priority() const;

  private:
    Kind kind_;
    LAllocation allocation_;
    CodePosition position_;
};

struct UsePosition : public TempObject,
                     public InlineForwardListNode<UsePosition>
{
    LUse *use;
    CodePosition pos;

    UsePosition(LUse *use, CodePosition pos) :
        use(use),
        pos(pos)
    { }
};

typedef InlineForwardListIterator<UsePosition> UsePositionIterator;

static inline bool
UseCompatibleWith(const LUse *use, LAllocation alloc)
{
    switch (use->policy()) {
      case LUse::ANY:
      case LUse::KEEPALIVE:
        return alloc.isRegister() || alloc.isMemory();
      case LUse::REGISTER:
        return alloc.isRegister();
      case LUse::FIXED:
        // Fixed uses are handled using fixed intervals; the UsePosition is
        // only used as a hint.
        return alloc.isRegister();
      default:
        MOZ_ASSUME_UNREACHABLE("Unknown use policy");
    }
}

#ifdef DEBUG

static inline bool
DefinitionCompatibleWith(LInstruction *ins, const LDefinition *def, LAllocation alloc)
{
    if (ins->isPhi()) {
        if (def->isFloatReg())
            return alloc.isFloatReg() || alloc.isStackSlot();
        return alloc.isGeneralReg() || alloc.isStackSlot();
    }

    switch (def->policy()) {
      case LDefinition::DEFAULT:
        if (!alloc.isRegister())
            return false;
        return alloc.isFloatReg() == def->isFloatReg();
      case LDefinition::PRESET:
        return alloc == *def->output();
      case LDefinition::MUST_REUSE_INPUT:
        if (!alloc.isRegister() || !ins->numOperands())
            return false;
        return alloc == *ins->getOperand(def->getReusedInput());
      case LDefinition::PASSTHROUGH:
        return true;
      default:
        MOZ_ASSUME_UNREACHABLE("Unknown definition policy");
    }
}

#endif // DEBUG

static inline LDefinition *
FindReusingDefinition(LInstruction *ins, LAllocation *alloc)
{
    for (size_t i = 0; i < ins->numDefs(); i++) {
        LDefinition *def = ins->getDef(i);
        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
            ins->getOperand(def->getReusedInput()) == alloc)
            return def;
    }
    for (size_t i = 0; i < ins->numTemps(); i++) {
        LDefinition *def = ins->getTemp(i);
        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
            ins->getOperand(def->getReusedInput()) == alloc)
            return def;
    }
    return nullptr;
}
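
// Illustrative note (hypothetical example): a two-address instruction such as
// an integer add on x86 typically gives its output definition the
// MUST_REUSE_INPUT policy with getReusedInput() == 0. For such an instruction,
// FindReusingDefinition(ins, ins->getOperand(0)) returns that definition,
// while any other allocation yields nullptr.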

/*
 * A live interval is a set of disjoint ranges of code positions where a
 * virtual register is live. Register allocation operates on these intervals,
 * splitting them as necessary and assigning allocations to them as it runs.
 */
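/*
 * For example (hypothetical positions): a vreg defined at position 4 and
 * last used at position 10 may become live again at position 20 because it
 * is live-in to a later block; its interval then holds the two disjoint
 * ranges [4, 10) and [20, 24). If no register is free over the whole
 * interval, the allocator can split it and give each piece its own
 * allocation.
 */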
class LiveInterval
  : public InlineListNode<LiveInterval>,
    public TempObject
{
  public:
    /*
     * A range is a contiguous sequence of CodePositions where the virtual
     * register associated with this interval is live.
     */
    struct Range {
        Range()
          : from(),
            to()
        { }
        Range(CodePosition f, CodePosition t)
          : from(f),
            to(t)
        {
            JS_ASSERT(from < to);
        }
        CodePosition from;

        // The end of this range, exclusive.
        CodePosition to;

        bool empty() const {
            return from >= to;
        }

        // Whether this range wholly contains other.
        bool contains(const Range *other) const;

        // Intersect this range with other, returning the subranges of this
        // that are before, inside, or after other.
        void intersect(const Range *other, Range *pre, Range *inside, Range *post) const;
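
        // Example (hypothetical positions): intersecting this = [4, 20) with
        // other = [8, 12) gives pre = [4, 8), inside = [8, 12) and
        // post = [12, 20); presumably a piece that would be empty comes back
        // as an empty range.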
    };

  private:
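    // Note: ranges_ is stored in reverse code order: ranges_.begin() is the
    // latest range and ranges_.back() the earliest, which is why start()
    // reads back() and end() reads begin() below.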
    Vector<Range, 1, IonAllocPolicy> ranges_;
    LAllocation alloc_;
    LiveInterval *spillInterval_;
    uint32_t vreg_;
    uint32_t index_;
    Requirement requirement_;
    Requirement hint_;
    InlineForwardList<UsePosition> uses_;
    size_t lastProcessedRange_;

    LiveInterval(TempAllocator &alloc, uint32_t vreg, uint32_t index)
      : ranges_(alloc),
        spillInterval_(nullptr),
        vreg_(vreg),
        index_(index),
        lastProcessedRange_(size_t(-1))
    { }

    LiveInterval(TempAllocator &alloc, uint32_t index)
      : ranges_(alloc),
        spillInterval_(nullptr),
        vreg_(UINT32_MAX),
        index_(index),
        lastProcessedRange_(size_t(-1))
    { }

  public:
    static LiveInterval *New(TempAllocator &alloc, uint32_t vreg, uint32_t index) {
        return new(alloc) LiveInterval(alloc, vreg, index);
    }
    static LiveInterval *New(TempAllocator &alloc, uint32_t index) {
        return new(alloc) LiveInterval(alloc, index);
    }

    bool addRange(CodePosition from, CodePosition to);
    bool addRangeAtHead(CodePosition from, CodePosition to);
    void setFrom(CodePosition from);
    CodePosition intersect(LiveInterval *other);
    bool covers(CodePosition pos);
    CodePosition nextCoveredAfter(CodePosition pos);

    CodePosition start() const {
        JS_ASSERT(!ranges_.empty());
        return ranges_.back().from;
    }

    CodePosition end() const {
        JS_ASSERT(!ranges_.empty());
        return ranges_.begin()->to;
    }

    size_t numRanges() const {
        return ranges_.length();
    }
    const Range *getRange(size_t i) const {
        return &ranges_[i];
    }
    void setLastProcessedRange(size_t range, mozilla::DebugOnly<CodePosition> pos) {
        // If the range starts after pos, we may not be able to use
        // it in the next lastProcessedRangeIfValid call.
        JS_ASSERT(ranges_[range].from <= pos);
        lastProcessedRange_ = range;
    }
    size_t lastProcessedRangeIfValid(CodePosition pos) const {
        if (lastProcessedRange_ < ranges_.length() && ranges_[lastProcessedRange_].from <= pos)
            return lastProcessedRange_;
        return ranges_.length() - 1;
    }

    LAllocation *getAllocation() {
        return &alloc_;
    }
    void setAllocation(LAllocation alloc) {
        alloc_ = alloc;
    }
    void setSpillInterval(LiveInterval *spill) {
        spillInterval_ = spill;
    }
    LiveInterval *spillInterval() {
        return spillInterval_;
    }
    bool hasVreg() const {
        return vreg_ != UINT32_MAX;
    }
    uint32_t vreg() const {
        JS_ASSERT(hasVreg());
        return vreg_;
    }
    uint32_t index() const {
        return index_;
    }
    void setIndex(uint32_t index) {
        index_ = index;
    }
    const Requirement *requirement() const {
        return &requirement_;
    }
    void setRequirement(const Requirement &requirement) {
        // A SAME_AS_OTHER requirement complicates regalloc too much; it
        // should only be used as a hint.
        JS_ASSERT(requirement.kind() != Requirement::SAME_AS_OTHER);
        requirement_ = requirement;
    }
    bool addRequirement(const Requirement &newRequirement) {
        // Merge newRequirement with any existing requirement, returning false
        // if the new and old requirements conflict.
        JS_ASSERT(newRequirement.kind() != Requirement::SAME_AS_OTHER);

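        // Worked example: if the existing requirement is FIXED at some
        // register and the new one is REGISTER, the merge succeeds, since a
        // fixed register satisfies REGISTER; if both are FIXED, they must
        // name the same allocation or the merge fails.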
        if (newRequirement.kind() == Requirement::FIXED) {
            if (requirement_.kind() == Requirement::FIXED)
                return newRequirement.allocation() == requirement_.allocation();
            requirement_ = newRequirement;
            return true;
        }

        JS_ASSERT(newRequirement.kind() == Requirement::REGISTER);
        if (requirement_.kind() == Requirement::FIXED)
            return requirement_.allocation().isRegister();

        requirement_ = newRequirement;
        return true;
    }
    const Requirement *hint() const {
        return &hint_;
    }
    void setHint(const Requirement &hint) {
        hint_ = hint;
    }
    bool isSpill() const {
        return alloc_.isStackSlot();
    }
    bool splitFrom(CodePosition pos, LiveInterval *after);

    void addUse(UsePosition *use);
    void addUseAtEnd(UsePosition *use);
    UsePosition *nextUseAfter(CodePosition pos);
    CodePosition nextUsePosAfter(CodePosition pos);
    CodePosition firstIncompatibleUse(LAllocation alloc);

    UsePositionIterator usesBegin() const {
        return uses_.begin();
    }

    UsePositionIterator usesEnd() const {
        return uses_.end();
    }

    bool usesEmpty() const {
        return uses_.empty();
    }

    UsePosition *usesBack() {
        return uses_.back();
    }

#ifdef DEBUG
    void validateRanges();
#endif

    // Return a string describing the ranges in this LiveInterval. This is
    // not re-entrant!
    const char *rangesToString() const;

    void dump();
};

/*
 * Represents all of the register allocation state associated with a virtual
 * register, including all associated intervals and pointers to relevant LIR
 * structures.
 */
class VirtualRegister
{
    LBlock *block_;
    LInstruction *ins_;
    LDefinition *def_;
    Vector<LiveInterval *, 1, IonAllocPolicy> intervals_;

    // Whether def_ is a temp or an output.
    bool isTemp_ : 1;

    void operator=(const VirtualRegister &) MOZ_DELETE;
    VirtualRegister(const VirtualRegister &) MOZ_DELETE;

  protected:
    explicit VirtualRegister(TempAllocator &alloc)
      : intervals_(alloc)
    {}

  public:
    bool init(TempAllocator &alloc, LBlock *block, LInstruction *ins, LDefinition *def,
              bool isTemp)
    {
        JS_ASSERT(block && !block_);
        block_ = block;
        ins_ = ins;
        def_ = def;
        isTemp_ = isTemp;
        LiveInterval *initial = LiveInterval::New(alloc, def->virtualRegister(), 0);
        if (!initial)
            return false;
        return intervals_.append(initial);
    }
    LBlock *block() {
        return block_;
    }
    LInstruction *ins() {
        return ins_;
    }
    LDefinition *def() const {
        return def_;
    }
    LDefinition::Type type() const {
        return def()->type();
    }
    bool isTemp() const {
        return isTemp_;
    }
    size_t numIntervals() const {
        return intervals_.length();
    }
    LiveInterval *getInterval(size_t i) const {
        return intervals_[i];
    }
    LiveInterval *lastInterval() const {
        JS_ASSERT(numIntervals() > 0);
        return getInterval(numIntervals() - 1);
    }
    void replaceInterval(LiveInterval *old, LiveInterval *interval) {
        JS_ASSERT(intervals_[old->index()] == old);
        interval->setIndex(old->index());
        intervals_[old->index()] = interval;
    }
    bool addInterval(LiveInterval *interval) {
        JS_ASSERT(interval->numRanges());

        // Preserve ascending order for faster lookups.
        LiveInterval **found = nullptr;
        LiveInterval **i;
        for (i = intervals_.begin(); i != intervals_.end(); i++) {
            if (!found && interval->start() < (*i)->start())
                found = i;
            if (found)
                (*i)->setIndex((*i)->index() + 1);
        }
        if (!found)
            found = intervals_.end();
        interval->setIndex(found - intervals_.begin());
        return intervals_.insert(found, interval);
    }
    bool isFloatReg() const {
        return def_->isFloatReg();
    }

    LiveInterval *intervalFor(CodePosition pos);
    LiveInterval *getFirstInterval();
};

// Index of the virtual registers in a graph. VREG is a subclass of
// VirtualRegister extended with any allocator-specific state for the vreg.
template <typename VREG>
class VirtualRegisterMap
{
  private:
    VREG *vregs_;
    uint32_t numVregs_;

    void operator=(const VirtualRegisterMap &) MOZ_DELETE;
    VirtualRegisterMap(const VirtualRegisterMap &) MOZ_DELETE;

  public:
    VirtualRegisterMap()
      : vregs_(nullptr),
        numVregs_(0)
    { }

    bool init(MIRGenerator *gen, uint32_t numVregs) {
        vregs_ = gen->allocate<VREG>(numVregs);
        numVregs_ = numVregs;
        if (!vregs_)
            return false;
        memset(vregs_, 0, sizeof(VREG) * numVregs);
        TempAllocator &alloc = gen->alloc();
        for (uint32_t i = 0; i < numVregs; i++)
            new(&vregs_[i]) VREG(alloc);
        return true;
    }
    VREG &operator[](unsigned int index) {
        JS_ASSERT(index < numVregs_);
        return vregs_[index];
    }
    VREG &operator[](const LAllocation *alloc) {
        JS_ASSERT(alloc->isUse());
        JS_ASSERT(alloc->toUse()->virtualRegister() < numVregs_);
        return vregs_[alloc->toUse()->virtualRegister()];
    }
    VREG &operator[](const LDefinition *def) {
        JS_ASSERT(def->virtualRegister() < numVregs_);
        return vregs_[def->virtualRegister()];
    }
    uint32_t numVirtualRegisters() const {
        return numVregs_;
    }
};
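
// Illustrative use (a sketch; gen, graph and someDef are hypothetical): after
// init(), a vreg can be looked up by raw index, by an LUse allocation, or by
// an LDefinition:
//
//   VirtualRegisterMap<VirtualRegister> vregs;
//   if (!vregs.init(gen, graph.numVirtualRegisters()))
//       return false;
//   VirtualRegister &byDef = vregs[someDef];
//   VirtualRegister &byId = vregs[someDef->virtualRegister()];  // same vreg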

static inline bool
IsNunbox(VirtualRegister *vreg)
{
#ifdef JS_NUNBOX32
    return (vreg->type() == LDefinition::TYPE ||
            vreg->type() == LDefinition::PAYLOAD);
#else
    return false;
#endif
}

static inline bool
IsSlotsOrElements(VirtualRegister *vreg)
{
    return vreg->type() == LDefinition::SLOTS;
}

static inline bool
IsTraceable(VirtualRegister *reg)
{
    if (reg->type() == LDefinition::OBJECT)
        return true;
#ifdef JS_PUNBOX64
    if (reg->type() == LDefinition::BOX)
        return true;
#endif
    return false;
}

typedef InlineList<LiveInterval>::iterator IntervalIterator;
typedef InlineList<LiveInterval>::reverse_iterator IntervalReverseIterator;

// The forLSRA parameter indicates whether the underlying allocator is LSRA.
// This changes the generated live ranges in various ways: inserting additional
// fixed uses of registers, and shifting the boundaries of live ranges by small
// amounts. This exists because different allocators handle live ranges
// differently; ideally, they would all treat live ranges in the same way.
template <typename VREG, bool forLSRA>
class LiveRangeAllocator : protected RegisterAllocator
{
  protected:
    // Computed information
    BitSet **liveIn;
    VirtualRegisterMap<VREG> vregs;
    mozilla::Array<LiveInterval *, AnyRegister::Total> fixedIntervals;

    // Union of all ranges in fixedIntervals, used to quickly determine
    // whether an interval intersects with a fixed register.
    LiveInterval *fixedIntervalsUnion;

    // Allocation state
    StackSlotAllocator stackSlotAllocator;

    LiveRangeAllocator(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
      : RegisterAllocator(mir, lir, graph),
        liveIn(nullptr),
        fixedIntervalsUnion(nullptr)
    {
    }

    bool buildLivenessInfo();

    bool init();

    bool addFixedRangeAtHead(AnyRegister reg, CodePosition from, CodePosition to) {
        if (!fixedIntervals[reg.code()]->addRangeAtHead(from, to))
            return false;
        return fixedIntervalsUnion->addRangeAtHead(from, to);
    }

    void validateVirtualRegisters()
    {
#ifdef DEBUG
        if (!js_JitOptions.checkGraphConsistency)
            return;

        for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
            VirtualRegister *reg = &vregs[i];

            LiveInterval *prev = nullptr;
            for (size_t j = 0; j < reg->numIntervals(); j++) {
                LiveInterval *interval = reg->getInterval(j);
                JS_ASSERT(interval->vreg() == i);
                JS_ASSERT(interval->index() == j);

                if (interval->numRanges() == 0)
                    continue;

                JS_ASSERT_IF(prev, prev->end() <= interval->start());
                interval->validateRanges();

                prev = interval;
            }
        }
#endif
    }

#ifdef JS_NUNBOX32
    VREG *otherHalfOfNunbox(VirtualRegister *vreg) {
        signed offset = OffsetToOtherHalfOfNunbox(vreg->type());
        VREG *other = &vregs[vreg->def()->virtualRegister() + offset];
        AssertTypesFormANunbox(vreg->type(), other->type());
        return other;
    }
#endif

    bool addMove(LMoveGroup *moves, LiveInterval *from, LiveInterval *to, LDefinition::Type type) {
        JS_ASSERT(*from->getAllocation() != *to->getAllocation());
        return moves->add(from->getAllocation(), to->getAllocation(), type);
    }

    bool moveInput(CodePosition pos, LiveInterval *from, LiveInterval *to, LDefinition::Type type) {
        if (*from->getAllocation() == *to->getAllocation())
            return true;
        LMoveGroup *moves = getInputMoveGroup(pos);
        return addMove(moves, from, to, type);
    }

    bool moveAfter(CodePosition pos, LiveInterval *from, LiveInterval *to, LDefinition::Type type) {
        if (*from->getAllocation() == *to->getAllocation())
            return true;
        LMoveGroup *moves = getMoveGroupAfter(pos);
        return addMove(moves, from, to, type);
    }

    bool moveAtExit(LBlock *block, LiveInterval *from, LiveInterval *to, LDefinition::Type type) {
        if (*from->getAllocation() == *to->getAllocation())
            return true;
        LMoveGroup *moves = block->getExitMoveGroup(alloc());
        return addMove(moves, from, to, type);
    }

    bool moveAtEntry(LBlock *block, LiveInterval *from, LiveInterval *to, LDefinition::Type type) {
        if (*from->getAllocation() == *to->getAllocation())
            return true;
        LMoveGroup *moves = block->getEntryMoveGroup(alloc());
        return addMove(moves, from, to, type);
    }

    size_t findFirstNonCallSafepoint(CodePosition from) const
    {
        size_t i = 0;
        for (; i < graph.numNonCallSafepoints(); i++) {
            const LInstruction *ins = graph.getNonCallSafepoint(i);
            if (from <= inputOf(ins))
                break;
        }
        return i;
    }

    void addLiveRegistersForInterval(VirtualRegister *reg, LiveInterval *interval)
    {
        // Fill in the live register sets for all non-call safepoints.
        LAllocation *a = interval->getAllocation();
        if (!a->isRegister())
            return;

        // Don't add output registers to the safepoint.
        CodePosition start = interval->start();
        if (interval->index() == 0 && !reg->isTemp()) {
#ifdef CHECK_OSIPOINT_REGISTERS
            // We don't add the output register to the safepoint,
            // but it still might get added as one of the inputs.
            // So eagerly add this reg to the safepoint clobbered registers.
            if (LSafepoint *safepoint = reg->ins()->safepoint())
                safepoint->addClobberedRegister(a->toRegister());
#endif
            start = start.next();
        }

        size_t i = findFirstNonCallSafepoint(start);
        for (; i < graph.numNonCallSafepoints(); i++) {
            LInstruction *ins = graph.getNonCallSafepoint(i);
            CodePosition pos = inputOf(ins);

            // Safepoints are sorted, so we can shortcut out of this loop
            // if we go out of range.
            if (interval->end() <= pos)
                break;

            if (!interval->covers(pos))
                continue;

            LSafepoint *safepoint = ins->safepoint();
            safepoint->addLiveRegister(a->toRegister());

#ifdef CHECK_OSIPOINT_REGISTERS
            if (reg->isTemp())
                safepoint->addClobberedRegister(a->toRegister());
#endif
        }
    }

    // Finds the first safepoint that is within range of an interval.
    size_t findFirstSafepoint(const LiveInterval *interval, size_t startFrom) const
    {
        size_t i = startFrom;
        for (; i < graph.numSafepoints(); i++) {
            LInstruction *ins = graph.getSafepoint(i);
            if (interval->start() <= inputOf(ins))
                break;
        }
        return i;
    }
};

} // namespace jit
} // namespace js

#endif /* jit_LiveRangeAllocator_h */
