tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

BacktrackingAllocator.h (34025B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #ifndef jit_BacktrackingAllocator_h
      8 #define jit_BacktrackingAllocator_h
      9 
     10 #include "mozilla/Array.h"
     11 #include "mozilla/Atomics.h"
     12 #include "mozilla/Attributes.h"
     13 #include "mozilla/Maybe.h"
     14 
     15 #include "ds/AvlTree.h"
     16 #include "ds/PriorityQueue.h"
     17 #include "jit/RegisterAllocator.h"
     18 #include "jit/SparseBitSet.h"
     19 #include "jit/StackSlotAllocator.h"
     20 
     21 // Gives better traces in Nightly/debug builds (could be EARLY_BETA_OR_EARLIER)
     22 #if defined(NIGHTLY_BUILD) || defined(DEBUG)
     23 #  define AVOID_INLINE_FOR_DEBUGGING MOZ_NEVER_INLINE
     24 #else
     25 #  define AVOID_INLINE_FOR_DEBUGGING
     26 #endif
     27 
     28 // Backtracking priority queue based register allocator based on that described
     29 // in the following blog post:
     30 //
     31 // http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
     32 
     33 namespace js {
     34 namespace jit {
     35 
     36 class Requirement {
     37 public:
     38  enum Kind { NONE, REGISTER, FIXED };
     39 
     40  Requirement() : kind_(NONE) {}
     41 
     42  explicit Requirement(Kind kind) : kind_(kind) {
     43    // FIXED has a dedicated constructor.
     44    MOZ_ASSERT(kind != FIXED);
     45  }
     46 
     47  explicit Requirement(LAllocation fixed) : kind_(FIXED), allocation_(fixed) {
     48    MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
     49  }
     50 
     51  Kind kind() const { return kind_; }
     52 
     53  LAllocation allocation() const {
     54    MOZ_ASSERT(!allocation_.isBogus() && !allocation_.isUse());
     55    return allocation_;
     56  }
     57 
     58  [[nodiscard]] bool merge(const Requirement& newRequirement) {
     59    // Merge newRequirement with any existing requirement, returning false
     60    // if the new and old requirements conflict.
     61 
     62    if (newRequirement.kind() == Requirement::FIXED) {
     63      if (kind() == Requirement::FIXED) {
     64        return newRequirement.allocation() == allocation();
     65      }
     66      *this = newRequirement;
     67      return true;
     68    }
     69 
     70    MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER);
     71    if (kind() == Requirement::FIXED) {
     72      return allocation().isAnyRegister();
     73    }
     74 
     75    *this = newRequirement;
     76    return true;
     77  }
     78 
     79 private:
     80  Kind kind_;
     81  LAllocation allocation_;
     82 };
     83 
// A single use of a virtual register: an LUse* paired with the CodePosition
// where the use occurs. Linked into a per-LiveRange list of uses.
struct UsePosition : public TempObject,
                     public InlineForwardListNode<UsePosition> {
 private:
  // A UsePosition is an LUse* with a CodePosition.  UsePosition also has an
  // optimization that allows access to the associated LUse::Policy without
  // dereferencing memory: the policy is encoded in the low bits of the LUse*.
  //
  // Note however that because LUse* is uintptr_t-aligned, on 32-bit systems
  // there are only 4 encodable values, for more than 4 use policies; in that
  // case we allocate the common LUse::ANY, LUse::REGISTER, and LUse::FIXED use
  // policies to tags, and use tag 0x3 to indicate that dereferencing the LUse
  // is necessary to get the policy (KEEPALIVE or STACK, in that case).
  uintptr_t use_;
  static_assert(LUse::ANY < 0x3,
                "LUse::ANY can be represented in low tag on 32-bit systems");
  static_assert(LUse::REGISTER < 0x3,
                "LUse::REGISTER can be represented in tag on 32-bit systems");
  static_assert(LUse::FIXED < 0x3,
                "LUse::FIXED can be represented in tag on 32-bit systems");

  // Low bits of use_ hold the policy tag; the remaining bits hold the LUse*.
  static constexpr uintptr_t PolicyMask = sizeof(uintptr_t) - 1;
  static constexpr uintptr_t UseMask = ~PolicyMask;

  // Store |use| in use_ with its policy encoded in the low tag bits.
  void setUse(LUse* use) {
    // RECOVERED_INPUT is used by snapshots and ignored when building the
    // liveness information. Thus we can safely assume that no such value
    // would be seen.
    MOZ_ASSERT(use->policy() != LUse::RECOVERED_INPUT);

    uintptr_t policyBits = use->policy();
#ifndef JS_64BIT
    // On a 32-bit machine, LUse::KEEPALIVE and LUse::STACK are accessed by
    // dereferencing the use pointer.
    if (policyBits >= PolicyMask) {
      policyBits = PolicyMask;
    }
#endif
    use_ = uintptr_t(use) | policyBits;
    // The round-trip through the tag must reproduce the original policy.
    MOZ_ASSERT(use->policy() == usePolicy());
  }

 public:
  // Position in the code where this use occurs.
  CodePosition pos;

  // The wrapped LUse*, with the policy tag bits masked off.
  LUse* use() const { return reinterpret_cast<LUse*>(use_ & UseMask); }

  // Recover the use policy, avoiding a memory dereference whenever the
  // policy is encoded directly in the tag bits.
  LUse::Policy usePolicy() const {
    uintptr_t bits = use_ & PolicyMask;
#ifndef JS_64BIT
    // On 32-bit machines, reach out to memory if it's LUse::KEEPALIVE or
    // LUse::STACK.
    if (bits == PolicyMask) {
      return use()->policy();
    }
#endif
    LUse::Policy policy = LUse::Policy(bits);
    MOZ_ASSERT(use()->policy() == policy);
    return policy;
  }

  UsePosition(LUse* use, CodePosition pos) : pos(pos) {
    // Verify that the usedAtStart() flag is consistent with the
    // subposition. For now ignore fixed registers, because they
    // are handled specially around calls.
    MOZ_ASSERT_IF(!use->isFixedRegister(),
                  pos.subpos() == (use->usedAtStart() ? CodePosition::INPUT
                                                      : CodePosition::OUTPUT));
    setUse(use);
  }
};

using UsePositionIterator = InlineForwardListIterator<UsePosition>;
    156 
    157 // Backtracking allocator data structures overview.
    158 //
    159 // LiveRange: A continuous range of positions where a virtual register is live.
    160 // LiveBundle: A set of LiveRanges which do not overlap.
    161 // VirtualRegister: A set of all LiveRanges used for some LDefinition.
    162 //
    163 // The allocator first performs a liveness analysis on the LIR graph which
    164 // constructs LiveRanges for each VirtualRegister, determining where the
    165 // registers are live.
    166 //
    167 // The ranges are then bundled together according to heuristics, and placed on
    168 // the allocation queue.
    169 //
    170 // As bundles are removed from the allocation queue, we attempt to find a
    171 // physical register or stack slot allocation for all ranges in the removed
    172 // bundle, possibly evicting already-allocated bundles. See processBundle()
    173 // for details.
    174 //
    175 // If we are not able to allocate a bundle, it is split according to heuristics
    176 // into two or more smaller bundles which cover all the ranges of the original.
    177 // These smaller bundles are then allocated independently.
    178 
    179 class LiveBundle;
    180 class VirtualRegister;
    181 
    182 } /* namespace jit */
    183 
// A VirtualRegister may contain malloced memory but can still be LifoAlloc'ed
// because it is always owned by a BacktrackingAllocator with a destructor that
// destroys it.
// (This specialization lives in namespace js, outside jit, hence the
// fully-qualified js::jit::VirtualRegister below.)
template <>
struct CanLifoAlloc<js::jit::VirtualRegister> : std::true_type {};
    189 
    190 namespace jit {
    191 
    192 class LiveRange : public TempObject, public InlineForwardListNode<LiveRange> {
    193 public:
    194  struct Range {
    195    // The beginning of this range, inclusive.
    196    CodePosition from;
    197 
    198    // The end of this range, exclusive.
    199    CodePosition to;
    200 
    201    Range() = default;
    202 
    203    Range(CodePosition from, CodePosition to) : from(from), to(to) {
    204      MOZ_ASSERT(!empty());
    205    }
    206 
    207    bool empty() {
    208      MOZ_ASSERT(from <= to);
    209      return from == to;
    210    }
    211  };
    212 
    213 private:
    214  // The virtual register this range is for, or nullptr if this does not have a
    215  // virtual register (for example, it is in the callRanges bundle).
    216  VirtualRegister* vreg_;
    217 
    218  // The bundle containing this range, null if liveness information is being
    219  // constructed and we haven't started allocating bundles yet.
    220  LiveBundle* bundle_;
    221 
    222  // The code positions in this range.
    223  Range range_;
    224 
    225  // All uses of the virtual register in this range, ordered by location.
    226  InlineForwardList<UsePosition> uses_;
    227 
    228  // Total spill weight that calculate from all the uses' policy. Because the
    229  // use's policy can't be changed after initialization, we can update the
    230  // weight whenever a use is added to or remove from this range. This way, we
    231  // don't need to iterate all the uses every time computeSpillWeight() is
    232  // called.
    233  size_t usesSpillWeight_;
    234 
    235  // Number of uses that have policy LUse::FIXED.
    236  uint32_t numFixedUses_;
    237 
    238  // Whether this range contains the virtual register's definition.
    239  bool hasDefinition_;
    240 
    241  LiveRange(VirtualRegister* vreg, Range range)
    242      : vreg_(vreg),
    243        bundle_(nullptr),
    244        range_(range),
    245        usesSpillWeight_(0),
    246        numFixedUses_(0),
    247        hasDefinition_(false)
    248 
    249  {
    250    MOZ_ASSERT(!range.empty());
    251  }
    252 
    253  void noteAddedUse(UsePosition* use);
    254  void noteRemovedUse(UsePosition* use);
    255 
    256 public:
    257  static LiveRange* FallibleNew(TempAllocator& alloc, VirtualRegister* vreg,
    258                                CodePosition from, CodePosition to) {
    259    return new (alloc.fallible()) LiveRange(vreg, Range(from, to));
    260  }
    261 
    262  VirtualRegister& vreg() const {
    263    MOZ_ASSERT(hasVreg());
    264    return *vreg_;
    265  }
    266  bool hasVreg() const { return vreg_ != nullptr; }
    267 
    268  LiveBundle* bundle() const { return bundle_; }
    269 
    270  CodePosition from() const { return range_.from; }
    271  CodePosition to() const { return range_.to; }
    272  bool covers(CodePosition pos) const { return pos >= from() && pos < to(); }
    273 
    274  // Whether this range wholly contains other.
    275  bool contains(LiveRange* other) const;
    276 
    277  // Intersect this range with other, returning the subranges of this
    278  // that are before, inside, or after other.
    279  void intersect(LiveRange* other, Range* pre, Range* inside,
    280                 Range* post) const;
    281 
    282  // Whether this range has any intersection with other.
    283  bool intersects(LiveRange* other) const;
    284 
    285  UsePositionIterator usesBegin() const { return uses_.begin(); }
    286  UsePosition* lastUse() const { return uses_.back(); }
    287  bool hasUses() const { return !!usesBegin(); }
    288  UsePosition* popUse();
    289 
    290  bool hasDefinition() const { return hasDefinition_; }
    291 
    292  void setFrom(CodePosition from) {
    293    range_.from = from;
    294    MOZ_ASSERT(!range_.empty());
    295  }
    296  void setTo(CodePosition to) {
    297    range_.to = to;
    298    MOZ_ASSERT(!range_.empty());
    299  }
    300 
    301  void setBundle(LiveBundle* bundle) { bundle_ = bundle; }
    302 
    303  void addUse(UsePosition* use);
    304 
    305  void tryToMoveDefAndUsesInto(LiveRange* other);
    306  void moveAllUsesToTheEndOf(LiveRange* other);
    307 
    308  void setHasDefinition() {
    309    MOZ_ASSERT(!hasDefinition_);
    310    hasDefinition_ = true;
    311  }
    312 
    313  size_t usesSpillWeight() { return usesSpillWeight_; }
    314  uint32_t numFixedUses() { return numFixedUses_; }
    315 
    316 #ifdef JS_JITSPEW
    317  // Return a string describing this range.
    318  UniqueChars toString() const;
    319 #endif
    320 
    321  // Comparator for use in AVL trees.
    322  static int compare(LiveRange* v0, LiveRange* v1) {
    323    // The denoted range includes 'from' but excludes 'to'.
    324    if (v0->to() <= v1->from()) {
    325      return -1;
    326    }
    327    if (v0->from() >= v1->to()) {
    328      return 1;
    329    }
    330    return 0;
    331  }
    332 };
    333 
    334 // LiveRangePlus is a simple wrapper around a LiveRange*.  It caches the
    335 // LiveRange*'s `.range_.from` and `.range_.to` CodePositions.  The only
    336 // purpose of this is to avoid some cache misses that would otherwise occur
    337 // when comparing those fields in an AvlTree<LiveRange*, ..>.  This measurably
    338 // speeds up the allocator in some cases.  See bug 1814204.
    339 
    340 class LiveRangePlus {
    341  // The LiveRange we're wrapping.
    342  LiveRange* liveRange_;
    343  // Cached versions of liveRange_->range_.from and lr->range_.to
    344  CodePosition from_;
    345  CodePosition to_;
    346 
    347 public:
    348  explicit LiveRangePlus(LiveRange* lr)
    349      : liveRange_(lr), from_(lr->from()), to_(lr->to()) {}
    350  LiveRangePlus() : liveRange_(nullptr) {}
    351  ~LiveRangePlus() {
    352    MOZ_ASSERT(liveRange_ ? from_ == liveRange_->from()
    353                          : from_ == CodePosition());
    354    MOZ_ASSERT(liveRange_ ? to_ == liveRange_->to() : to_ == CodePosition());
    355  }
    356 
    357  LiveRange* liveRange() const { return liveRange_; }
    358 
    359  // Comparator for use in AVL trees.
    360  static int compare(const LiveRangePlus& lrp0, const LiveRangePlus& lrp1) {
    361    // The denoted range includes 'from' but excludes 'to'.
    362    if (lrp0.to_ <= lrp1.from_) {
    363      return -1;
    364    }
    365    if (lrp0.from_ >= lrp1.to_) {
    366      return 1;
    367    }
    368    return 0;
    369  }
    370 };
    371 
    372 // Make sure there's no alignment holes or vtable present.  Per bug 1814204,
    373 // it's important that this structure is as small as possible.
    374 static_assert(sizeof(LiveRangePlus) ==
    375              sizeof(LiveRange*) + 2 * sizeof(CodePosition));
    376 
// Tracks information about bundles that should all be spilled to the same
// physical location. At the beginning of allocation, each bundle has its own
// spill set. As bundles are split, the new smaller bundles continue to use the
// same spill set.
class SpillSet : public TempObject {
  // All bundles with this spill set which have been spilled. All bundles in
  // this list will be given the same physical slot.
  Vector<LiveBundle*, 1, JitAllocPolicy> list_;

  explicit SpillSet(TempAllocator& alloc) : list_(alloc) {}

 public:
  // Create an empty spill set.
  static SpillSet* New(TempAllocator& alloc) {
    return new (alloc) SpillSet(alloc);
  }

  // Record that |bundle| has been spilled to this set's location.
  [[nodiscard]] bool addSpilledBundle(LiveBundle* bundle) {
    return list_.append(bundle);
  }
  size_t numSpilledBundles() const { return list_.length(); }
  LiveBundle* spilledBundle(size_t i) const { return list_[i]; }

  // Assign |alloc| to the spilled bundles in this set (defined out of line).
  void setAllocation(LAllocation alloc);
};
    401 
// A set of live ranges which are all pairwise disjoint. The register allocator
// attempts to find allocations for an entire bundle, and if it fails the
// bundle will be broken into smaller ones which are allocated independently.
class LiveBundle : public TempObject {
  // Set to use if this bundle or one it is split into is spilled.
  SpillSet* spill_;

  // All the ranges in this set, ordered by location.
  InlineForwardList<LiveRange> ranges_;

  // Allocation to use for ranges in this set, bogus if unallocated or spilled
  // and not yet given a physical stack slot.
  LAllocation alloc_;

  // Bundle which entirely contains this one and has no register uses. This
  // may or may not be spilled by the allocator, but it can be spilled and
  // will not be split.
  LiveBundle* spillParent_;

  // This is used for debug-printing bundles.  It gives them an
  // identifiable identity in the debug output, which they otherwise wouldn't
  // have.  It's also used for sorting VirtualRegister's live ranges; see the
  // comment in VirtualRegister::sortRanges.
  const uint32_t id_;

  LiveBundle(SpillSet* spill, LiveBundle* spillParent, uint32_t id)
      : spill_(spill), spillParent_(spillParent), id_(id) {}

 public:
  // Fallible allocation; returns nullptr on OOM.
  static LiveBundle* FallibleNew(TempAllocator& alloc, SpillSet* spill,
                                 LiveBundle* spillParent, uint32_t id) {
    return new (alloc.fallible()) LiveBundle(spill, spillParent, id);
  }

  using RangeIterator = InlineForwardListIterator<LiveRange>;

  SpillSet* spillSet() const { return spill_; }
  void setSpillSet(SpillSet* spill) { spill_ = spill; }

  // Iteration over this bundle's ranges, in list order.
  RangeIterator rangesBegin() const { return ranges_.begin(); }
  RangeIterator rangesBegin(LiveRange* range) const {
    return ranges_.begin(range);
  }
  bool hasRanges() const { return !!rangesBegin(); }
  LiveRange* firstRange() const { return *rangesBegin(); }
  LiveRange* lastRange() const { return ranges_.back(); }
  LiveRange* rangeFor(CodePosition pos) const;
  void removeRange(LiveRange* range);
  // Remove the range at |iter| and advance the iterator past it.
  void removeRangeAndIncrementIterator(RangeIterator& iter) {
    ranges_.removeAndIncrement(iter);
  }
  void removeAllRangesFromVirtualRegisters();
  void addRange(LiveRange* range, LiveRange* startAt = nullptr);
  void addRangeAtEnd(LiveRange* range);
  [[nodiscard]] bool addRangeAtEnd(TempAllocator& alloc, VirtualRegister* vreg,
                                   CodePosition from, CodePosition to);
  [[nodiscard]] bool addRangeAndDistributeUses(TempAllocator& alloc,
                                               LiveRange* oldRange,
                                               CodePosition from,
                                               CodePosition to);
  LiveRange* popFirstRange();
#ifdef DEBUG
  size_t numRanges() const;
#endif

  LAllocation allocation() const { return alloc_; }
  void setAllocation(LAllocation alloc) { alloc_ = alloc; }

  LiveBundle* spillParent() const { return spillParent_; }

  uint32_t id() const { return id_; }

#ifdef JS_JITSPEW
  // Return a string describing this bundle.
  UniqueChars toString() const;
#endif
};
    479 
// Information about a control flow edge to resolve (by inserting a move from
// predecessor range to successor range) in createMoveGroupsForControlFlowEdges.
struct ControlFlowEdge {
  // The predecessor and successor sides of this edge.
  LBlock* predecessor;
  LBlock* successor;

  // Live range that covers the successor block.
  LiveRange* successorRange;

  // Exit position of the predecessor block. This is |exitOf(predecessor)| but
  // cached here.
  CodePosition predecessorExit;

  ControlFlowEdge(LBlock* predecessor, LBlock* successor,
                  LiveRange* successorRange, CodePosition predecessorExit)
      : predecessor(predecessor),
        successor(successor),
        successorRange(successorRange),
        predecessorExit(predecessorExit) {
    // Self-edges are not represented with this structure.
    MOZ_ASSERT(predecessor != successor);
  }
};
using ControlFlowEdgeVector =
    Vector<ControlFlowEdge, 8, BackgroundSystemAllocPolicy>;
    505 
// Information about the allocation for a virtual register.
class VirtualRegister {
 public:
  // Note: most virtual registers have <= 4 live ranges (at least 95% on x64 for
  // a few very large Wasm modules).
  using RangeVector = Vector<LiveRange*, 4, BackgroundSystemAllocPolicy>;
  class RangeIterator;

 private:
  // Instruction which defines this register.
  LNode* ins_ = nullptr;

  // Definition in the instruction for this register.
  LDefinition* def_ = nullptr;

  // All live ranges for this register. These may overlap each other.
  // If |rangesSorted_| is true, then these are ordered by their start position
  // in descending order.
  RangeVector ranges_;

  // Whether def_ is a temp or an output.
  bool isTemp_ = false;

  // Whether this vreg is an input for some phi. This use is not reflected in
  // any range on the vreg.
  bool usedByPhi_ = false;

  // If this register's definition is MUST_REUSE_INPUT, whether a copy must
  // be introduced before the definition that relaxes the policy.
  bool mustCopyInput_ = false;

  // If true, the |ranges_| vector is guaranteed to be sorted.
  bool rangesSorted_ = true;

  // Non-copyable: instances are owned by the allocator's vregs vector.
  void operator=(const VirtualRegister&) = delete;
  VirtualRegister(const VirtualRegister&) = delete;

#ifdef DEBUG
  void assertRangesSorted() const;
#else
  void assertRangesSorted() const {}
#endif

  // Access |ranges_|, asserting (in debug builds) that it is sorted.
  const RangeVector& sortedRanges() const {
    assertRangesSorted();
    return ranges_;
  }

 public:
  VirtualRegister() = default;

  // One-time initialization; must not be called on an initialized register.
  void init(LNode* ins, LDefinition* def, bool isTemp) {
    MOZ_ASSERT(!ins_);
    ins_ = ins;
    def_ = def;
    isTemp_ = isTemp;
  }

  LNode* ins() const { return ins_; }
  LDefinition* def() const { return def_; }
  LDefinition::Type type() const { return def()->type(); }
  uint32_t vreg() const { return def()->virtualRegister(); }
  bool isCompatible(const AnyRegister& r) const {
    return def_->isCompatibleReg(r);
  }
  bool isCompatible(const VirtualRegister& vr) const {
    return def_->isCompatibleDef(*vr.def_);
  }
  bool isTemp() const { return isTemp_; }

  void setUsedByPhi() { usedByPhi_ = true; }
  bool usedByPhi() { return usedByPhi_; }

  void setMustCopyInput() { mustCopyInput_ = true; }
  bool mustCopyInput() { return mustCopyInput_; }

  bool hasRanges() const { return !ranges_.empty(); }
  // Because |ranges_| is sorted by start position in descending order, the
  // first range (lowest start) is at the back of the vector.
  LiveRange* firstRange() const {
    assertRangesSorted();
    return ranges_.back();
  }
  LiveRange* lastRange() const {
    assertRangesSorted();
    return ranges_[0];
  }
  LiveRange* rangeFor(CodePosition pos, bool preferRegister = false) const;
  void sortRanges();

  void removeFirstRange(RangeIterator& iter);
  void removeRangesForBundle(LiveBundle* bundle);
  template <typename Pred>
  void removeRangesIf(Pred&& pred);

  [[nodiscard]] bool replaceLastRangeLinear(LiveRange* old, LiveRange* newPre,
                                            LiveRange* newPost);

  [[nodiscard]] bool addRange(LiveRange* range);

  LiveBundle* firstBundle() const { return firstRange()->bundle(); }

  [[nodiscard]] bool addInitialRange(TempAllocator& alloc, CodePosition from,
                                     CodePosition to);
  void addInitialUse(UsePosition* use);
  void setInitialDefinition(CodePosition from);

  // Iterator visiting a VirtualRegister's live ranges in order of increasing
  // start position. Because the ranges are sorted in descending order, this
  // iterates over the vector from index |length - 1| to 0.
  class MOZ_RAII RangeIterator {
    const RangeVector& ranges_;
#ifdef DEBUG
    const VirtualRegister& reg_;
#endif
    // If |pos_| is 0, the iterator is done. Else, |pos_ - 1| is the index of
    // the range that will be returned by |*iter|.
    size_t pos_;

   public:
    explicit RangeIterator(const VirtualRegister& reg)
        : ranges_(reg.sortedRanges()),
#ifdef DEBUG
          reg_(reg),
#endif
          pos_(ranges_.length()) {
    }
    // Begin iteration at a specific index into the (descending) vector.
    RangeIterator(const VirtualRegister& reg, size_t index)
        : ranges_(reg.sortedRanges()),
#ifdef DEBUG
          reg_(reg),
#endif
          pos_(index + 1) {
      MOZ_ASSERT(index < ranges_.length());
    }

#ifdef DEBUG
    ~RangeIterator() {
      // Ranges should stay sorted during iteration.
      reg_.assertRangesSorted();
    }
#endif

    RangeIterator(RangeIterator&) = delete;
    void operator=(RangeIterator&) = delete;

    bool done() const { return pos_ == 0; }

    explicit operator bool() const { return !done(); }

    LiveRange* operator*() const {
      MOZ_ASSERT(!done());
      return ranges_[pos_ - 1];
    }
    LiveRange* operator->() { return operator*(); }

    // Index of the current range in the underlying vector.
    size_t index() const {
      MOZ_ASSERT(!done());
      return pos_ - 1;
    }

    void operator++(int) {
      MOZ_ASSERT(!done());
      pos_--;
    }
  };
};
    671 
// A sequence of code positions, for telling BacktrackingAllocator::splitAt
// where to split.
using SplitPositionVector =
    js::Vector<CodePosition, 4, BackgroundSystemAllocPolicy>;
    676 
    677 class MOZ_STACK_CLASS BacktrackingAllocator : protected RegisterAllocator {
    678 public:
    679  using IsStackAllocated = std::true_type;
    680 
    681 private:
    682  friend class GraphSpewer;
    683 
    684  // Computed data
    685  InstructionDataMap insData;
    686  Vector<CodePosition, 12, SystemAllocPolicy> entryPositions;
    687  Vector<CodePosition, 12, SystemAllocPolicy> exitPositions;
    688 
    689  // ~BacktrackingAllocator will call ~VirtualRegBitSet and ~VirtualRegister for
    690  // everything in these collections.
    691  using VirtualRegBitSet =
    692      SparseBitSet<BackgroundSystemAllocPolicy, BacktrackingAllocator>;
    693  Vector<VirtualRegBitSet, 0, JitAllocPolicy> liveIn;
    694  Vector<VirtualRegister, 0, JitAllocPolicy> vregs;
    695 
    696  // Allocation state.
    697  StackSlotAllocator stackSlotAllocator;
    698 
    699  // List of all instructions with a safepoint. The order is the same as the
    700  // order of the instructions in the LIR graph.
    701  Vector<LInstruction*, 0, JitAllocPolicy> safepoints_;
    702 
    703  // List of all non-call instructions with a safepoint. The order is the same
    704  // as the order of the instructions in the LIR graph.
    705  Vector<LInstruction*, 0, JitAllocPolicy> nonCallSafepoints_;
    706 
    707  // Priority queue element: a bundle and the associated priority.
    708  struct QueueItem {
    709    LiveBundle* bundle;
    710 
    711    QueueItem(LiveBundle* bundle, size_t priority)
    712        : bundle(bundle), priority_(priority) {}
    713 
    714    static bool higherPriority(const QueueItem& a, const QueueItem& b) {
    715      return a.priority_ > b.priority_;
    716    }
    717 
    718   private:
    719    size_t priority_;
    720  };
    721 
    722  PriorityQueue<QueueItem, QueueItem, 0, BackgroundSystemAllocPolicy>
    723      allocationQueue;
    724 
    725  // This is a set of LiveRange.  They must be non-overlapping.  Attempts
    726  // to add an overlapping range will cause AvlTree::insert to MOZ_CRASH().
    727  using LiveRangeSet = AvlTree<LiveRange*, LiveRange>;
    728 
    729  // The same, but for LiveRangePlus.  See comments on LiveRangePlus.
    730  using LiveRangePlusSet = AvlTree<LiveRangePlus, LiveRangePlus>;
    731 
    732  // Each physical register is associated with the set of ranges over which
    733  // that register is currently allocated.
    734  struct PhysicalRegister {
    735    bool allocatable;
    736    AnyRegister reg;
    737    LiveRangePlusSet allocations;
    738 
    739    PhysicalRegister() : allocatable(false) {}
    740  };
    741  mozilla::Array<PhysicalRegister, AnyRegister::Total> registers;
    742 
    743  // Ranges of code which are considered to be hot, for which good allocation
    744  // should be prioritized.
    745  LiveRangeSet hotcode;
    746 
    747  // Output positions of all call instructions (where all registers must be
    748  // spilled). This vector is sorted in ascending order and doesn't contain
    749  // duplicate values.
    750  Vector<CodePosition, 16, BackgroundSystemAllocPolicy> callPositions;
    751 
    752  // Information about an allocated stack slot.
    753  struct SpillSlot : public TempObject,
    754                     public InlineForwardListNode<SpillSlot> {
    755    LStackSlot alloc;
    756    LiveRangePlusSet allocated;
    757 
    758    SpillSlot(uint32_t slot, LStackSlot::Width width, LifoAlloc* alloc)
    759        : alloc(slot, width), allocated(alloc) {}
    760  };
    761  using SpillSlotList = InlineForwardList<SpillSlot>;
    762 
    763  // All allocated slots of each width.
    764  SpillSlotList normalSlots, doubleSlots, quadSlots;
    765 
    766  Vector<LiveBundle*, 4, BackgroundSystemAllocPolicy> spilledBundles;
    767 
    768  // The bundle id that will be used for the next LiveBundle that's allocated.
    769  uint32_t nextBundleId_ = 0;
    770 
    771  using LiveBundleVector = Vector<LiveBundle*, 4, BackgroundSystemAllocPolicy>;
    772 
    773  // Misc accessors
    774  bool compilingWasm() { return mir->outerInfo().compilingWasm(); }
    775  VirtualRegister& vreg(const LDefinition* def) {
    776    return vregs[def->virtualRegister()];
    777  }
    778  VirtualRegister& vreg(const LAllocation* alloc) {
    779    MOZ_ASSERT(alloc->isUse());
    780    return vregs[alloc->toUse()->virtualRegister()];
    781  }
    782 
    783  uint32_t getNextBundleId() { return nextBundleId_++; }
    784 
    785  CodePosition entryOf(const LBlock* block) {
    786    return entryPositions[block->mir()->id()];
    787  }
    788  CodePosition exitOf(const LBlock* block) {
    789    return exitPositions[block->mir()->id()];
    790  }
    791 
    792  // Atomic group helper.  See comments in BacktrackingAllocator.cpp.
    793  CodePosition minimalDefEnd(LNode* ins) const;
    794 
    795  // Helpers for creating and adding MoveGroups
    796  [[nodiscard]] bool addMove(LMoveGroup* moves, LiveRange* from, LiveRange* to,
    797                             LDefinition::Type type) {
    798    LAllocation fromAlloc = from->bundle()->allocation();
    799    LAllocation toAlloc = to->bundle()->allocation();
    800    MOZ_ASSERT(fromAlloc != toAlloc);
    801    return moves->add(fromAlloc, toAlloc, type);
    802  }
    803 
    804  [[nodiscard]] bool moveInput(LInstruction* ins, LiveRange* from,
    805                               LiveRange* to, LDefinition::Type type) {
    806    if (from->bundle()->allocation() == to->bundle()->allocation()) {
    807      return true;
    808    }
    809    LMoveGroup* moves = getInputMoveGroup(ins);
    810    return addMove(moves, from, to, type);
    811  }
    812 
    813  [[nodiscard]] bool moveAfter(LInstruction* ins, LiveRange* from,
    814                               LiveRange* to, LDefinition::Type type) {
    815    if (from->bundle()->allocation() == to->bundle()->allocation()) {
    816      return true;
    817    }
    818    LMoveGroup* moves = getMoveGroupAfter(ins);
    819    return addMove(moves, from, to, type);
    820  }
    821 
    822  [[nodiscard]] bool moveAtExit(LBlock* block, LiveRange* from, LiveRange* to,
    823                                LDefinition::Type type) {
    824    if (from->bundle()->allocation() == to->bundle()->allocation()) {
    825      return true;
    826    }
    827    LMoveGroup* moves = block->getExitMoveGroup(alloc());
    828    return addMove(moves, from, to, type);
    829  }
    830 
    831  [[nodiscard]] bool moveAtEntry(LBlock* block, LiveRange* from, LiveRange* to,
    832                                 LDefinition::Type type) {
    833    if (from->bundle()->allocation() == to->bundle()->allocation()) {
    834      return true;
    835    }
    836    LMoveGroup* moves = block->getEntryMoveGroup(alloc());
    837    return addMove(moves, from, to, type);
    838  }
    839 
    840  // Out-of-line methods, in the same sequence as in BacktrackingAllocator.cpp.
    841 
    842  // Misc helpers: queries about uses
    843  bool isReusedInput(LUse* use, LNode* ins, bool considerCopy);
    844  bool isRegisterUse(UsePosition* use, LNode* ins, bool considerCopy = false);
    845  bool isRegisterDefinition(LiveRange* range);
    846 
    847  // Misc helpers: atomic LIR groups
    848  // (these are all in the parent class, RegisterAllocator)
    849 
    850  // Misc helpers: computation of bundle priorities and spill weights
    851  size_t computePriority(LiveBundle* bundle);
    852  bool minimalDef(LiveRange* range, LNode* ins);
    853  bool minimalUse(LiveRange* range, UsePosition* use);
    854  bool minimalBundle(LiveBundle* bundle, bool* pfixed = nullptr);
    855  size_t computeSpillWeight(LiveBundle* bundle);
    856  size_t maximumSpillWeight(const LiveBundleVector& bundles);
    857 
    858  // Initialization of the allocator
    859  [[nodiscard]] bool init();
    860 
    861  // Liveness analysis
    862  [[nodiscard]] bool addInitialFixedRange(AnyRegister reg, CodePosition from,
    863                                          CodePosition to);
    864  [[nodiscard]] bool buildLivenessInfo();
    865 
    866  // Call positions.
    867  mozilla::Maybe<size_t> lookupFirstCallPositionInRange(CodePosition from,
    868                                                        CodePosition to);
    869 
    870  // Merging and queueing of LiveRange groups
    871  void tryMergeBundles(LiveBundle* bundle0, LiveBundle* bundle1);
    872  void allocateStackDefinition(VirtualRegister& reg);
    873  [[nodiscard]] bool tryMergeReusedRegister(VirtualRegister& def,
    874                                            VirtualRegister& input);
    875  [[nodiscard]] bool mergeAndQueueRegisters();
    876 
    877  // Implementation of splitting decisions, but not the making of those
    878  // decisions
    879  [[nodiscard]] bool updateVirtualRegisterListsThenRequeueBundles(
    880      LiveBundle* bundle, const LiveBundleVector& newBundles);
    881 
 // The core split routine: breaks `bundle` apart at the given positions and
 // requeues the resulting pieces (does not decide where to split).
    884  [[nodiscard]] bool splitAt(LiveBundle* bundle,
    885                             const SplitPositionVector& splitPositions);
    886 
    887  // Creation of splitting decisions, but not their implementation
    888  [[nodiscard]] bool splitAcrossCalls(LiveBundle* bundle);
    889  [[nodiscard]] bool trySplitAcrossHotcode(LiveBundle* bundle, bool* success);
    890  [[nodiscard]] bool trySplitAfterLastRegisterUse(LiveBundle* bundle,
    891                                                  LiveBundle* conflict,
    892                                                  bool* success);
    893  [[nodiscard]] bool trySplitBeforeFirstRegisterUse(LiveBundle* bundle,
    894                                                    LiveBundle* conflict,
    895                                                    bool* success);
    896 
    897  // The top level driver for the splitting machinery
    898  [[nodiscard]] bool chooseBundleSplit(LiveBundle* bundle, bool hasCall,
    899                                       LiveBundle* conflict);
    900 
    901  // Bundle allocation
    902  [[nodiscard]] bool computeRequirement(LiveBundle* bundle,
    903                                        Requirement* prequirement,
    904                                        Requirement* phint);
    905  [[nodiscard]] bool tryAllocateRegister(PhysicalRegister& r,
    906                                         LiveBundle* bundle, bool* success,
    907                                         bool* hasCall,
    908                                         LiveBundleVector& conflicting);
    909  [[nodiscard]] bool tryAllocateAnyRegister(LiveBundle* bundle, bool* success,
    910                                            bool* hasCall,
    911                                            LiveBundleVector& conflicting);
    912  [[nodiscard]] bool evictBundle(LiveBundle* bundle);
    913  [[nodiscard]] bool tryAllocateFixed(LiveBundle* bundle,
    914                                      Requirement requirement, bool* success,
    915                                      bool* hasCall,
    916                                      LiveBundleVector& conflicting);
    917  [[nodiscard]] bool tryAllocateNonFixed(LiveBundle* bundle,
    918                                         Requirement requirement,
    919                                         Requirement hint, bool* success,
    920                                         bool* hasCall,
    921                                         LiveBundleVector& conflicting);
    922  [[nodiscard]] bool processBundle(const MIRGenerator* mir, LiveBundle* bundle);
    923  [[nodiscard]] bool spill(LiveBundle* bundle);
    924  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool
    925  tryAllocatingRegistersForSpillBundles();
    926 
    927  // Rewriting of the LIR after bundle processing is done
    928  [[nodiscard]] bool insertAllRanges(LiveRangePlusSet& set, LiveBundle* bundle);
    929  void sortVirtualRegisterRanges();
    930  [[nodiscard]] bool pickStackSlot(SpillSet* spill);
    931  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool pickStackSlots();
    932  [[nodiscard]] bool moveAtEdge(LBlock* predecessor, LBlock* successor,
    933                                LiveRange* from, LiveRange* to,
    934                                LDefinition::Type type);
    935  void removeDeadRanges(VirtualRegister& reg);
    936  [[nodiscard]] bool createMoveGroupsForControlFlowEdges(
    937      const VirtualRegister& reg, const ControlFlowEdgeVector& edges);
    938  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool
    939  createMoveGroupsFromLiveRangeTransitions();
    940  size_t findFirstNonCallSafepoint(CodePosition pos, size_t startFrom);
    941  void addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range,
    942                                size_t* firstNonCallSafepoint);
    943  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool installAllocationsInLIR();
    944  size_t findFirstSafepoint(CodePosition pos, size_t startFrom);
    945  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool populateSafepoints();
    946  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool annotateMoveGroups();
    947 
    948  // Debug-printing support
    949 #ifdef JS_JITSPEW
    950  void dumpLiveRangesByVReg(const char* who);
    951  void dumpLiveRangesByBundle(const char* who);
    952  void dumpAllocations();
    953 #endif
    954 
    955  // Top level of the register allocation machinery, and the only externally
    956  // visible bit.
    957 public:
 // Construct an allocator over `graph`. All per-pass containers draw from
 // the MIR generator's LifoAlloc; the substantive work happens in go().
 BacktrackingAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph)
     : RegisterAllocator(mir, lir, graph),
       liveIn(mir->alloc()),
       vregs(mir->alloc()),
       safepoints_(mir->alloc()),
       nonCallSafepoints_(mir->alloc()) {}
    964 
    965  [[nodiscard]] bool go();
    966 };
    967 
    968 }  // namespace jit
    969 }  // namespace js
    970 
    971 #endif /* jit_BacktrackingAllocator_h */