// MoveResolver.h
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_MoveResolver_h
#define jit_MoveResolver_h

#include <algorithm>

#include "jit/InlineList.h"
#include "jit/JitAllocPolicy.h"
#include "jit/Registers.h"
#include "jit/RegisterSets.h"
#include "jit/shared/Assembler-shared.h"

namespace js {
namespace jit {

class MacroAssembler;

// This is similar to Operand, but carries more information. We're also not
// guaranteed that Operand looks like this on all ISAs.
//
// A MoveOperand describes one endpoint (source or destination) of a parallel
// move: a general-purpose register, a float register, a register pair (on
// ISAs that define JS_CODEGEN_REGISTER_PAIR), a memory location
// [base + disp], or the effective address of such a location.
class MoveOperand {
 public:
  enum class Kind : uint8_t {
    // A register in the "integer", aka "general purpose", class.
    Reg,
#ifdef JS_CODEGEN_REGISTER_PAIR
    // Two consecutive "integer" registers (aka "general purpose"). The even
    // register contains the lower part, the odd register has the high bits
    // of the content.
    RegPair,
#endif
    // A register in the "float" register class.
    FloatReg,
    // A memory region.
    Memory,
    // The address of a memory region.
    EffectiveAddress
  };

 private:
  Kind kind_;
  // Register code for Reg/RegPair/FloatReg, or the base register's code for
  // Memory/EffectiveAddress.
  uint8_t code_;
  // Displacement from the base register; only meaningful for the
  // Memory/EffectiveAddress kinds (see disp()).
  int32_t disp_;

  // code_ is a uint8_t, so every register code of either class must fit.
  static_assert(std::max(Registers::Total, FloatRegisters::Total) <= UINT8_MAX,
                "Any register code must fit in code_");

 public:
  MoveOperand() = delete;
  explicit MoveOperand(Register reg)
      : kind_(Kind::Reg), code_(reg.code()), disp_(0) {}
  explicit MoveOperand(FloatRegister reg)
      : kind_(Kind::FloatReg), code_(reg.code()), disp_(0) {}
  // Build a Memory (default) or EffectiveAddress operand at [reg + disp].
  MoveOperand(Register reg, int32_t disp, Kind kind = Kind::Memory)
      : kind_(kind), code_(reg.code()), disp_(disp) {
    MOZ_ASSERT(isMemoryOrEffectiveAddress());

    // With a zero offset, this is a plain reg-to-reg move: the effective
    // address of [reg + 0] is just reg itself, so demote the operand to a
    // plain register.
    if (disp == 0 && kind_ == Kind::EffectiveAddress) {
      kind_ = Kind::Reg;
    }
  }
  explicit MoveOperand(const Address& addr, Kind kind = Kind::Memory)
      : MoveOperand(AsRegister(addr.base), addr.offset, kind) {}
  // Defined out of line; derives an operand from an ABI argument location.
  MoveOperand(MacroAssembler& masm, const ABIArg& arg);
  MoveOperand(const MoveOperand& other) = default;
  bool isFloatReg() const { return kind_ == Kind::FloatReg; }
  bool isGeneralReg() const { return kind_ == Kind::Reg; }
  bool isGeneralRegPair() const {
#ifdef JS_CODEGEN_REGISTER_PAIR
    return kind_ == Kind::RegPair;
#else
    return false;
#endif
  }
  bool isMemory() const { return kind_ == Kind::Memory; }
  bool isEffectiveAddress() const { return kind_ == Kind::EffectiveAddress; }
  bool isMemoryOrEffectiveAddress() const {
    return isMemory() || isEffectiveAddress();
  }
  Register reg() const {
    MOZ_ASSERT(isGeneralReg());
    return Register::FromCode(code_);
  }
  // Low half of a register pair (pairs are stored as the even register's
  // code; the odd partner is code_ + 1).
  Register evenReg() const {
    MOZ_ASSERT(isGeneralRegPair());
    return Register::FromCode(code_);
  }
  // High half of a register pair.
  Register oddReg() const {
    MOZ_ASSERT(isGeneralRegPair());
    return Register::FromCode(code_ + 1);
  }
  FloatRegister floatReg() const {
    MOZ_ASSERT(isFloatReg());
    return FloatRegister::FromCode(code_);
  }
  Register base() const {
    MOZ_ASSERT(isMemoryOrEffectiveAddress());
    return Register::FromCode(code_);
  }
  int32_t disp() const {
    MOZ_ASSERT(isMemoryOrEffectiveAddress());
    return disp_;
  }

  // Returns whether this operand and |other| can refer to overlapping
  // storage. Register-vs-memory aliasing through the base register is
  // asserted against rather than handled (see comment below).
  bool aliases(MoveOperand other) const {
    // These are not handled presently, but Memory and EffectiveAddress
    // only appear in controlled circumstances in the trampoline code
    // which ensures these cases never come up.

    MOZ_ASSERT_IF(isMemoryOrEffectiveAddress() && other.isGeneralReg(),
                  base() != other.reg());
    MOZ_ASSERT_IF(other.isMemoryOrEffectiveAddress() && isGeneralReg(),
                  other.base() != reg());

    // Check if one of the operands is a register pair, in which case, we
    // have to check any other register, or register pair.
    if (isGeneralRegPair() || other.isGeneralRegPair()) {
      if (isGeneralRegPair() && other.isGeneralRegPair()) {
        // Assume that register pairs are aligned on even registers.
        MOZ_ASSERT(!evenReg().aliases(other.oddReg()));
        MOZ_ASSERT(!oddReg().aliases(other.evenReg()));
        // Pair of registers are composed of consecutive registers, thus
        // if the first registers are aliased, then the second registers
        // are aliased too.
        MOZ_ASSERT(evenReg().aliases(other.evenReg()) ==
                   oddReg().aliases(other.oddReg()));
        return evenReg().aliases(other.evenReg());
      } else if (other.isGeneralReg()) {
        MOZ_ASSERT(isGeneralRegPair());
        return evenReg().aliases(other.reg()) || oddReg().aliases(other.reg());
      } else if (isGeneralReg()) {
        MOZ_ASSERT(other.isGeneralRegPair());
        return other.evenReg().aliases(reg()) || other.oddReg().aliases(reg());
      }
      // A pair vs. a float register (or memory) never aliases here.
      return false;
    }

    if (kind_ != other.kind_) {
      return false;
    }
    if (kind_ == Kind::FloatReg) {
      // Float registers may partially overlap (e.g. single vs. double
      // views), so defer to FloatRegister::aliases.
      return floatReg().aliases(other.floatReg());
    }
    if (code_ != other.code_) {
      return false;
    }
    if (isMemoryOrEffectiveAddress()) {
      // Same base register: alias only if the displacement matches too.
      // NOTE(review): this treats distinct displacements as non-overlapping,
      // which presumes the accesses do not partially overlap — the
      // controlled trampoline usage above is what makes this safe.
      return disp_ == other.disp_;
    }
    return true;
  }

  // Exact structural equality: same kind, same code, and (for memory kinds)
  // same displacement.
  bool operator==(const MoveOperand& other) const {
    if (kind_ != other.kind_) {
      return false;
    }
    if (code_ != other.code_) {
      return false;
    }
    if (isMemoryOrEffectiveAddress()) {
      return disp_ == other.disp_;
    }
    return true;
  }
  bool operator!=(const MoveOperand& other) const { return !operator==(other); }
};

// This represents a move operation.
// A single move (from -> to) of a given type, possibly participating in a
// cycle of moves. Cycle membership and slots are filled in by the resolver
// via the PendingMove subclass below.
class MoveOp {
 protected:
  MoveOperand from_;
  MoveOperand to_;
  // Spill-slot indices used to break cycles; -1 until assigned (see the
  // asserts in cycleBeginSlot()/cycleEndSlot()).
  int32_t cycleBeginSlot_ = -1;
  int32_t cycleEndSlot_ = -1;
  // Whether this move starts / ends a cycle in the ordered move list.
  bool cycleBegin_ = false;
  bool cycleEnd_ = false;

 public:
  enum Type : uint8_t { GENERAL, INT32, FLOAT32, DOUBLE, SIMD128 };

 protected:
  Type type_;

  // If cycleBegin_ is true, endCycleType_ is the type of the move at the end
  // of the cycle. For example, given these moves:
  //   INT32 move a -> b
  //   GENERAL move b -> a
  // the move resolver starts by copying b into a temporary location, so that
  // the last move can read it. This copy needs to use type GENERAL.
  Type endCycleType_;

 public:
  MoveOp() = delete;
  MoveOp(const MoveOperand& from, const MoveOperand& to, Type type)
      : from_(from),
        to_(to),
        type_(type),
        endCycleType_(GENERAL)  // initialize to silence UBSan warning
  {}

  bool isCycleBegin() const { return cycleBegin_; }
  bool isCycleEnd() const { return cycleEnd_; }
  uint32_t cycleBeginSlot() const {
    MOZ_ASSERT(cycleBeginSlot_ != -1);
    return cycleBeginSlot_;
  }
  uint32_t cycleEndSlot() const {
    MOZ_ASSERT(cycleEndSlot_ != -1);
    return cycleEndSlot_;
  }
  const MoveOperand& from() const { return from_; }
  const MoveOperand& to() const { return to_; }
  Type type() const { return type_; }
  // Only valid on a cycle-begin move; see endCycleType_ above.
  Type endCycleType() const {
    MOZ_ASSERT(isCycleBegin());
    return endCycleType_;
  }
  // True if either endpoint of this move may overlap |op|.
  bool aliases(const MoveOperand& op) const {
    return from().aliases(op) || to().aliases(op);
  }
  // True if this move may overlap either endpoint of |other|.
  bool aliases(const MoveOp& other) const {
    return aliases(other.from()) || aliases(other.to());
  }
#ifdef JS_CODEGEN_ARM
  // Replace this move in place. ARM-only; used by the ARM move emitter.
  void overwrite(MoveOperand& from, MoveOperand& to, Type type) {
    from_ = from;
    to_ = to;
    type_ = type;
  }
#endif
};

// Orders a group of parallel moves so they can be emitted sequentially,
// detecting and annotating cycles that need a temporary. See addMove()/
// resolve() below for the intended calling protocol.
class MoveResolver {
 private:
  // A move awaiting ordering. Lives in movePool_ (TempObject) and is linked
  // into the pending_ list; exposes setters for the cycle fields that MoveOp
  // keeps protected.
  struct PendingMove : public MoveOp,
                       public TempObject,
                       public InlineListNode<PendingMove> {
    PendingMove() = delete;

    PendingMove(const MoveOperand& from, const MoveOperand& to, Type type)
        : MoveOp(from, to, type) {}

    // Mark this move as the start of a cycle, recording the type of the
    // cycle-ending move and the spill slot used to break the cycle.
    void setCycleBegin(Type endCycleType, int cycleSlot) {
      MOZ_ASSERT(!cycleBegin_);
      cycleBegin_ = true;
      cycleBeginSlot_ = cycleSlot;
      endCycleType_ = endCycleType;
    }
    // Mark this move as the end of a cycle using the given spill slot.
    void setCycleEnd(int cycleSlot) {
      MOZ_ASSERT(!cycleEnd_);
      cycleEnd_ = true;
      cycleEndSlot_ = cycleSlot;
    }
  };

  using PendingMoveIterator = InlineList<MoveResolver::PendingMove>::iterator;

  // The resolved, emission-ordered moves produced by resolve().
  js::Vector<MoveOp, 16, SystemAllocPolicy> orderedMoves_;
  int numCycles_;
  int curCycles_;
  TempObjectPool<PendingMove> movePool_;

  // Moves added via addMove() that have not yet been ordered.
  InlineList<PendingMove> pending_;

  PendingMove* findBlockingMove(const PendingMove* last);
  PendingMove* findCycledMove(PendingMoveIterator* stack,
                              PendingMoveIterator end,
                              const PendingMove* first);
  [[nodiscard]] bool addOrderedMove(const MoveOp& move);
  void reorderMove(size_t from, size_t to);

  // Internal reset function. Does not clear lists.
  void resetState();

#ifdef JS_CODEGEN_ARM
  bool isDoubleAliasedAsSingle(const MoveOperand& move);
#endif

 public:
  MoveResolver();

  // Resolves a move group into two lists of ordered moves. These moves must
  // be executed in the order provided. Some moves may indicate that they
  // participate in a cycle. For every cycle there are two such moves, and it
  // is guaranteed that cycles do not nest inside each other in the list.
  //
  // After calling addMove() for each parallel move, resolve() performs the
  // cycle resolution algorithm. Calling addMove() again resets the resolver.
  [[nodiscard]] bool addMove(const MoveOperand& from, const MoveOperand& to,
                             MoveOp::Type type);
  [[nodiscard]] bool resolve();
  void sortMemoryToMemoryMoves();

  // Accessors for the resolved move list; valid after resolve().
  size_t numMoves() const { return orderedMoves_.length(); }
  const MoveOp& getMove(size_t i) const { return orderedMoves_[i]; }
  uint32_t numCycles() const { return numCycles_; }
  bool hasNoPendingMoves() const { return pending_.empty(); }
  void setAllocator(TempAllocator& alloc) { movePool_.setAllocator(alloc); }
};

}  // namespace jit
}  // namespace js

#endif /* jit_MoveResolver_h */