Lowering-shared.h (16572B)
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_shared_Lowering_shared_h
#define jit_shared_Lowering_shared_h

// This file declares the structures that are used for attaching LIR to a
// MIRGraph.

#include "jit/LIR.h"
#include "jit/MIRGenerator.h"

namespace js {
namespace jit {

class MIRGenerator;
class MIRGraph;
class MDefinition;
class MInstruction;
class LOsiPoint;

// Platform-independent base for the per-architecture LIRGenerator classes.
// Provides the shared machinery for translating MIR into LIR: operand "use"
// builders, definition helpers, snapshot/safepoint assignment, and virtual
// register bookkeeping.
class LIRGeneratorShared {
 protected:
  MIRGenerator* gen;
  MIRGraph& graph;
  LIRGraph& lirGraph_;
  // LIR block currently receiving lowered instructions.
  // NOTE(review): assigned by the lowering driver elsewhere; only initialized
  // to nullptr here.
  LBlock* current;
  // Most recently processed resume point; used when building snapshots.
  MResumePoint* lastResumePoint_;
  // Cached LRecoverInfo for lastResumePoint_, to avoid rebuilding it.
  LRecoverInfo* cachedRecoverInfo_;
  // Pending OSI point, handed out (and cleared) by popOsiPoint().
  LOsiPoint* osiPoint_;

  LIRGeneratorShared(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
      : gen(gen),
        graph(graph),
        lirGraph_(lirGraph),
        current(nullptr),
        lastResumePoint_(nullptr),
        cachedRecoverInfo_(nullptr),
        osiPoint_(nullptr) {}

  MIRGenerator* mir() { return gen; }

  // Abort errors are caught at end of visitInstruction. It is possible for
  // multiple errors to be detected before the end of visitInstruction. In
  // this case, we only report the first back to the MIRGenerator.
  bool errored() { return gen->getOffThreadStatus().isErr(); }
  // printf-style abort. MOZ_FORMAT_PRINTF(3, 4): the format string is
  // parameter 3 and varargs start at 4, counting the implicit |this| as
  // parameter 1.
  void abort(AbortReason r, const char* message, ...) MOZ_FORMAT_PRINTF(3, 4) {
    if (errored()) {
      return;
    }

    va_list ap;
    va_start(ap, message);
    auto reason_ = gen->abortFmt(r, message, ap);
    va_end(ap);
    gen->setOffThreadStatus(reason_);
  }
  void abort(AbortReason r) {
    if (errored()) {
      return;
    }

    auto reason_ = gen->abort(r);
    gen->setOffThreadStatus(reason_);
  }

  static void ReorderCommutative(MDefinition** lhsp, MDefinition** rhsp,
                                 MInstruction* ins);
  static bool ShouldReorderCommutative(MDefinition* lhs, MDefinition* rhs,
                                       MInstruction* ins);

  // A backend can decide that an instruction should be emitted at its uses,
  // rather than at its definition. To communicate this, set the
  // instruction's virtual register set to 0. When using the instruction,
  // its virtual register is temporarily reassigned. To know to clear it
  // after constructing the use information, the worklist bit is temporarily
  // unset.
  //
  // The backend can use the worklist bit to determine whether or not a
  // definition should be created.
  inline void emitAtUses(MInstruction* mir);

  // The lowest-level calls to use, those that do not wrap another call to
  // use(), must prefix grabbing virtual register IDs by these calls.
  inline void ensureDefined(MDefinition* mir);

  void visitEmittedAtUses(MInstruction* ins);

  // These all create a use of a virtual register, with an optional
  // allocation policy.
  //
  // Some of these use functions have atStart variants.
  // - non-atStart variants will tell the register allocator that the input
  // allocation must be different from any Temp or Definition also needed for
  // this LInstruction.
  // - atStart variants relax that restriction and allow the input to be in
  // the same register as any output Definition (but not Temps) used by the
  // LInstruction. Note that it doesn't *imply* this will actually happen,
  // but gives a hint to the register allocator that it can do it.
  //
  // TL;DR: Use non-atStart variants only if you need the input value after
  // writing to any definitions (excluding temps), during code generation of
  // this LInstruction. Otherwise, use atStart variants, which will lower
  // register pressure.
  //
  // There is an additional constraint. Consider a MIR node with two
  // MDefinition* operands, op1 and op2. If the node reuses the register of op1
  // for its output then op1 must be used as atStart. Then, if op1 and op2
  // represent the same LIR node then op2 must be an atStart use too; otherwise
  // op2 must be a non-atStart use. There is however not always a 1-1 mapping
  // from MDefinition* to LNode*, so to determine whether two MDefinition* map
  // to the same LNode*, ALWAYS go via the willHaveDifferentLIRNodes()
  // predicate. Do not use pointer equality on the MIR nodes.
  //
  // Do not add other conditions when using willHaveDifferentLIRNodes(). The
  // predicate is the source of truth about whether to use atStart or not, no
  // other conditions may apply in contexts when it is appropriate to use it.
  inline LUse use(MDefinition* mir, LUse policy);
  inline LUse use(MDefinition* mir);
  inline LUse useAtStart(MDefinition* mir);
  inline LUse useRegister(MDefinition* mir);
  inline LUse useRegisterAtStart(MDefinition* mir);
  inline LUse useFixed(MDefinition* mir, Register reg);
  inline LUse useFixed(MDefinition* mir, FloatRegister reg);
  inline LUse useFixed(MDefinition* mir, AnyRegister reg);
  inline LUse useFixedAtStart(MDefinition* mir, Register reg);
  inline LUse useFixedAtStart(MDefinition* mir, AnyRegister reg);
  inline LAllocation useOrConstant(MDefinition* mir);
  inline LAllocation useOrConstantAtStart(MDefinition* mir);
  // "Any" is architecture dependent, and will include registers and stack
  // slots on X86, and only registers on ARM.
  inline LAllocation useAny(MDefinition* mir);
  inline LAllocation useAnyAtStart(MDefinition* mir);
  inline LAllocation useAnyOrConstant(MDefinition* mir);
  // "Storable" is architecture dependent, and will include registers and
  // constants on X86 and only registers on ARM. This is a generic "things
  // we can expect to write into memory in 1 instruction".
  inline LAllocation useStorable(MDefinition* mir);
  inline LAllocation useStorableAtStart(MDefinition* mir);
  inline LAllocation useKeepalive(MDefinition* mir);
  inline LAllocation useKeepaliveOrConstant(MDefinition* mir);
  inline LAllocation useRegisterOrConstant(MDefinition* mir);
  inline LAllocation useRegisterOrConstantAtStart(MDefinition* mir);
  inline LAllocation useRegisterOrZeroAtStart(MDefinition* mir);
  inline LAllocation useRegisterOrZero(MDefinition* mir);
  inline LAllocation useRegisterOrNonDoubleConstant(MDefinition* mir);

  // These methods accept either an Int32 or IntPtr value. A constant is used if
  // the value fits in an int32.
  inline LAllocation useRegisterOrInt32Constant(MDefinition* mir);
  inline LAllocation useAnyOrInt32Constant(MDefinition* mir);

  // Like useRegisterOrInt32Constant, but uses a constant only if
  // |int32val * Scalar::byteSize(type)| doesn't overflow int32.
  LAllocation useRegisterOrIndexConstant(MDefinition* mir, Scalar::Type type);

  inline LUse useRegisterForTypedLoad(MDefinition* mir, MIRType type);

#ifdef JS_NUNBOX32
  // On 32-bit platforms a Value occupies two registers: a type word and a
  // payload word. These helpers create uses for the individual halves.
  inline LUse useType(MDefinition* mir, LUse::Policy policy);
  inline LUse usePayload(MDefinition* mir, LUse::Policy policy);
  inline LUse usePayloadAtStart(MDefinition* mir, LUse::Policy policy);
  inline LUse usePayloadInRegisterAtStart(MDefinition* mir);

  // Adds a box input to an instruction, setting operand |n| to the type and
  // |n+1| to the payload. Does not modify the operands, instead expecting a
  // policy to already be set.
  inline void fillBoxUses(LInstruction* lir, size_t n, MDefinition* mir);
#endif

  // Test whether mir1 and mir2 may give rise to different LIR nodes even if
  // mir1 == mir2; use it to guide the selection of the use directive for one of
  // the nodes in the context of a reused input. See comments above about why
  // it's important to use this predicate and not pointer equality.
  //
  // This predicate may be called before or after the application of a use
  // directive to the first of the nodes, but it is meaningless to call it after
  // the application of a directive to the second node.
  inline bool willHaveDifferentLIRNodes(MDefinition* mir1, MDefinition* mir2);

  // These create temporary register requests.
  inline LDefinition temp(LDefinition::Type type = LDefinition::GENERAL,
                          LDefinition::Policy policy = LDefinition::REGISTER);
  inline LInt64Definition tempInt64(
      LDefinition::Policy policy = LDefinition::REGISTER);
  inline LDefinition tempFloat32();
  inline LDefinition tempDouble();
#ifdef ENABLE_WASM_SIMD
  inline LDefinition tempSimd128();
#endif
  inline LDefinition tempCopy(MDefinition* input, uint32_t reusedInput);

  // Note that the fixed register has a GENERAL type,
  // unless the arg is of FloatRegister type
  inline LDefinition tempFixed(Register reg);
  inline LDefinition tempFixed(FloatRegister reg);
  inline LInt64Definition tempInt64Fixed(Register64 reg);

  // define* helpers: assign a virtual register to |mir| and attach the
  // corresponding output definition(s) to |lir|.
  template <size_t Ops, size_t Temps>
  inline void defineFixed(LInstructionHelper<1, Ops, Temps>* lir,
                          MDefinition* mir, const LAllocation& output);

  template <size_t Temps>
  inline void defineBox(
      details::LInstructionFixedDefsTempsHelper<BOX_PIECES, Temps>* lir,
      MDefinition* mir, LDefinition::Policy policy = LDefinition::REGISTER);

  template <size_t Ops, size_t Temps>
  inline void defineInt64(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir,
                          MDefinition* mir,
                          LDefinition::Policy policy = LDefinition::REGISTER);

  template <size_t Ops, size_t Temps>
  inline void defineInt64Fixed(
      LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
      const LInt64Allocation& output);

  inline void defineReturn(LInstruction* lir, MDefinition* mir);

  template <size_t X>
  inline void define(details::LInstructionFixedDefsTempsHelper<1, X>* lir,
                     MDefinition* mir,
                     LDefinition::Policy policy = LDefinition::REGISTER);
  template <size_t X>
  inline void define(details::LInstructionFixedDefsTempsHelper<1, X>* lir,
                     MDefinition* mir, const LDefinition& def);

  // defineReuseInput variants: the output shares its register with |operand|.
  // See the atStart discussion above for the constraints this imposes on the
  // reused operand's use directive.
  template <size_t Ops, size_t Temps>
  inline void defineReuseInput(LInstructionHelper<1, Ops, Temps>* lir,
                               MDefinition* mir, uint32_t operand);

  template <size_t Ops, size_t Temps>
  inline void defineBoxReuseInput(
      LInstructionHelper<BOX_PIECES, Ops, Temps>* lir, MDefinition* mir,
      uint32_t operand);

  template <size_t Ops, size_t Temps>
  inline void defineInt64ReuseInput(
      LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
      uint32_t operand);

  // Returns a box allocation for a Value-typed instruction.
  inline LBoxAllocation useBox(MDefinition* mir,
                               LUse::Policy policy = LUse::REGISTER,
                               bool useAtStart = false);

  // Returns a box allocation. The use is either typed, a Value, or
  // a constant (if useConstant is true).
  inline LBoxAllocation useBoxOrTypedOrConstant(MDefinition* mir,
                                                bool useConstant,
                                                bool useAtStart = false);
  inline LBoxAllocation useBoxOrTyped(MDefinition* mir,
                                      bool useAtStart = false);

  // Returns an int64 allocation for an Int64-typed instruction.
  inline LInt64Allocation useInt64(MDefinition* mir, LUse::Policy policy,
                                   bool useAtStart = false);
  inline LInt64Allocation useInt64(MDefinition* mir, bool useAtStart = false);
  inline LInt64Allocation useInt64AtStart(MDefinition* mir);
  inline LInt64Allocation useInt64OrConstant(MDefinition* mir,
                                             bool useAtStart = false);
  inline LInt64Allocation useInt64Register(MDefinition* mir,
                                           bool useAtStart = false);
  inline LInt64Allocation useInt64RegisterOrConstant(MDefinition* mir,
                                                     bool useAtStart = false);
  inline LInt64Allocation useInt64Fixed(MDefinition* mir, Register64 regs,
                                        bool useAtStart = false);
  inline LInt64Allocation useInt64FixedAtStart(MDefinition* mir,
                                               Register64 regs);

  inline LInt64Allocation useInt64RegisterAtStart(MDefinition* mir);
  inline LInt64Allocation useInt64RegisterOrConstantAtStart(MDefinition* mir);
  inline LInt64Allocation useInt64OrConstantAtStart(MDefinition* mir);

#ifdef JS_NUNBOX32
  // Returns a non-int64 allocation for an Int64-typed instruction.
  inline LUse useLowWord(MDefinition* mir, LUse policy);
  inline LUse useLowWordRegister(MDefinition* mir);
  inline LUse useLowWordRegisterAtStart(MDefinition* mir);
  inline LUse useLowWordFixed(MDefinition* mir, Register reg);
#endif

  // Rather than defining a new virtual register, sets |ins| to have the same
  // virtual register as |as|.
  inline void redefine(MDefinition* ins, MDefinition* as);

  // Allocates an LIR instruction with a run-time-determined operand count.
  template <typename LClass, typename... Args>
  inline LClass* allocateVariadic(uint32_t numOperands, Args&&... args);

  TempAllocator& alloc() const { return graph.alloc(); }

  uint32_t getVirtualRegister() {
    uint32_t vreg = lirGraph_.getVirtualRegister();

    // If we run out of virtual registers, mark code generation as having
    // failed and return a dummy vreg. Include a + 1 here for NUNBOX32
    // platforms that expect Value vregs to be adjacent.
    if (vreg + 1 >= MAX_VIRTUAL_REGISTERS) {
      abort(AbortReason::Alloc, "max virtual registers");
      return 1;
    }
    return vreg;
  }

  inline void annotate(LNode* ins);
  inline void addUnchecked(LInstruction* ins, MInstruction* mir = nullptr);

  // The template parameter ensures this can only be called for LIR instructions
  // with no outputs. Call addUnchecked directly to ignore this check for code
  // that sets the output manually with setDef or for LIR instructions with an
  // optional output register.
  template <size_t Temps>
  void add(details::LInstructionFixedDefsTempsHelper<0, Temps>* ins,
           MInstruction* mir = nullptr) {
    addUnchecked(ins, mir);
  }

  void lowerTypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
                          size_t lirIndex);

  void definePhiOneRegister(MPhi* phi, size_t lirIndex);
#ifdef JS_NUNBOX32
  void definePhiTwoRegisters(MPhi* phi, size_t lirIndex);
#endif

  void defineTypedPhi(MPhi* phi, size_t lirIndex) {
    // One register containing the payload.
    definePhiOneRegister(phi, lirIndex);
  }
  void defineUntypedPhi(MPhi* phi, size_t lirIndex) {
#ifdef JS_NUNBOX32
    // Two registers: one for the type, one for the payload.
    definePhiTwoRegisters(phi, lirIndex);
#else
    // One register containing the full Value.
    definePhiOneRegister(phi, lirIndex);
#endif
  }

  // Returns the pending OSI point and clears it, transferring ownership to
  // the caller.
  LOsiPoint* popOsiPoint() {
    LOsiPoint* tmp = osiPoint_;
    osiPoint_ = nullptr;
    return tmp;
  }

  LRecoverInfo* getRecoverInfo(MResumePoint* rp);
  LSnapshot* buildSnapshot(MResumePoint* rp, BailoutKind kind);
  bool assignPostSnapshot(MInstruction* mir, LInstruction* ins);

  // Marks this instruction as fallible, meaning that before it performs
  // effects (if any), it may check pre-conditions and bailout if they do not
  // hold. This function informs the register allocator that it will need to
  // capture appropriate state.
  void assignSnapshot(LInstruction* ins, BailoutKind kind);

  // Marks this instruction as needing to call into either the VM or GC. This
  // function may build a snapshot that captures the result of its own
  // instruction, and as such, should generally be called after define*().
  void assignSafepoint(LInstruction* ins, MInstruction* mir,
                       BailoutKind kind = BailoutKind::DuringVMCall);

  // Marks this instruction as needing a wasm safepoint.
  void assignWasmSafepoint(LInstruction* ins);

  inline void lowerConstantDouble(double d, MInstruction* mir);
  inline void lowerConstantFloat32(float f, MInstruction* mir);

  bool canSpecializeWasmCompareAndSelect(MCompare::CompareType compTy,
                                         MIRType insTy);
  void lowerWasmCompareAndSelect(MWasmSelect* ins, MDefinition* lhs,
                                 MDefinition* rhs, MCompare::CompareType compTy,
                                 JSOp jsop);

 public:
  // Whether to generate typed reads for element accesses with hole checks.
  static bool allowTypedElementHoleCheck() { return false; }
};

}  // namespace jit
}  // namespace js

#endif /* jit_shared_Lowering_shared_h */