CodeGenerator-shared.h (18199B)
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_shared_CodeGenerator_shared_h
#define jit_shared_CodeGenerator_shared_h

#include "mozilla/Alignment.h"

#include <utility>

#include "jit/InlineList.h"
#include "jit/InlineScriptTree.h"
#include "jit/JitcodeMap.h"
#include "jit/LIR.h"
#include "jit/MacroAssembler.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "jit/SafepointIndex.h"
#include "jit/Safepoints.h"
#include "jit/Snapshots.h"

namespace js {
namespace jit {

class OutOfLineCode;
class CodeGenerator;
class MacroAssembler;
class IonIC;

class OutOfLineTruncateSlow;

// Architecture-independent base for the per-architecture code generators.
// Owns the machinery shared by all backends: the macro assembler, snapshot /
// recover / safepoint writers, out-of-line code paths, IC runtime data, and
// the native-to-bytecode profiling map.
class CodeGeneratorShared : public LElementVisitor {
  // Out-of-line paths, emitted after the main code by generateOutOfLineCode().
  AppendOnlyList<OutOfLineCode> outOfLineCode_;

  // Returns |*masm| when one is supplied; otherwise constructs an assembler in
  // maybeMasm_ and returns that. NOTE(review): presumably used by the
  // constructor so callers may omit a MacroAssembler — confirm in the .cpp.
  MacroAssembler& ensureMasm(MacroAssembler* masm, TempAllocator& alloc,
                             CompileRealm* realm);
  // Backing storage for the assembler when none was passed in (see above).
  mozilla::Maybe<OffThreadMacroAssembler> maybeMasm_;

 public:
  // The assembler all code generation goes through (may alias maybeMasm_).
  MacroAssembler& masm;

 protected:
  MIRGenerator* gen;
  LIRGraph& graph;
  // Wasm compilation metadata; null for non-wasm compilations (see
  // wasmCodeMeta() accessor below).
  const wasm::CodeMetadata* wasmCodeMeta_;
  // The LIR block currently being generated (used by isNextBlock()).
  LBlock* current;
  SnapshotWriter snapshots_;
  RecoverWriter recovers_;
#ifdef DEBUG
  // Counts masm.Push calls made via pushArg*() so callVM argument counts can
  // be cross-checked in debug builds.
  uint32_t pushedArgs_;
#endif
  // Assembler offset of the most recent OSI point; see ensureOsiSpace().
  uint32_t lastOsiPointOffset_;
  SafepointWriter safepoints_;
  Label invalidate_;
  CodeOffset invalidateEpilogueData_;

  // Label for the common return path.
  NonAssertingLabel returnLabel_;

  // Amount of bytes allocated for incoming args. Used for Wasm return calls.
  uint32_t inboundStackArgBytes_;

  js::Vector<CodegenSafepointIndex, 0, JitAllocPolicy> safepointIndices_;
  js::Vector<OsiIndex, 0, BackgroundSystemAllocPolicy> osiIndices_;

  // Allocated data space needed at runtime.
  js::Vector<uint8_t, 0, BackgroundSystemAllocPolicy> runtimeData_;

  // Vector mapping each IC index to its offset in runtimeData_.
  js::Vector<uint32_t, 0, BackgroundSystemAllocPolicy> icList_;

  // IC data we need at compile-time. Discarded after creating the IonScript.
  struct CompileTimeICInfo {
    CodeOffset icOffsetForJump;
    CodeOffset icOffsetForPush;
  };
  js::Vector<CompileTimeICInfo, 0, BackgroundSystemAllocPolicy> icInfo_;

 protected:
  // Raw native-to-bytecode entries accumulated during codegen; compacted by
  // generateCompactNativeToBytecodeMap().
  js::Vector<NativeToBytecode, 0, BackgroundSystemAllocPolicy>
      nativeToBytecodeList_;
  // Compacted map buffer. NOTE(review): this appears to own a heap buffer of
  // nativeToBytecodeMapSize_ bytes; if so, UniquePtr<uint8_t[]> would be the
  // type with the correct array deleter — confirm against the allocation site.
  UniquePtr<uint8_t> nativeToBytecodeMap_;
  uint32_t nativeToBytecodeMapSize_;
  uint32_t nativeToBytecodeTableOffset_;

  bool isProfilerInstrumentationEnabled() {
    return gen->isProfilerInstrumentationEnabled();
  }

  gc::Heap initialStringHeap() const { return gen->initialStringHeap(); }
  gc::Heap initialBigIntHeap() const { return gen->initialBigIntHeap(); }

 protected:
  // The offset of the first instruction of the OSR entry block from the
  // beginning of the code buffer.
  mozilla::Maybe<size_t> osrEntryOffset_ = {};

  TempAllocator& alloc() const { return graph.mir().alloc(); }

  void setOsrEntryOffset(size_t offset) { osrEntryOffset_.emplace(offset); }

  // Crashes (release assert) if no OSR entry offset was recorded.
  size_t getOsrEntryOffset() const {
    MOZ_RELEASE_ASSERT(osrEntryOffset_.isSome());
    return *osrEntryOffset_;
  }

  using SafepointIndices =
      js::Vector<CodegenSafepointIndex, 8, SystemAllocPolicy>;

 protected:
#ifdef CHECK_OSIPOINT_REGISTERS
  // See JitOptions.checkOsiPointRegisters. We set this here to avoid
  // races when enableOsiPointRegisterChecks is called while we're generating
  // code off-thread.
  bool checkOsiPointRegisters;
#endif

  // The initial size of the frame in bytes. These are bytes beyond the
  // constant header present for every Ion frame, used for pre-determined
  // spills.
  uint32_t frameDepth_;

  // Offset in bytes to the incoming arguments, relative to the frame pointer.
  uint32_t offsetOfArgsFromFP_ = 0;

  // Offset in bytes of the stack region reserved for passed argument Values.
  uint32_t offsetOfPassedArgSlots_ = 0;

  // For argument construction for calls. Argslots are Value-sized.
  inline Address AddressOfPassedArg(uint32_t slot) const;
  inline uint32_t UnusedStackBytesForCall(uint32_t numArgSlots) const;

  // Translate an LIR allocation into a stack Address. |Base| selects which
  // register the address is computed relative to.
  template <BaseRegForAddress Base = BaseRegForAddress::Default>
  inline Address ToAddress(const LAllocation& a) const;

  template <BaseRegForAddress Base = BaseRegForAddress::Default>
  inline Address ToAddress(const LAllocation* a) const;

  template <BaseRegForAddress Base = BaseRegForAddress::Default>
  inline Address ToAddress(const LInt64Allocation& a) const;

  static inline Address ToAddress(Register elements, const LAllocation* index,
                                  Scalar::Type type);

  uint32_t frameSize() const { return frameDepth_; }

 protected:
  bool addNativeToBytecodeEntry(const BytecodeSite* site);
  void dumpNativeToBytecodeEntries();
  void dumpNativeToBytecodeEntry(uint32_t idx);

 public:
  MIRGenerator& mirGen() const { return *gen; }
  const wasm::CodeMetadata* wasmCodeMeta() const { return wasmCodeMeta_; }
  IonPerfSpewer& perfSpewer() const { return mirGen().perfSpewer(); }

  // When appending to runtimeData_, the vector might realloc, leaving pointers
  // in the original vector stale and unusable. DataPtr acts like a pointer,
  // but allows safety in the face of potentially realloc'ing vector appends.
  friend class DataPtr;
  template <typename T>
  class DataPtr {
    CodeGeneratorShared* cg_;
    size_t index_;

    // Recomputed on every access so a realloc of runtimeData_ is harmless.
    T* lookup() { return reinterpret_cast<T*>(&cg_->runtimeData_[index_]); }

   public:
    DataPtr(CodeGeneratorShared* cg, size_t index) : cg_(cg), index_(index) {}

    T* operator->() { return lookup(); }
    // NOTE(review): operator* returning T* (rather than T&) is unusual for a
    // pointer-like type; callers must write (*ptr)->field or **ptr. Verify
    // this is intentional before relying on it.
    T* operator*() { return lookup(); }
  };

 protected:
  // Reserves |size| zeroed bytes in runtimeData_ and stores the start offset
  // in |*offset|. |size| must be pointer-size aligned. Returns false (and
  // records OOM on the masm) if the append fails.
  [[nodiscard]] bool allocateData(size_t size, size_t* offset) {
    MOZ_ASSERT(size % sizeof(void*) == 0);
    *offset = runtimeData_.length();
    masm.propagateOOM(runtimeData_.appendN(0, size));
    return !masm.oom();
  }

  // Copy-constructs |cache| into freshly allocated runtimeData_ space, records
  // its offset in icList_, and reserves a matching icInfo_ slot. Returns the
  // offset into runtimeData_, or SIZE_MAX on OOM (also recorded on the masm).
  template <typename T>
  inline size_t allocateIC(const T& cache) {
    static_assert(std::is_base_of_v<IonIC, T>, "T must inherit from IonIC");
    size_t index;
    masm.propagateOOM(
        allocateData(sizeof(mozilla::AlignedStorage2<T>), &index));
    masm.propagateOOM(icList_.append(index));
    masm.propagateOOM(icInfo_.append(CompileTimeICInfo()));
    if (masm.oom()) {
      return SIZE_MAX;
    }
    // Use the copy constructor on the allocated space.
    MOZ_ASSERT(index == icList_.back());
    new (&runtimeData_[index]) T(cache);
    return index;
  }

 protected:
  // Encodes an LSnapshot into the compressed snapshot buffer.
  void encode(LRecoverInfo* recover);
  void encode(LSnapshot* snapshot);
  void encodeAllocation(LSnapshot* snapshot, MDefinition* def,
                        uint32_t* startIndex, bool hasSideEffects);

  // Encode all encountered safepoints in CG-order, and resolve |indices| for
  // safepoint offsets.
  bool encodeSafepoints();

  // Fixup offsets of native-to-bytecode map.
  bool createNativeToBytecodeScriptList(JSContext* cx,
                                        IonEntry::ScriptList& scripts);
  bool generateCompactNativeToBytecodeMap(JSContext* cx, JitCode* code,
                                          IonEntry::ScriptList& scripts);
  void verifyCompactNativeToBytecodeMap(JitCode* code,
                                        const IonEntry::ScriptList& scripts,
                                        uint32_t numRegions);

  // Mark the safepoint on |ins| as corresponding to the current assembler
  // location. The location should be just after a call.
  void markSafepoint(LInstruction* ins);
  void markSafepointAt(uint32_t offset, LInstruction* ins);

  // Mark the OSI point |ins| as corresponding to the current
  // assembler location inside the |osiIndices_|. Return the assembler
  // location for the OSI point return location.
  uint32_t markOsiPoint(LOsiPoint* ins);

  // Ensure that there is enough room between the last OSI point and the
  // current instruction, such that:
  // (1) Invalidation will not overwrite the current instruction, and
  // (2) Overwriting the current instruction will not overwrite
  //     an invalidation marker.
  void ensureOsiSpace();

  OutOfLineCode* oolTruncateDouble(
      FloatRegister src, Register dest, MInstruction* mir,
      wasm::BytecodeOffset callOffset = wasm::BytecodeOffset());
  void emitTruncateDouble(FloatRegister src, Register dest, MInstruction* mir);
  void emitTruncateFloat32(FloatRegister src, Register dest, MInstruction* mir);

  void emitPreBarrier(Address address);
  void emitPreBarrier(BaseObjectElementIndex address);

  // We don't emit code for trivial blocks, so if we want to branch to the
  // given block, and it's trivial, return the ultimate block we should
  // actually branch directly to.
  MBasicBlock* skipTrivialBlocks(MBasicBlock* block) {
    while (block->lir()->isTrivial()) {
      // A trivial block's last (and only meaningful) instruction is a goto
      // with exactly one successor; follow the chain.
      LGoto* ins = block->lir()->rbegin()->toGoto();
      MOZ_ASSERT(ins->numSuccessors() == 1);
      block = ins->getSuccessor(0);
    }
    return block;
  }

  // Test whether the given block can be reached via fallthrough from the
  // current block.
  inline bool isNextBlock(LBlock* block) {
    uint32_t targetId = skipTrivialBlocks(block->mir())->id();

    // If the target is before next, then it's not next.
    if (targetId < current->mir()->id() + 1) {
      return false;
    }

    // Blocks emitted in different sections (inline vs. out-of-line) can never
    // fall through to one another.
    if (current->isOutOfLine() != graph.getBlock(targetId)->isOutOfLine()) {
      return false;
    }

    // Scan through blocks until the target to see if we can fallthrough them.
    for (uint32_t nextId = current->mir()->id() + 1; nextId != targetId;
         ++nextId) {
      LBlock* nextBlock = graph.getBlock(nextId);

      // If the next block is generated in a different section than this
      // one, then we don't need to consider it for fallthrough.
      if (nextBlock->isOutOfLine() != graph.getBlock(targetId)->isOutOfLine()) {
        continue;
      }

      // If the next block is trivial, no code will be generated and we don't
      // need to consider it for fallthrough.
      if (nextBlock->isTrivial()) {
        continue;
      }

      // Otherwise this is a real block that will prevent fallthrough.
      return false;
    }

    return true;
  }

 protected:
  // Save and restore all volatile registers to/from the stack, excluding the
  // specified register(s), before a function call made using callWithABI and
  // after storing the function call's return value to an output register.
  // (The only registers that don't need to be saved/restored are 1) the
  // temporary register used to store the return value of the function call,
  // if there is one [otherwise that stored value would be overwritten]; and
  // 2) temporary registers whose values aren't needed in the rest of the LIR
  // instruction [this is purely an optimization]. All other volatiles must
  // be saved and restored in case future LIR instructions need those values.)
  void saveVolatile(Register output) {
    LiveRegisterSet regs(RegisterSet::Volatile());
    regs.takeUnchecked(output);
    masm.PushRegsInMask(regs);
  }
  void restoreVolatile(Register output) {
    LiveRegisterSet regs(RegisterSet::Volatile());
    regs.takeUnchecked(output);
    masm.PopRegsInMask(regs);
  }
  void saveVolatile(FloatRegister output) {
    LiveRegisterSet regs(RegisterSet::Volatile());
    regs.takeUnchecked(output);
    masm.PushRegsInMask(regs);
  }
  void restoreVolatile(FloatRegister output) {
    LiveRegisterSet regs(RegisterSet::Volatile());
    regs.takeUnchecked(output);
    masm.PopRegsInMask(regs);
  }
  // Save/restore all volatile registers EXCEPT the given temps.
  void saveVolatile(LiveRegisterSet temps) {
    masm.PushRegsInMask(LiveRegisterSet(RegisterSet::VolatileNot(temps.set())));
  }
  void restoreVolatile(LiveRegisterSet temps) {
    masm.PopRegsInMask(LiveRegisterSet(RegisterSet::VolatileNot(temps.set())));
  }
  // Save/restore every volatile register.
  void saveVolatile() {
    masm.PushRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
  }
  void restoreVolatile() {
    masm.PopRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
  }

  // These functions have to be called before and after any callVM and before
  // any modifications of the stack. Modification of the stack made after
  // these calls should update the framePushed variable, needed by the exit
  // frame produced by callVM.
  inline void saveLive(LInstruction* ins);
  inline void restoreLive(LInstruction* ins);
  inline void restoreLiveIgnore(LInstruction* ins, LiveRegisterSet reg);

  // Get/save/restore all registers that are both live and volatile.
  inline LiveRegisterSet liveVolatileRegs(LInstruction* ins);
  inline void saveLiveVolatile(LInstruction* ins);
  inline void restoreLiveVolatile(LInstruction* ins);

 public:
  // Push a VM-call argument onto the stack (debug builds count pushes so the
  // callVM machinery can verify argument counts).
  template <typename T>
  void pushArg(const T& t) {
    masm.Push(t);
#ifdef DEBUG
    pushedArgs_++;
#endif
  }

  void pushArg(jsid id, Register temp) {
    masm.Push(id, temp);
#ifdef DEBUG
    pushedArgs_++;
#endif
  }

  // Like pushArg, but returns a patchable offset for the pushed value.
  template <typename T>
  CodeOffset pushArgWithPatch(const T& t) {
#ifdef DEBUG
    pushedArgs_++;
#endif
    return masm.PushWithPatch(t);
  }

  void storePointerResultTo(Register reg) { masm.storeCallPointerResult(reg); }

  void storeFloatResultTo(FloatRegister reg) { masm.storeCallFloatResult(reg); }

  template <typename T>
  void storeResultValueTo(const T& t) {
    masm.storeCallResultValue(t);
  }

 protected:
  void addIC(LInstruction* lir, size_t cacheIndex);

 protected:
  bool generatePrologue();
  bool generateEpilogue();

  void addOutOfLineCode(OutOfLineCode* code, const MInstruction* mir);
  void addOutOfLineCode(OutOfLineCode* code, const BytecodeSite* site);
  bool generateOutOfLineCode();

  Label* getJumpLabelForBranch(MBasicBlock* block);

  // Generate a jump to the start of the specified block. Use this in place of
  // jumping directly to mir->lir()->label(), or use getJumpLabelForBranch()
  // if a label to use directly is needed.
  void jumpToBlock(MBasicBlock* mir);

  // This function is not used for MIPS. MIPS has branchToBlock.
#if !defined(JS_CODEGEN_MIPS64)
  void jumpToBlock(MBasicBlock* mir, Assembler::Condition cond);
#endif

 private:
  void generateInvalidateEpilogue();

 public:
  CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm,
                      const wasm::CodeMetadata* wasmCodeMeta);

 public:
  void visitOutOfLineTruncateSlow(OutOfLineTruncateSlow* ool);

  bool omitOverRecursedStackCheck() const;
  bool omitOverRecursedInterruptCheck() const;

 public:
  bool isGlobalObject(JSObject* object);
};

// An out-of-line path is generated at the end of the function.
class OutOfLineCode : public TempObject,
                      public AppendOnlyListNode<OutOfLineCode> {
  Label entry_;   // Bound where the OOL path's code starts.
  Label rejoin_;  // Target for jumping back into the main code stream.
  uint32_t framePushed_;
  const BytecodeSite* site_;

 public:
  OutOfLineCode() : framePushed_(0), site_() {}

  // Emit the out-of-line path. Implemented by each concrete subclass.
  virtual void generate(CodeGeneratorShared* codegen) = 0;

  Label* entry() { return &entry_; }
  virtual void bind(MacroAssembler* masm) { masm->bind(entry()); }
  Label* rejoin() { return &rejoin_; }
  void setFramePushed(uint32_t framePushed) { framePushed_ = framePushed; }
  uint32_t framePushed() const { return framePushed_; }
  void setBytecodeSite(const BytecodeSite* site) { site_ = site; }
  const BytecodeSite* bytecodeSite() const { return site_; }
};

// An implementation of OutOfLineCode for quick and simple cases. The lambda
// should have the signature (OutOfLineCode& ool) -> void.
template <typename Func>
class LambdaOutOfLineCode : public OutOfLineCode {
  Func generateFunc_;

 public:
  explicit LambdaOutOfLineCode(Func generateFunc)
      : generateFunc_(std::move(generateFunc)) {}

  void generate(CodeGeneratorShared*) override { generateFunc_(*this); }
};

// For OOL paths that want a specific-typed code generator.
// Downcasts the generic CodeGeneratorShared* to the concrete code-generator
// type T before dispatching, so subclasses can implement accept(T*) against
// the backend-specific API.
template <typename T>
class OutOfLineCodeBase : public OutOfLineCode {
 public:
  virtual void generate(CodeGeneratorShared* codegen) override {
    // NOTE(review): static_cast assumes |codegen| really is a T; presumably
    // guaranteed by how the OOL path was registered — confirm at call sites.
    accept(static_cast<T*>(codegen));
  }

 public:
  virtual void accept(T* codegen) = 0;
};

// Out-of-line slow path for wasm float->int truncation: captures the source
// and destination types/registers plus the trap-site information needed when
// the in-line (fast) truncation fails.
template <class CodeGen>
class OutOfLineWasmTruncateCheckBase : public OutOfLineCodeBase<CodeGen> {
  MIRType fromType_;  // Float source type (taken from mir->input()->type()).
  MIRType toType_;    // Integer destination type: Int32 or Int64.
  FloatRegister input_;
  // Exactly one of output_/output64_ is meaningful, selected by toType_;
  // the other is set to the Invalid() sentinel by the constructors below.
  Register output_;
  Register64 output64_;
  TruncFlags flags_;
  wasm::TrapSiteDesc trapSiteDesc_;

 public:
  // 32-bit truncation: uses output_, leaves output64_ invalid.
  OutOfLineWasmTruncateCheckBase(MWasmTruncateToInt32* mir, FloatRegister input,
                                 Register output)
      : fromType_(mir->input()->type()),
        toType_(MIRType::Int32),
        input_(input),
        output_(output),
        output64_(Register64::Invalid()),
        flags_(mir->flags()),
        trapSiteDesc_(mir->trapSiteDesc()) {}

  // 64-bit truncation via a builtin call: uses output64_, leaves output_
  // invalid.
  OutOfLineWasmTruncateCheckBase(MWasmBuiltinTruncateToInt64* mir,
                                 FloatRegister input, Register64 output)
      : fromType_(mir->input()->type()),
        toType_(MIRType::Int64),
        input_(input),
        output_(Register::Invalid()),
        output64_(output),
        flags_(mir->flags()),
        trapSiteDesc_(mir->trapSiteDesc()) {}

  // 64-bit inline truncation: same register usage as the builtin variant.
  OutOfLineWasmTruncateCheckBase(MWasmTruncateToInt64* mir, FloatRegister input,
                                 Register64 output)
      : fromType_(mir->input()->type()),
        toType_(MIRType::Int64),
        input_(input),
        output_(Register::Invalid()),
        output64_(output),
        flags_(mir->flags()),
        trapSiteDesc_(mir->trapSiteDesc()) {}

  void accept(CodeGen* codegen) override {
    codegen->visitOutOfLineWasmTruncateCheck(this);
  }

  FloatRegister input() const { return input_; }
  Register output() const { return output_; }
  Register64 output64() const { return output64_; }
  MIRType toType() const { return toType_; }
  MIRType fromType() const { return fromType_; }
  bool isUnsigned() const { return flags_ & TRUNC_UNSIGNED; }
  bool isSaturating() const { return flags_ & TRUNC_SATURATING; }
  TruncFlags flags() const { return flags_; }
  wasm::TrapSiteDesc trapSiteDesc() const { return trapSiteDesc_; }
};

}  // namespace jit
}  // namespace js

#endif /* jit_shared_CodeGenerator_shared_h */