// MIRGraph.h
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- 2 * vim: set ts=8 sts=2 et sw=2 tw=80: 3 * This Source Code Form is subject to the terms of the Mozilla Public 4 * License, v. 2.0. If a copy of the MPL was not distributed with this 5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 7 #ifndef jit_MIRGraph_h 8 #define jit_MIRGraph_h 9 10 // This file declares the data structures used to build a control-flow graph 11 // containing MIR. 12 13 #include "jit/CompileInfo.h" 14 #include "jit/FixedList.h" 15 #include "jit/InlineScriptTree.h" 16 #include "jit/JitAllocPolicy.h" 17 #include "jit/MIR-wasm.h" 18 #include "jit/MIR.h" 19 20 namespace js { 21 namespace jit { 22 23 class MBasicBlock; 24 class MIRGraph; 25 class MStart; 26 27 class MDefinitionIterator; 28 29 using MInstructionIterator = InlineListIterator<MInstruction>; 30 using MInstructionReverseIterator = InlineListReverseIterator<MInstruction>; 31 using MPhiIterator = InlineListIterator<MPhi>; 32 33 #ifdef DEBUG 34 using MResumePointIterator = InlineForwardListIterator<MResumePoint>; 35 #endif 36 37 class LBlock; 38 39 // Represents the likelihood of a basic block to be executed at runtime. 40 // Unknown: default value. 41 // Likely: Likely to be executed at runtime, hot block. 42 // Unlikely: unlikely to be executed, cold block. 43 enum class Frequency : uint8_t { Unknown = 0, Likely = 1, Unlikely = 2 }; 44 45 class MBasicBlock : public TempObject, public InlineListNode<MBasicBlock> { 46 public: 47 enum Kind { 48 NORMAL, 49 PENDING_LOOP_HEADER, 50 LOOP_HEADER, 51 SPLIT_EDGE, 52 FAKE_LOOP_PRED, 53 INTERNAL, 54 DEAD 55 }; 56 57 private: 58 MBasicBlock(MIRGraph& graph, const CompileInfo& info, BytecodeSite* site, 59 Kind kind); 60 [[nodiscard]] bool init(); 61 void copySlots(MBasicBlock* from); 62 [[nodiscard]] bool inherit(TempAllocator& alloc, size_t stackDepth, 63 MBasicBlock* maybePred, uint32_t popped); 64 65 // This block cannot be reached by any means. 
66 bool unreachable_ = false; 67 68 // This block will unconditionally bail out. 69 bool alwaysBails_ = false; 70 71 // Represents the execution frequency of this block, considered unknown by 72 // default. Various passes can use this information for optimizations. 73 Frequency frequency_ = Frequency::Unknown; 74 75 // Pushes a copy of a local variable or argument. 76 void pushVariable(uint32_t slot) { push(slots_[slot]); } 77 78 // Sets a variable slot to the top of the stack, correctly creating copies 79 // as needed. 80 void setVariable(uint32_t slot) { 81 MOZ_ASSERT(stackPosition_ > info_.firstStackSlot()); 82 setSlot(slot, slots_[stackPosition_ - 1]); 83 } 84 85 enum ReferencesType { 86 RefType_None = 0, 87 88 // Assert that the instruction is unused. 89 RefType_AssertNoUses = 1 << 0, 90 91 // Discard the operands of the resume point / instructions if the 92 // following flag are given too. 93 RefType_DiscardOperands = 1 << 1, 94 RefType_DiscardResumePoint = 1 << 2, 95 RefType_DiscardInstruction = 1 << 3, 96 97 // Discard operands of the instruction and its resume point. 98 RefType_DefaultNoAssert = RefType_DiscardOperands | 99 RefType_DiscardResumePoint | 100 RefType_DiscardInstruction, 101 102 // Discard everything and assert that the instruction is not used. 103 RefType_Default = RefType_AssertNoUses | RefType_DefaultNoAssert, 104 105 // Discard resume point operands only, without discarding the operands 106 // of the current instruction. Asserts that the instruction is unused. 107 RefType_IgnoreOperands = RefType_AssertNoUses | RefType_DiscardOperands | 108 RefType_DiscardResumePoint 109 }; 110 111 void discardResumePoint(MResumePoint* rp, 112 ReferencesType refType = RefType_Default); 113 void removeResumePoint(MResumePoint* rp); 114 115 // Remove all references to an instruction such that it can be removed from 116 // the list of instruction, without keeping any dangling pointer to it. 
This 117 // includes the operands of the instruction, and the resume point if 118 // present. 119 void prepareForDiscard(MInstruction* ins, 120 ReferencesType refType = RefType_Default); 121 122 public: 123 /////////////////////////////////////////////////////// 124 ////////// BEGIN GRAPH BUILDING INSTRUCTIONS ////////// 125 /////////////////////////////////////////////////////// 126 127 // Creates a new basic block for a MIR generator. If |pred| is not nullptr, 128 // its slots and stack depth are initialized from |pred|. 129 static MBasicBlock* New(MIRGraph& graph, size_t stackDepth, 130 const CompileInfo& info, MBasicBlock* maybePred, 131 BytecodeSite* site, Kind kind); 132 static MBasicBlock* New(MIRGraph& graph, const CompileInfo& info, 133 MBasicBlock* pred, Kind kind); 134 static MBasicBlock* NewPopN(MIRGraph& graph, const CompileInfo& info, 135 MBasicBlock* pred, BytecodeSite* site, Kind kind, 136 uint32_t popn); 137 static MBasicBlock* NewPendingLoopHeader(MIRGraph& graph, 138 const CompileInfo& info, 139 MBasicBlock* pred, 140 BytecodeSite* site); 141 static MBasicBlock* NewSplitEdge(MIRGraph& graph, MBasicBlock* pred, 142 size_t predEdgeIdx, MBasicBlock* succ); 143 static MBasicBlock* NewFakeLoopPredecessor(MIRGraph& graph, 144 MBasicBlock* header); 145 146 // Create a new basic block for internal control flow not present in the 147 // original CFG. 148 static MBasicBlock* NewInternal(MIRGraph& graph, MBasicBlock* orig, 149 MResumePoint* activeResumePoint); 150 151 bool dominates(const MBasicBlock* other) const { 152 return other->domIndex() - domIndex() < numDominated(); 153 } 154 155 void setId(uint32_t id) { id_ = id; } 156 157 // Mark this block (and only this block) as unreachable. 
158 void setUnreachable() { 159 MOZ_ASSERT(!unreachable_); 160 setUnreachableUnchecked(); 161 } 162 void setUnreachableUnchecked() { unreachable_ = true; } 163 bool unreachable() const { return unreachable_; } 164 165 void setAlwaysBails() { alwaysBails_ = true; } 166 bool alwaysBails() const { return alwaysBails_; } 167 168 // Move the definition to the top of the stack. 169 void pick(int32_t depth); 170 171 // Move the top of the stack definition under the depth-th stack value. 172 void unpick(int32_t depth); 173 174 // Exchange 2 stack slots at the defined depth 175 void swapAt(int32_t depth); 176 177 // Note: most of the methods below are hot. Do not un-inline them without 178 // measuring the impact. 179 180 // Gets the instruction associated with various slot types. 181 MDefinition* peek(int32_t depth) { 182 MOZ_ASSERT(depth < 0); 183 MOZ_ASSERT(stackPosition_ + depth >= info_.firstStackSlot()); 184 return peekUnchecked(depth); 185 } 186 187 MDefinition* peekUnchecked(int32_t depth) { 188 MOZ_ASSERT(depth < 0); 189 return getSlot(stackPosition_ + depth); 190 } 191 192 MDefinition* environmentChain(); 193 MDefinition* argumentsObject(); 194 195 // Increase the number of slots available 196 [[nodiscard]] bool increaseSlots(size_t num); 197 [[nodiscard]] bool ensureHasSlots(size_t num); 198 199 // Initializes a slot value; must not be called for normal stack 200 // operations, as it will not create new SSA names for copies. 201 void initSlot(uint32_t slot, MDefinition* ins) { 202 slots_[slot] = ins; 203 if (entryResumePoint()) { 204 entryResumePoint()->initOperand(slot, ins); 205 } 206 } 207 208 // Sets the instruction associated with various slot types. The 209 // instruction must lie at the top of the stack. 
210 void setLocal(uint32_t local) { setVariable(info_.localSlot(local)); } 211 void setArg(uint32_t arg) { setVariable(info_.argSlot(arg)); } 212 void setSlot(uint32_t slot, MDefinition* ins) { slots_[slot] = ins; } 213 214 // Tracks an instruction as being pushed onto the operand stack. 215 void push(MDefinition* ins) { 216 MOZ_ASSERT(stackPosition_ < nslots()); 217 slots_[stackPosition_++] = ins; 218 } 219 void pushArg(uint32_t arg) { pushVariable(info_.argSlot(arg)); } 220 void pushArgUnchecked(uint32_t arg) { 221 pushVariable(info_.argSlotUnchecked(arg)); 222 } 223 void pushLocal(uint32_t local) { pushVariable(info_.localSlot(local)); } 224 void pushSlot(uint32_t slot) { pushVariable(slot); } 225 void setEnvironmentChain(MDefinition* ins); 226 void setArgumentsObject(MDefinition* ins); 227 228 // Returns the top of the stack, then decrements the virtual stack pointer. 229 MDefinition* pop() { 230 MOZ_ASSERT(stackPosition_ > info_.firstStackSlot()); 231 return slots_[--stackPosition_]; 232 } 233 void popn(uint32_t n) { 234 MOZ_ASSERT(stackPosition_ - n >= info_.firstStackSlot()); 235 MOZ_ASSERT(stackPosition_ >= stackPosition_ - n); 236 stackPosition_ -= n; 237 } 238 239 // Adds an instruction to this block's instruction list. 240 inline void add(MInstruction* ins); 241 242 // Marks the last instruction of the block; no further instructions 243 // can be added. 244 void end(MControlInstruction* ins) { 245 MOZ_ASSERT(!hasLastIns()); // Existing control instructions should be 246 // removed first. 247 MOZ_ASSERT(ins); 248 add(ins); 249 } 250 251 // Adds a phi instruction, but does not set successorWithPhis. 252 void addPhi(MPhi* phi); 253 254 // Adds a resume point to this block. 255 void addResumePoint(MResumePoint* resume) { 256 #ifdef DEBUG 257 resumePoints_.pushFront(resume); 258 #endif 259 } 260 261 // Discard pre-allocated resume point. 
262 void discardPreAllocatedResumePoint(MResumePoint* resume) { 263 MOZ_ASSERT(!resume->instruction()); 264 discardResumePoint(resume); 265 } 266 267 // Adds a predecessor. Every predecessor must have the same exit stack 268 // depth as the entry state to this block. Adding a predecessor 269 // automatically creates phi nodes and rewrites uses as needed. 270 [[nodiscard]] bool addPredecessor(TempAllocator& alloc, MBasicBlock* pred); 271 [[nodiscard]] bool addPredecessorPopN(TempAllocator& alloc, MBasicBlock* pred, 272 uint32_t popped); 273 274 // Add a predecessor which won't introduce any new phis to this block. 275 // This may be called after the contents of this block have been built. 276 [[nodiscard]] bool addPredecessorSameInputsAs(MBasicBlock* pred, 277 MBasicBlock* existingPred); 278 279 // Stranger utilities used for inlining. 280 [[nodiscard]] bool addPredecessorWithoutPhis(MBasicBlock* pred); 281 void inheritSlots(MBasicBlock* parent); 282 [[nodiscard]] bool initEntrySlots(TempAllocator& alloc); 283 284 // Replaces an edge for a given block with a new block. This is 285 // used for critical edge splitting. 286 // 287 // Note: If successorWithPhis is set, you must not be replacing it. 288 void replacePredecessor(MBasicBlock* old, MBasicBlock* split); 289 void replaceSuccessor(size_t pos, MBasicBlock* split); 290 291 // Removes `pred` from the predecessor list. If this block defines phis, 292 // removes the entry for `pred` and updates the indices of later entries. 293 // This may introduce redundant phis if the new block has fewer 294 // than two predecessors. 295 void removePredecessor(MBasicBlock* pred); 296 297 // A version of removePredecessor which expects that phi operands to 298 // |pred| have already been removed. 299 void removePredecessorWithoutPhiOperands(MBasicBlock* pred, size_t predIndex); 300 301 // Resets all the dominator info so that it can be recomputed. 302 void clearDominatorInfo(); 303 304 // Sets a back edge. 
This places phi nodes and rewrites instructions within 305 // the current loop as necessary. 306 [[nodiscard]] bool setBackedge(MBasicBlock* block); 307 [[nodiscard]] bool setBackedgeWasm(MBasicBlock* block, size_t paramCount); 308 309 // Resets a LOOP_HEADER block to a NORMAL block. This is needed when 310 // optimizations remove the backedge. 311 void clearLoopHeader(); 312 313 // Sets a block to a LOOP_HEADER block, with newBackedge as its backedge. 314 // This is needed when optimizations remove the normal entry to a loop 315 // with multiple entries. 316 void setLoopHeader(MBasicBlock* newBackedge); 317 318 // Marks this as a LOOP_HEADER block, but doesn't change anything else. 319 void setLoopHeader() { 320 MOZ_ASSERT(!isLoopHeader()); 321 kind_ = LOOP_HEADER; 322 } 323 324 // Propagates backedge slots into phis operands of the loop header. 325 [[nodiscard]] bool inheritPhisFromBackedge(MBasicBlock* backedge); 326 327 void insertBefore(MInstruction* at, MInstruction* ins); 328 void insertAfter(MInstruction* at, MInstruction* ins); 329 330 void insertAtEnd(MInstruction* ins); 331 332 // Move an instruction. Movement may cross block boundaries. 333 void moveBefore(MInstruction* at, MInstruction* ins); 334 335 enum IgnoreTop { IgnoreNone = 0, IgnoreRecover = 1 << 0 }; 336 337 // Locate the top of the |block|, where it is safe to insert a new 338 // instruction. 339 MInstruction* safeInsertTop(MDefinition* ins = nullptr, 340 IgnoreTop ignore = IgnoreNone); 341 342 // Removes an instruction with the intention to discard it. 
343 void discard(MInstruction* ins); 344 void discardLastIns(); 345 void discardAllInstructions(); 346 void discardAllInstructionsStartingAt(MInstructionIterator iter); 347 void discardAllPhis(); 348 void discardAllResumePoints(bool discardEntry = true); 349 void clear(); 350 351 // Splits this block in two at a given instruction, inserting a new control 352 // flow diamond with |ins| in the slow path, |fastpath| in the other, and 353 // |condition| determining which path to take. 354 bool wrapInstructionInFastpath(MInstruction* ins, MInstruction* fastpath, 355 MInstruction* condition); 356 357 void moveOuterResumePointTo(MBasicBlock* dest); 358 359 // Move an instruction from this block to a block that has not yet been 360 // terminated. 361 void moveToNewBlock(MInstruction* ins, MBasicBlock* dst); 362 363 // Same as |void discard(MInstruction* ins)| but assuming that 364 // all operands are already discarded. 365 void discardIgnoreOperands(MInstruction* ins); 366 367 // Discards a phi instruction and updates predecessor successorWithPhis. 368 void discardPhi(MPhi* phi); 369 370 // Some instruction which are guarding against some MIRType value, or 371 // against a type expectation should be considered as removing a potenatial 372 // branch where the guard does not hold. We need to register such 373 // instructions in order to do destructive optimizations correctly, such as 374 // Range Analysis. 375 void flagOperandsOfPrunedBranches(MInstruction* ins); 376 377 // Mark this block as having been removed from the graph. 
378 void markAsDead() { 379 MOZ_ASSERT(kind_ != DEAD); 380 kind_ = DEAD; 381 } 382 383 /////////////////////////////////////////////////////// 384 /////////// END GRAPH BUILDING INSTRUCTIONS /////////// 385 /////////////////////////////////////////////////////// 386 387 MIRGraph& graph() { return graph_; } 388 const CompileInfo& info() const { return info_; } 389 jsbytecode* pc() const { return trackedSite_->pc(); } 390 jsbytecode* entryPC() const { return entryResumePoint()->pc(); } 391 uint32_t nslots() const { return slots_.length(); } 392 uint32_t id() const { return id_; } 393 uint32_t numPredecessors() const { return predecessors_.length(); } 394 395 bool isUnknownFrequency() const { return frequency_ == Frequency::Unknown; } 396 397 bool isLikelyFrequency() const { return frequency_ == Frequency::Likely; } 398 399 bool isUnlikelyFrequency() const { return frequency_ == Frequency::Unlikely; } 400 401 Frequency getFrequency() const { return frequency_; } 402 void setFrequency(Frequency value) { frequency_ = value; } 403 404 uint32_t domIndex() const { 405 MOZ_ASSERT(!isDead()); 406 return domIndex_; 407 } 408 void setDomIndex(uint32_t d) { domIndex_ = d; } 409 410 MBasicBlock* getPredecessor(uint32_t i) const { return predecessors_[i]; } 411 void setPredecessor(uint32_t i, MBasicBlock* p) { predecessors_[i] = p; } 412 [[nodiscard]] 413 bool appendPredecessor(MBasicBlock* p) { 414 return predecessors_.append(p); 415 } 416 void erasePredecessor(uint32_t i) { predecessors_.erase(&predecessors_[i]); } 417 418 size_t indexForPredecessor(MBasicBlock* block) const { 419 // This should only be called before critical edge splitting. 
420 MOZ_ASSERT(!block->successorWithPhis()); 421 422 for (size_t i = 0; i < predecessors_.length(); i++) { 423 if (predecessors_[i] == block) { 424 return i; 425 } 426 } 427 MOZ_CRASH(); 428 } 429 bool hasAnyIns() const { return !instructions_.empty(); } 430 bool hasLastIns() const { 431 return hasAnyIns() && instructions_.rbegin()->isControlInstruction(); 432 } 433 MControlInstruction* lastIns() const { 434 MOZ_ASSERT(hasLastIns()); 435 return instructions_.rbegin()->toControlInstruction(); 436 } 437 // Find or allocate an optimized out constant. 438 MConstant* optimizedOutConstant(TempAllocator& alloc); 439 MPhiIterator phisBegin() const { return phis_.begin(); } 440 MPhiIterator phisBegin(MPhi* at) const { return phis_.begin(at); } 441 MPhiIterator phisEnd() const { return phis_.end(); } 442 bool phisEmpty() const { return phis_.empty(); } 443 #ifdef DEBUG 444 MResumePointIterator resumePointsBegin() const { 445 return resumePoints_.begin(); 446 } 447 MResumePointIterator resumePointsEnd() const { return resumePoints_.end(); } 448 bool resumePointsEmpty() const { return resumePoints_.empty(); } 449 #endif 450 MInstructionIterator begin() { return instructions_.begin(); } 451 MInstructionIterator begin(MInstruction* at) { 452 MOZ_ASSERT(at->block() == this); 453 return instructions_.begin(at); 454 } 455 MInstructionIterator end() { return instructions_.end(); } 456 MInstructionReverseIterator rbegin() { return instructions_.rbegin(); } 457 MInstructionReverseIterator rbegin(MInstruction* at) { 458 MOZ_ASSERT(at->block() == this); 459 return instructions_.rbegin(at); 460 } 461 MInstructionReverseIterator rend() { return instructions_.rend(); } 462 463 bool isLoopHeader() const { return kind_ == LOOP_HEADER; } 464 bool isPendingLoopHeader() const { return kind_ == PENDING_LOOP_HEADER; } 465 466 bool hasUniqueBackedge() const { 467 MOZ_ASSERT(isLoopHeader()); 468 MOZ_ASSERT(numPredecessors() >= 1); 469 if (numPredecessors() == 1 || numPredecessors() == 2) { 470 
return true; 471 } 472 if (numPredecessors() == 3) { 473 // fixup block added by NewFakeLoopPredecessor 474 return getPredecessor(1)->numPredecessors() == 0; 475 } 476 return false; 477 } 478 MBasicBlock* backedge() const { 479 MOZ_ASSERT(hasUniqueBackedge()); 480 return getPredecessor(numPredecessors() - 1); 481 } 482 MBasicBlock* loopHeaderOfBackedge() const { 483 MOZ_ASSERT(isLoopBackedge()); 484 return getSuccessor(numSuccessors() - 1); 485 } 486 MBasicBlock* loopPredecessor() const { 487 MOZ_ASSERT(isLoopHeader()); 488 return getPredecessor(0); 489 } 490 bool isLoopBackedge() const { 491 if (!numSuccessors()) { 492 return false; 493 } 494 MBasicBlock* lastSuccessor = getSuccessor(numSuccessors() - 1); 495 return lastSuccessor->isLoopHeader() && 496 lastSuccessor->hasUniqueBackedge() && 497 lastSuccessor->backedge() == this; 498 } 499 bool isSplitEdge() const { return kind_ == SPLIT_EDGE; } 500 bool isDead() const { return kind_ == DEAD; } 501 bool isFakeLoopPred() const { return kind_ == FAKE_LOOP_PRED; } 502 503 uint32_t stackDepth() const { return stackPosition_; } 504 bool isMarked() const { return mark_; } 505 void mark() { 506 MOZ_ASSERT(!mark_, "Marking already-marked block"); 507 markUnchecked(); 508 } 509 void markUnchecked() { mark_ = true; } 510 void unmark() { 511 MOZ_ASSERT(mark_, "Unarking unmarked block"); 512 unmarkUnchecked(); 513 } 514 void unmarkUnchecked() { mark_ = false; } 515 516 MBasicBlock* immediateDominator() const { return immediateDominator_; } 517 518 void setImmediateDominator(MBasicBlock* dom) { immediateDominator_ = dom; } 519 520 MTest* immediateDominatorBranch(BranchDirection* pdirection); 521 522 size_t numImmediatelyDominatedBlocks() const { 523 return immediatelyDominated_.length(); 524 } 525 526 MBasicBlock* getImmediatelyDominatedBlock(size_t i) const { 527 return immediatelyDominated_[i]; 528 } 529 530 MBasicBlock** immediatelyDominatedBlocksBegin() { 531 return immediatelyDominated_.begin(); 532 } 533 534 MBasicBlock** 
immediatelyDominatedBlocksEnd() { 535 return immediatelyDominated_.end(); 536 } 537 538 // Return the number of blocks dominated by this block. All blocks 539 // dominate at least themselves, so this will always be non-zero. 540 size_t numDominated() const { 541 MOZ_ASSERT(numDominated_ != 0); 542 return numDominated_; 543 } 544 545 void addNumDominated(size_t n) { numDominated_ += n; } 546 547 // Add |child| to this block's immediately-dominated set. 548 bool addImmediatelyDominatedBlock(MBasicBlock* child); 549 550 // Remove |child| from this block's immediately-dominated set. 551 void removeImmediatelyDominatedBlock(MBasicBlock* child); 552 553 // This function retrieves the internal instruction associated with a 554 // slot, and should not be used for normal stack operations. It is an 555 // internal helper that is also used to enhance spew. 556 MDefinition* getSlot(uint32_t index) { 557 MOZ_ASSERT(index < stackPosition_); 558 return slots_[index]; 559 } 560 561 MResumePoint* entryResumePoint() const { return entryResumePoint_; } 562 void setEntryResumePoint(MResumePoint* rp) { entryResumePoint_ = rp; } 563 void clearEntryResumePoint() { 564 discardResumePoint(entryResumePoint_); 565 entryResumePoint_ = nullptr; 566 } 567 MResumePoint* outerResumePoint() const { return outerResumePoint_; } 568 void setOuterResumePoint(MResumePoint* outer) { 569 MOZ_ASSERT(!outerResumePoint_); 570 outerResumePoint_ = outer; 571 } 572 void clearOuterResumePoint() { 573 discardResumePoint(outerResumePoint_); 574 outerResumePoint_ = nullptr; 575 } 576 MResumePoint* callerResumePoint() const { return callerResumePoint_; } 577 void setCallerResumePoint(MResumePoint* caller) { 578 callerResumePoint_ = caller; 579 } 580 581 LBlock* lir() const { return lir_; } 582 void assignLir(LBlock* lir) { 583 MOZ_ASSERT(!lir_); 584 lir_ = lir; 585 } 586 587 MBasicBlock* successorWithPhis() const { return successorWithPhis_; } 588 uint32_t positionInPhiSuccessor() const { 589 
MOZ_ASSERT(successorWithPhis()); 590 return positionInPhiSuccessor_; 591 } 592 void setSuccessorWithPhis(MBasicBlock* successor, uint32_t id) { 593 successorWithPhis_ = successor; 594 positionInPhiSuccessor_ = id; 595 } 596 void clearSuccessorWithPhis() { successorWithPhis_ = nullptr; } 597 size_t numSuccessors() const { 598 MOZ_ASSERT(lastIns()); 599 return lastIns()->numSuccessors(); 600 } 601 MBasicBlock* getSuccessor(size_t index) const { 602 MOZ_ASSERT(lastIns()); 603 return lastIns()->getSuccessor(index); 604 } 605 MBasicBlock* getSingleSuccessor() const { 606 MOZ_ASSERT(numSuccessors() == 1); 607 return getSuccessor(0); 608 } 609 size_t getSuccessorIndex(MBasicBlock*) const; 610 size_t getPredecessorIndex(MBasicBlock*) const; 611 612 void setLoopDepth(uint32_t loopDepth) { loopDepth_ = loopDepth; } 613 uint32_t loopDepth() const { return loopDepth_; } 614 615 void dumpStack(GenericPrinter& out); 616 void dumpStack(); 617 618 void dump(GenericPrinter& out); 619 void dump(); 620 621 void updateTrackedSite(BytecodeSite* site) { 622 MOZ_ASSERT(site->tree() == trackedSite_->tree()); 623 trackedSite_ = site; 624 } 625 BytecodeSite* trackedSite() const { return trackedSite_; } 626 InlineScriptTree* trackedTree() const { return trackedSite_->tree(); } 627 628 // Find the previous resume point that would be used if this instruction 629 // bails out. 
630 MResumePoint* activeResumePoint(MInstruction* ins); 631 632 #ifdef JS_JITSPEW 633 const char* nameOfKind() const { 634 switch (kind_) { 635 case MBasicBlock::Kind::NORMAL: 636 return "NORMAL"; 637 case MBasicBlock::Kind::PENDING_LOOP_HEADER: 638 return "PENDING_LOOP_HEADER"; 639 case MBasicBlock::Kind::LOOP_HEADER: 640 return "LOOP_HEADER"; 641 case MBasicBlock::Kind::SPLIT_EDGE: 642 return "SPLIT_EDGE"; 643 case MBasicBlock::Kind::FAKE_LOOP_PRED: 644 return "FAKE_LOOP_PRED"; 645 case MBasicBlock::Kind::INTERNAL: 646 return "INTERNAL"; 647 case MBasicBlock::Kind::DEAD: 648 return "DEAD"; 649 default: 650 return "MBasicBlock::Kind::???"; 651 } 652 } 653 #endif 654 655 private: 656 MIRGraph& graph_; 657 const CompileInfo& info_; // Each block originates from a particular script. 658 InlineList<MInstruction> instructions_; 659 Vector<MBasicBlock*, 1, JitAllocPolicy> predecessors_; 660 InlineList<MPhi> phis_; 661 FixedList<MDefinition*> slots_; 662 uint32_t stackPosition_; 663 uint32_t id_; 664 uint32_t domIndex_; // Index in the dominator tree. 665 uint32_t numDominated_; 666 LBlock* lir_; 667 668 // Copy of a dominator block's outerResumePoint_ which holds the state of 669 // caller frame at the time of the call. If not null, this implies that this 670 // basic block corresponds to an inlined script. 671 MResumePoint* callerResumePoint_; 672 673 // Resume point holding baseline-like frame for the PC corresponding to the 674 // entry of this basic block. 675 MResumePoint* entryResumePoint_; 676 677 // Resume point holding baseline-like frame for the PC corresponding to the 678 // beginning of the call-site which is being inlined after this block. 679 MResumePoint* outerResumePoint_; 680 681 #ifdef DEBUG 682 // Unordered list used to verify that all the resume points which are 683 // registered are correctly removed when a basic block is removed. 
684 InlineForwardList<MResumePoint> resumePoints_; 685 #endif 686 687 MBasicBlock* successorWithPhis_; 688 uint32_t positionInPhiSuccessor_; 689 uint32_t loopDepth_; 690 Kind kind_ : 8; 691 692 // Utility mark for traversal algorithms. 693 bool mark_; 694 695 Vector<MBasicBlock*, 1, JitAllocPolicy> immediatelyDominated_; 696 MBasicBlock* immediateDominator_; 697 698 // Track bailouts by storing the current pc in MIR instruction added at 699 // this cycle. This is also used for tracking calls and optimizations when 700 // profiling. 701 BytecodeSite* trackedSite_; 702 }; 703 704 using MBasicBlockIterator = InlineListIterator<MBasicBlock>; 705 using ReversePostorderIterator = InlineListIterator<MBasicBlock>; 706 using PostorderIterator = InlineListReverseIterator<MBasicBlock>; 707 708 using MIRGraphReturns = Vector<MBasicBlock*, 1, JitAllocPolicy>; 709 710 class MIRGraph { 711 InlineList<MBasicBlock> blocks_; 712 TempAllocator* alloc_; 713 MIRGraphReturns* returnAccumulator_; 714 uint32_t blockIdGen_; 715 uint32_t idGen_; 716 MBasicBlock* osrBlock_; 717 718 size_t numBlocks_; 719 bool hasTryBlock_; 720 721 InlineList<MPhi> phiFreeList_; 722 size_t phiFreeListLength_; 723 724 public: 725 explicit MIRGraph(TempAllocator* alloc) 726 : alloc_(alloc), 727 returnAccumulator_(nullptr), 728 blockIdGen_(0), 729 idGen_(0), 730 osrBlock_(nullptr), 731 numBlocks_(0), 732 hasTryBlock_(false), 733 phiFreeListLength_(0) {} 734 735 TempAllocator& alloc() const { return *alloc_; } 736 737 void addBlock(MBasicBlock* block); 738 void insertBlockAfter(MBasicBlock* at, MBasicBlock* block); 739 void insertBlockBefore(MBasicBlock* at, MBasicBlock* block); 740 741 void unmarkBlocks(); 742 743 void setReturnAccumulator(MIRGraphReturns* accum) { 744 returnAccumulator_ = accum; 745 } 746 MIRGraphReturns* returnAccumulator() const { return returnAccumulator_; } 747 748 [[nodiscard]] bool addReturn(MBasicBlock* returnBlock) { 749 if (!returnAccumulator_) { 750 return true; 751 } 752 753 return 
returnAccumulator_->append(returnBlock); 754 } 755 756 MBasicBlock* entryBlock() { return *blocks_.begin(); } 757 MBasicBlockIterator begin() { return blocks_.begin(); } 758 MBasicBlockIterator begin(const MBasicBlock* at) { return blocks_.begin(at); } 759 MBasicBlockIterator end() { return blocks_.end(); } 760 PostorderIterator poBegin() { return blocks_.rbegin(); } 761 PostorderIterator poBegin(const MBasicBlock* at) { 762 return blocks_.rbegin(at); 763 } 764 PostorderIterator poEnd() { return blocks_.rend(); } 765 ReversePostorderIterator rpoBegin() { return blocks_.begin(); } 766 ReversePostorderIterator rpoBegin(const MBasicBlock* at) { 767 return blocks_.begin(at); 768 } 769 ReversePostorderIterator rpoEnd() { return blocks_.end(); } 770 void removeBlock(MBasicBlock* block); 771 void moveBlockToEnd(MBasicBlock* block) { 772 blocks_.remove(block); 773 MOZ_ASSERT_IF(!blocks_.empty(), block->id()); 774 blocks_.pushBack(block); 775 } 776 void moveBlockBefore(MBasicBlock* at, MBasicBlock* block) { 777 MOZ_ASSERT(block->id()); 778 blocks_.remove(block); 779 blocks_.insertBefore(at, block); 780 } 781 void moveBlockAfter(MBasicBlock* at, MBasicBlock* block) { 782 MOZ_ASSERT(block->id()); 783 blocks_.remove(block); 784 blocks_.insertAfter(at, block); 785 } 786 size_t numBlocks() const { return numBlocks_; } 787 uint32_t numBlockIds() const { return blockIdGen_; } 788 void allocDefinitionId(MDefinition* ins) { ins->setId(idGen_++); } 789 uint32_t getNumInstructionIds() { return idGen_; } 790 MResumePoint* entryResumePoint() { return entryBlock()->entryResumePoint(); } 791 792 void setOsrBlock(MBasicBlock* osrBlock) { 793 MOZ_ASSERT(!osrBlock_); 794 osrBlock_ = osrBlock; 795 } 796 MBasicBlock* osrBlock() const { return osrBlock_; } 797 798 MBasicBlock* osrPreHeaderBlock() const { 799 return osrBlock() ? 
osrBlock()->getSingleSuccessor() : nullptr; 800 } 801 802 bool hasTryBlock() const { return hasTryBlock_; } 803 void setHasTryBlock() { hasTryBlock_ = true; } 804 805 void dump(GenericPrinter& out); 806 void dump(); 807 808 void addPhiToFreeList(MPhi* phi) { 809 phiFreeList_.pushBack(phi); 810 phiFreeListLength_++; 811 } 812 size_t phiFreeListLength() const { return phiFreeListLength_; } 813 MPhi* takePhiFromFreeList() { 814 MOZ_ASSERT(phiFreeListLength_ > 0); 815 phiFreeListLength_--; 816 return phiFreeList_.popBack(); 817 } 818 819 void removeFakeLoopPredecessors(); 820 821 #ifdef DEBUG 822 // Dominators can't be built after we remove fake loop predecessors. 823 private: 824 bool canBuildDominators_ = true; 825 826 public: 827 bool canBuildDominators() const { return canBuildDominators_; } 828 #endif 829 }; 830 831 class MDefinitionIterator { 832 friend class MBasicBlock; 833 friend class MNodeIterator; 834 835 private: 836 MBasicBlock* block_; 837 MPhiIterator phiIter_; 838 MInstructionIterator iter_; 839 840 bool atPhi() const { return phiIter_ != block_->phisEnd(); } 841 842 MDefinition* getIns() { 843 if (atPhi()) { 844 return *phiIter_; 845 } 846 return *iter_; 847 } 848 849 bool more() const { return atPhi() || (*iter_) != block_->lastIns(); } 850 851 public: 852 explicit MDefinitionIterator(MBasicBlock* block) 853 : block_(block), phiIter_(block->phisBegin()), iter_(block->begin()) {} 854 855 MDefinitionIterator operator++() { 856 MOZ_ASSERT(more()); 857 if (atPhi()) { 858 ++phiIter_; 859 } else { 860 ++iter_; 861 } 862 return *this; 863 } 864 865 MDefinitionIterator operator++(int) { 866 MDefinitionIterator old(*this); 867 operator++(); 868 return old; 869 } 870 871 explicit operator bool() const { return more(); } 872 873 MDefinition* operator*() { return getIns(); } 874 875 MDefinition* operator->() { return getIns(); } 876 }; 877 878 // Iterates on all resume points, phis, and instructions of a MBasicBlock. 
879 // Resume points are visited as long as they have not been discarded. 880 class MNodeIterator { 881 private: 882 // If this is non-null, the resume point that we will visit next (unless 883 // it has been discarded). Initialized to the entry resume point. 884 // Otherwise, resume point of the most recently visited instruction. 885 MResumePoint* resumePoint_; 886 887 mozilla::DebugOnly<MInstruction*> lastInstruction_ = nullptr; 888 889 // Definition iterator which is one step ahead when visiting resume points. 890 // This is in order to avoid incrementing the iterator while it is settled 891 // on a discarded instruction. 892 MDefinitionIterator defIter_; 893 894 MBasicBlock* block() const { return defIter_.block_; } 895 896 bool atResumePoint() const { 897 MOZ_ASSERT_IF(lastInstruction_ && !lastInstruction_->isDiscarded(), 898 lastInstruction_->resumePoint() == resumePoint_); 899 return resumePoint_ && !resumePoint_->isDiscarded(); 900 } 901 902 MNode* getNode() { 903 if (atResumePoint()) { 904 return resumePoint_; 905 } 906 return *defIter_; 907 } 908 909 void next() { 910 if (!atResumePoint()) { 911 if (defIter_->isInstruction()) { 912 resumePoint_ = defIter_->toInstruction()->resumePoint(); 913 lastInstruction_ = defIter_->toInstruction(); 914 } 915 defIter_++; 916 } else { 917 resumePoint_ = nullptr; 918 lastInstruction_ = nullptr; 919 } 920 } 921 922 bool more() const { return defIter_ || atResumePoint(); } 923 924 public: 925 explicit MNodeIterator(MBasicBlock* block) 926 : resumePoint_(block->entryResumePoint()), defIter_(block) { 927 MOZ_ASSERT(bool(block->entryResumePoint()) == atResumePoint()); 928 } 929 930 MNodeIterator operator++(int) { 931 MNodeIterator old(*this); 932 if (more()) { 933 next(); 934 } 935 return old; 936 } 937 938 explicit operator bool() const { return more(); } 939 940 MNode* operator*() { return getNode(); } 941 942 MNode* operator->() { return getNode(); } 943 }; 944 945 void MBasicBlock::add(MInstruction* ins) { 946 
MOZ_ASSERT(!hasLastIns()); 947 ins->setInstructionBlock(this, trackedSite_); 948 graph().allocDefinitionId(ins); 949 instructions_.pushBack(ins); 950 } 951 952 } // namespace jit 953 } // namespace js 954 955 #endif /* jit_MIRGraph_h */