ScalarReplacement.cpp (133091B)
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/ScalarReplacement.h"

#include "jit/IonAnalysis.h"
#include "jit/JitSpewer.h"
#include "jit/MIR-wasm.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "jit/WarpBuilderShared.h"
#include "js/Vector.h"
#include "vm/ArgumentsObject.h"
#include "vm/TypedArrayObject.h"

#include "gc/ObjectKind-inl.h"

namespace js {
namespace jit {

// Generic driver for scalar replacement: walks the MIR graph in reverse
// postorder starting at the allocation's block and feeds every node to a
// MemoryView, which tracks an abstract per-block state (MemoryView::BlockState)
// for the single allocation being replaced.  Entry states of successor blocks
// are produced by merging the exit states of their visited predecessors.
template <typename MemoryView>
class EmulateStateOf {
 private:
  using BlockState = typename MemoryView::BlockState;

  const MIRGenerator* mir_;
  MIRGraph& graph_;

  // Block state at the entrance of all basic blocks.
  Vector<BlockState*, 8, SystemAllocPolicy> states_;

 public:
  EmulateStateOf(const MIRGenerator* mir, MIRGraph& graph)
      : mir_(mir), graph_(graph) {}

  // Returns false on OOM or compilation cancellation.
  bool run(MemoryView& view);
};

template <typename MemoryView>
bool EmulateStateOf<MemoryView>::run(MemoryView& view) {
  // Initialize the current block state of each block to an unknown state.
  if (!states_.appendN(nullptr, graph_.numBlocks())) {
    return false;
  }

  // Initialize the first block which needs to be traversed in RPO.
  MBasicBlock* startBlock = view.startingBlock();
  if (!view.initStartingState(&states_[startBlock->id()])) {
    return false;
  }

  // Iterate over each basic block which has a valid entry state, and merge
  // the state in the successor blocks.
  for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
       block != graph_.rpoEnd(); block++) {
    if (mir_->shouldCancel(MemoryView::phaseName)) {
      return false;
    }

    // Get the block state as the result of the merge of all predecessors
    // which have already been visited in RPO. This means that backedges
    // are not yet merged into the loop.
    BlockState* state = states_[block->id()];
    if (!state) {
      // No entry state: the allocation does not flow into this block.
      continue;
    }
    view.setEntryBlockState(state);

    // Iterates over resume points, phi and instructions.
    for (MNodeIterator iter(*block); iter;) {
      // Increment the iterator before visiting the instruction, as the
      // visit function might discard itself from the basic block.
      MNode* ins = *iter++;
      if (ins->isDefinition()) {
        MDefinition* def = ins->toDefinition();
        // Dispatch to the view's visit##op method via the opcode X-macro.
        switch (def->op()) {
#define MIR_OP(op)                 \
  case MDefinition::Opcode::op:    \
    view.visit##op(def->to##op()); \
    break;
          MIR_OPCODE_LIST(MIR_OP)
#undef MIR_OP
        }
      } else {
        view.visitResumePoint(ins->toResumePoint());
      }
      if (!graph_.alloc().ensureBallast()) {
        return false;
      }
      if (view.oom()) {
        return false;
      }
    }

    // For each successor, merge the current state into the state of the
    // successors.
    for (size_t s = 0; s < block->numSuccessors(); s++) {
      MBasicBlock* succ = block->getSuccessor(s);
      if (!view.mergeIntoSuccessorState(*block, succ, &states_[succ->id()])) {
        return false;
      }
    }
  }

  states_.clear();
  return true;
}

// True for object allocations which ScalarReplacementOfObject knows how to
// replace by an MObjectState.
static inline bool IsOptimizableObjectInstruction(MInstruction* ins) {
  return ins->isNewObject() || ins->isNewPlainObject() ||
         ins->isNewCallObject() || ins->isNewIterator();
}

// Returns true when |operand| is |newObject| itself, possibly seen through a
// chain of guards / checks / unboxes which all forward the same object.
static bool PhiOperandEqualTo(MDefinition* operand, MInstruction* newObject) {
  if (operand == newObject) {
    return true;
  }

  switch (operand->op()) {
    case MDefinition::Opcode::GuardShape:
      return PhiOperandEqualTo(operand->toGuardShape()->input(), newObject);

    case MDefinition::Opcode::GuardToClass:
      return PhiOperandEqualTo(operand->toGuardToClass()->input(), newObject);

    case MDefinition::Opcode::CheckIsObj:
      return PhiOperandEqualTo(operand->toCheckIsObj()->input(), newObject);

    case MDefinition::Opcode::Unbox:
      return PhiOperandEqualTo(operand->toUnbox()->input(), newObject);

    default:
      return false;
  }
}

// Return true if all phi operands are equal to |newObject|.
static bool PhiOperandsEqualTo(MPhi* phi, MInstruction* newObject) {
  MOZ_ASSERT(IsOptimizableObjectInstruction(newObject));

  for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
    if (!PhiOperandEqualTo(phi->getOperand(i), newObject)) {
      return false;
    }
  }
  return true;
}

static bool IsObjectEscaped(MDefinition* ins, MInstruction* newObject,
                            const Shape* shapeDefault = nullptr);

// Returns False if the lambda is not escaped and if it is optimizable by
// ScalarReplacementOfObject.
// |ins| is the use currently being examined (initially the lambda itself, then
// guards wrapping it); |lambda| is the original MLambda/MFunctionWithProto;
// |newObject| is the environment object candidate for scalar replacement.
static bool IsLambdaEscaped(MInstruction* ins, MInstruction* lambda,
                            MInstruction* newObject, const Shape* shape) {
  MOZ_ASSERT(lambda->isLambda() || lambda->isFunctionWithProto());
  MOZ_ASSERT(IsOptimizableObjectInstruction(newObject));
  JitSpewDef(JitSpew_Escape, "Check lambda\n", ins);
  JitSpewIndent spewIndent(JitSpew_Escape);

  // The scope chain is not escaped if none of the Lambdas which are
  // capturing it are escaped.
  for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
    MNode* consumer = (*i)->consumer();
    if (!consumer->isDefinition()) {
      // Cannot optimize if it is observable from fun.arguments or others.
      if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
        JitSpew(JitSpew_Escape, "Observable lambda cannot be recovered");
        return true;
      }
      continue;
    }

    MDefinition* def = consumer->toDefinition();
    switch (def->op()) {
      case MDefinition::Opcode::GuardToFunction: {
        // The guard forwards the lambda; recurse on the guard's uses.
        auto* guard = def->toGuardToFunction();
        if (IsLambdaEscaped(guard, lambda, newObject, shape)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardFunctionScript: {
        auto* guard = def->toGuardFunctionScript();
        // The guard must match the lambda's actual script, otherwise it would
        // bail and the replacement would be observable.
        BaseScript* actual;
        if (lambda->isLambda()) {
          actual = lambda->toLambda()->templateFunction()->baseScript();
        } else {
          actual = lambda->toFunctionWithProto()->function()->baseScript();
        }
        if (actual != guard->expected()) {
          JitSpewDef(JitSpew_Escape, "has a non-matching script guard\n",
                     guard);
          return true;
        }
        if (IsLambdaEscaped(guard, lambda, newObject, shape)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::FunctionEnvironment: {
        // Reading the environment back out of the lambda yields the object
        // being replaced: check its escapes as the object's own.
        if (IsObjectEscaped(def->toFunctionEnvironment(), newObject, shape)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
        return true;
    }
  }
  JitSpew(JitSpew_Escape, "Lambda is not escaped");
  return false;
}

// Entry point: start the walk at the lambda itself.
static bool IsLambdaEscaped(MInstruction* lambda, MInstruction* newObject,
                            const Shape* shape) {
  return IsLambdaEscaped(lambda, lambda, newObject, shape);
}

// Returns False if the object is not escaped and if it is optimizable by
// ScalarReplacementOfObject.
//
// For the moment, this code is dumb as it only supports objects which are not
// changing shape.
static bool IsObjectEscaped(MDefinition* ins, MInstruction* newObject,
                            const Shape* shapeDefault) {
  MOZ_ASSERT(ins->type() == MIRType::Object || ins->isPhi());
  MOZ_ASSERT(IsOptimizableObjectInstruction(newObject));

  JitSpewDef(JitSpew_Escape, "Check object\n", ins);
  JitSpewIndent spewIndent(JitSpew_Escape);

  // On the initial (non-recursive) call, derive the expected shape from the
  // allocation; recursive calls pass it down through |shapeDefault|.
  const Shape* shape = shapeDefault;
  if (!shape) {
    if (ins->isNewPlainObject()) {
      shape = ins->toNewPlainObject()->shape();
    } else if (JSObject* templateObj = MObjectState::templateObjectOf(ins)) {
      shape = templateObj->shape();
    }
  }

  if (!shape) {
    JitSpew(JitSpew_Escape, "No shape defined.");
    return true;
  }

  // Check if the object is escaped. If the object is not the first argument
  // of either a known Store / Load, then we consider it as escaped. This is a
  // cheap and conservative escape analysis.
  for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
    MNode* consumer = (*i)->consumer();
    if (!consumer->isDefinition()) {
      // Cannot optimize if it is observable from fun.arguments or others.
      if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
        JitSpew(JitSpew_Escape, "Observable object cannot be recovered");
        return true;
      }
      continue;
    }

    MDefinition* def = consumer->toDefinition();
    switch (def->op()) {
      case MDefinition::Opcode::StoreFixedSlot:
      case MDefinition::Opcode::LoadFixedSlot:
        // Not escaped if it is the first argument.
        if (def->indexOf(*i) == 0) {
          break;
        }

        JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
        return true;

      case MDefinition::Opcode::PostWriteBarrier:
        break;

      case MDefinition::Opcode::Slots: {
#ifdef DEBUG
        // Assert that MSlots are only used by MStoreDynamicSlot and
        // MLoadDynamicSlot.
        MSlots* ins = def->toSlots();
        MOZ_ASSERT(ins->object() != 0);
        for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
          // toDefinition should normally never fail, since they don't get
          // captured by resume points.
          MDefinition* def = (*i)->consumer()->toDefinition();
          MOZ_ASSERT(
              def->op() == MDefinition::Opcode::StoreDynamicSlot ||
              def->op() == MDefinition::Opcode::LoadDynamicSlot ||
              def->op() == MDefinition::Opcode::StoreDynamicSlotFromOffset ||
              def->op() == MDefinition::Opcode::LoadDynamicSlotFromOffset);
        }
#endif
        break;
      }

      case MDefinition::Opcode::GuardShape: {
        MGuardShape* guard = def->toGuardShape();
        if (shape != guard->shape()) {
          JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", guard);
          return true;
        }
        if (IsObjectEscaped(def->toInstruction(), newObject, shape)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardToClass: {
        MGuardToClass* guard = def->toGuardToClass();
        if (!shape || shape->getObjectClass() != guard->getClass()) {
          JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", guard);
          return true;
        }
        if (IsObjectEscaped(def->toInstruction(), newObject, shape)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::CheckIsObj: {
        if (IsObjectEscaped(def->toInstruction(), newObject, shape)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::Unbox: {
        if (def->type() != MIRType::Object) {
          JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
          return true;
        }
        if (IsObjectEscaped(def->toInstruction(), newObject, shape)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::Lambda:
      case MDefinition::Opcode::FunctionWithProto: {
        // A lambda capturing this object as its environment chain does not
        // escape it as long as the lambda itself does not escape.
        if (IsLambdaEscaped(def->toInstruction(), newObject, shape)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::Phi: {
        // Only phis whose every operand is (a guarded alias of) this object
        // are supported; such a phi is just another alias of the object.
        auto* phi = def->toPhi();
        if (!PhiOperandsEqualTo(phi, newObject)) {
          JitSpewDef(JitSpew_Escape, "has different phi operands\n", def);
          return true;
        }
        if (IsObjectEscaped(phi, newObject, shape)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::Compare: {
        // Comparisons are fine only if they can be folded to a constant
        // without materializing the object.
        bool canFold;
        if (!def->toCompare()->tryFold(&canFold)) {
          JitSpewDef(JitSpew_Escape, "has an unsupported compare\n", def);
          return true;
        }
        break;
      }

      // Doesn't escape the object.
      case MDefinition::Opcode::IsObject:
        break;

      // This instruction is a no-op used to verify that scalar replacement
      // is working as expected in jit-test.
      case MDefinition::Opcode::AssertRecoveredOnBailout:
        break;

      // This is just a special flavor of constant which lets us optimize
      // out some guards in certain circumstances. We'll turn this into a
      // regular constant later.
      case MDefinition::Opcode::ConstantProto:
        break;

      // We definitely don't need barriers for objects that don't exist.
      case MDefinition::Opcode::AssertCanElidePostWriteBarrier:
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
        return true;
    }
  }

  JitSpew(JitSpew_Escape, "Object is not escaped");
  return false;
}

// MemoryView for EmulateStateOf which replaces a single non-escaping object
// allocation (obj_) by an MObjectState: loads are forwarded to the tracked
// slot values, stores fork a new state, guards/phis aliasing the object are
// replaced by the object itself, and resume points are patched so the object
// can be reconstructed on bailout.
class ObjectMemoryView : public MDefinitionVisitorDefaultNoop {
 public:
  using BlockState = MObjectState;
  static const char phaseName[];

 private:
  TempAllocator& alloc_;
  MConstant* undefinedVal_;
  MInstruction* obj_;
  MBasicBlock* startBlock_;
  BlockState* state_;

  // Used to improve the memory usage by sharing common modification.
  const MResumePoint* lastResumePoint_;

  bool oom_;

 public:
  ObjectMemoryView(TempAllocator& alloc, MInstruction* obj);

  MBasicBlock* startingBlock();
  bool initStartingState(BlockState** outState);

  void setEntryBlockState(BlockState* state);
  bool mergeIntoSuccessorState(MBasicBlock* curr, MBasicBlock* succ,
                               BlockState** pSuccState);

#ifdef DEBUG
  void assertSuccess();
#else
  void assertSuccess() {}
#endif

  bool oom() const { return oom_; }

 private:
  // Returns the lambda aliasing obj_ as its environment, or nullptr.
  MDefinition* functionForCallObject(MDefinition* ins);

 public:
  void visitResumePoint(MResumePoint* rp);
  void visitObjectState(MObjectState* ins);
  void visitStoreFixedSlot(MStoreFixedSlot* ins);
  void visitLoadFixedSlot(MLoadFixedSlot* ins);
  void visitPostWriteBarrier(MPostWriteBarrier* ins);
  void visitStoreDynamicSlot(MStoreDynamicSlot* ins);
  void visitLoadDynamicSlot(MLoadDynamicSlot* ins);
  void visitGuardShape(MGuardShape* ins);
  void visitGuardToClass(MGuardToClass* ins);
  void visitCheckIsObj(MCheckIsObj* ins);
  void visitUnbox(MUnbox* ins);
  void visitFunctionEnvironment(MFunctionEnvironment* ins);
  void visitGuardToFunction(MGuardToFunction* ins);
  void visitGuardFunctionScript(MGuardFunctionScript* ins);
  void visitLambda(MLambda* ins);
  void visitFunctionWithProto(MFunctionWithProto* ins);
  void visitPhi(MPhi* ins);
  void visitCompare(MCompare* ins);
  void visitConstantProto(MConstantProto* ins);
  void visitIsObject(MIsObject* ins);
  void visitAssertCanElidePostWriteBarrier(
      MAssertCanElidePostWriteBarrier* ins);
};

/* static */ const char ObjectMemoryView::phaseName[] =
    "Scalar Replacement of Object";

ObjectMemoryView::ObjectMemoryView(TempAllocator& alloc, MInstruction* obj)
    : alloc_(alloc),
      undefinedVal_(nullptr),
      obj_(obj),
      startBlock_(obj->block()),
      state_(nullptr),
      lastResumePoint_(nullptr),
      oom_(false) {
  // Annotate snapshots RValue such that we recover the store first.
  obj_->setIncompleteObject();

  // Annotate the instruction such that we do not replace it by a
  // Magic(JS_OPTIMIZED_OUT) in case of removed uses.
  obj_->setImplicitlyUsedUnchecked();
}

MBasicBlock* ObjectMemoryView::startingBlock() { return startBlock_; }

bool ObjectMemoryView::initStartingState(BlockState** outState) {
  // Uninitialized slots have an "undefined" value.
  undefinedVal_ = MConstant::NewUndefined(alloc_);
  startBlock_->insertBefore(obj_, undefinedVal_);

  // Create a new block state and insert at it at the location of the new
  // object.
  BlockState* state = BlockState::New(alloc_, obj_);
  if (!state) {
    return false;
  }

  startBlock_->insertAfter(obj_, state);

  // Initialize the properties of the object state.
  state->initFromTemplateObject(alloc_, undefinedVal_);

  // Hold out of resume point until it is visited.
  state->setInWorklist();

  *outState = state;
  return true;
}

void ObjectMemoryView::setEntryBlockState(BlockState* state) { state_ = state; }

bool ObjectMemoryView::mergeIntoSuccessorState(MBasicBlock* curr,
                                               MBasicBlock* succ,
                                               BlockState** pSuccState) {
  BlockState* succState = *pSuccState;

  // When a block has no state yet, create an empty one for the
  // successor.
  if (!succState) {
    // If the successor is not dominated then the object cannot flow
    // in this basic block without a Phi. We know that no Phi exist
    // in non-dominated successors as the conservative escaped
    // analysis fails otherwise. Such condition can succeed if the
    // successor is a join at the end of a if-block and the object
    // only exists within the branch.
    if (!startBlock_->dominates(succ)) {
      return true;
    }

    // If there is only one predecessor, carry over the last state of the
    // block to the successor. As the block state is immutable, if the
    // current block has multiple successors, they will share the same entry
    // state.
    if (succ->numPredecessors() <= 1 || !state_->numSlots()) {
      *pSuccState = state_;
      return true;
    }

    // If we have multiple predecessors, then we allocate one Phi node for
    // each predecessor, and create a new block state which only has phi
    // nodes. These would later be removed by the removal of redundant phi
    // nodes.
    succState = BlockState::Copy(alloc_, state_);
    if (!succState) {
      return false;
    }

    size_t numPreds = succ->numPredecessors();
    for (size_t slot = 0; slot < state_->numSlots(); slot++) {
      MPhi* phi = MPhi::New(alloc_.fallible());
      if (!phi || !phi->reserveLength(numPreds)) {
        return false;
      }

      // Fill the input of the successors Phi with undefined
      // values, and each block later fills the Phi inputs.
      for (size_t p = 0; p < numPreds; p++) {
        phi->addInput(undefinedVal_);
      }

      // Add Phi in the list of Phis of the basic block.
      succ->addPhi(phi);
      succState->setSlot(slot, phi);
    }

    // Insert the newly created block state instruction at the beginning
    // of the successor block, after all the phi nodes. Note that it
    // would be captured by the entry resume point of the successor
    // block.
    succ->insertBefore(succ->safeInsertTop(), succState);
    *pSuccState = succState;
  }

  MOZ_ASSERT_IF(succ == startBlock_, startBlock_->isLoopHeader());
  if (succ->numPredecessors() > 1 && succState->numSlots() &&
      succ != startBlock_) {
    // We need to re-compute successorWithPhis as the previous EliminatePhis
    // phase might have removed all the Phis from the successor block.
    size_t currIndex;
    MOZ_ASSERT(!succ->phisEmpty());
    if (curr->successorWithPhis()) {
      MOZ_ASSERT(curr->successorWithPhis() == succ);
      currIndex = curr->positionInPhiSuccessor();
    } else {
      currIndex = succ->indexForPredecessor(curr);
      curr->setSuccessorWithPhis(succ, currIndex);
    }
    MOZ_ASSERT(succ->getPredecessor(currIndex) == curr);

    // Copy the current slot states to the index of current block in all the
    // Phi created during the first visit of the successor.
    for (size_t slot = 0; slot < state_->numSlots(); slot++) {
      MPhi* phi = succState->getSlot(slot)->toPhi();
      phi->replaceOperand(currIndex, state_->getSlot(slot));
    }
  }

  return true;
}

#ifdef DEBUG
void ObjectMemoryView::assertSuccess() {
  for (MUseIterator i(obj_->usesBegin()); i != obj_->usesEnd(); i++) {
    MNode* ins = (*i)->consumer();
    MDefinition* def = nullptr;

    // Resume points have been replaced by the object state.
    if (ins->isResumePoint() ||
        (def = ins->toDefinition())->isRecoveredOnBailout()) {
      MOZ_ASSERT(obj_->isIncompleteObject());
      continue;
    }

    // The only remaining uses would be removed by DCE, which will also
    // recover the object on bailouts.
    MOZ_ASSERT(def->isSlots() || def->isLambda() || def->isFunctionWithProto());
    MOZ_ASSERT(!def->hasDefUses());
  }
}
#endif

void ObjectMemoryView::visitResumePoint(MResumePoint* rp) {
  // As long as the MObjectState is not yet seen next to the allocation, we do
  // not patch the resume point to recover the side effects.
  if (!state_->isInWorklist()) {
    rp->addStore(alloc_, state_, lastResumePoint_);
    lastResumePoint_ = rp;
  }
}

void ObjectMemoryView::visitObjectState(MObjectState* ins) {
  if (ins->isInWorklist()) {
    ins->setNotInWorklist();
  }
}

void ObjectMemoryView::visitStoreFixedSlot(MStoreFixedSlot* ins) {
  // Skip stores made on other objects.
  if (ins->object() != obj_) {
    return;
  }

  // Clone the state and update the slot value.
  if (state_->hasFixedSlot(ins->slot())) {
    state_ = BlockState::Copy(alloc_, state_);
    if (!state_) {
      oom_ = true;
      return;
    }

    state_->setFixedSlot(ins->slot(), ins->value());
    ins->block()->insertBefore(ins->toInstruction(), state_);
  } else {
    // UnsafeSetReserveSlot can access baked-in slots which are guarded by
    // conditions, which are not seen by the escape analysis.
    MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
    ins->block()->insertBefore(ins, bailout);
  }

  // Remove original instruction.
  ins->block()->discard(ins);
}

void ObjectMemoryView::visitLoadFixedSlot(MLoadFixedSlot* ins) {
  // Skip loads made on other objects.
  if (ins->object() != obj_) {
    return;
  }

  // Replace load by the slot value.
  if (state_->hasFixedSlot(ins->slot())) {
    ins->replaceAllUsesWith(state_->getFixedSlot(ins->slot()));
  } else {
    // UnsafeGetReserveSlot can access baked-in slots which are guarded by
    // conditions, which are not seen by the escape analysis.
    MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
    ins->block()->insertBefore(ins, bailout);
    ins->replaceAllUsesWith(undefinedVal_);
  }

  // Remove original instruction.
  ins->block()->discard(ins);
}

void ObjectMemoryView::visitPostWriteBarrier(MPostWriteBarrier* ins) {
  // Skip loads made on other objects.
  if (ins->object() != obj_) {
    return;
  }

  // Remove original instruction.
  ins->block()->discard(ins);
}

void ObjectMemoryView::visitStoreDynamicSlot(MStoreDynamicSlot* ins) {
  // Skip stores made on other objects.
  MSlots* slots = ins->slots()->toSlots();
  if (slots->object() != obj_) {
    // Guard objects are replaced when they are visited.
    MOZ_ASSERT(!slots->object()->isGuardShape() ||
               slots->object()->toGuardShape()->object() != obj_);
    return;
  }

  // Clone the state and update the slot value.
  if (state_->hasDynamicSlot(ins->slot())) {
    state_ = BlockState::Copy(alloc_, state_);
    if (!state_) {
      oom_ = true;
      return;
    }

    state_->setDynamicSlot(ins->slot(), ins->value());
    ins->block()->insertBefore(ins->toInstruction(), state_);
  } else {
    // UnsafeSetReserveSlot can access baked-in slots which are guarded by
    // conditions, which are not seen by the escape analysis.
    MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
    ins->block()->insertBefore(ins, bailout);
  }

  // Remove original instruction.
  ins->block()->discard(ins);
}

void ObjectMemoryView::visitLoadDynamicSlot(MLoadDynamicSlot* ins) {
  // Skip loads made on other objects.
  MSlots* slots = ins->slots()->toSlots();
  if (slots->object() != obj_) {
    // Guard objects are replaced when they are visited.
    MOZ_ASSERT(!slots->object()->isGuardShape() ||
               slots->object()->toGuardShape()->object() != obj_);
    return;
  }

  // Replace load by the slot value.
  if (state_->hasDynamicSlot(ins->slot())) {
    ins->replaceAllUsesWith(state_->getDynamicSlot(ins->slot()));
  } else {
    // UnsafeGetReserveSlot can access baked-in slots which are guarded by
    // conditions, which are not seen by the escape analysis.
    MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
    ins->block()->insertBefore(ins, bailout);
    ins->replaceAllUsesWith(undefinedVal_);
  }

  // Remove original instruction.
  ins->block()->discard(ins);
}

void ObjectMemoryView::visitGuardShape(MGuardShape* ins) {
  // Skip guards on other objects.
  if (ins->object() != obj_) {
    return;
  }

  // Replace the guard by its object.
  ins->replaceAllUsesWith(obj_);

  // Remove original instruction.
  ins->block()->discard(ins);
}

void ObjectMemoryView::visitGuardToClass(MGuardToClass* ins) {
  // Skip guards on other objects.
  if (ins->object() != obj_) {
    return;
  }

  // Replace the guard by its object.
  ins->replaceAllUsesWith(obj_);

  // Remove original instruction.
  ins->block()->discard(ins);
}

void ObjectMemoryView::visitCheckIsObj(MCheckIsObj* ins) {
  // Skip checks on other objects.
  if (ins->input() != obj_) {
    return;
  }

  // Replace the check by its object.
  ins->replaceAllUsesWith(obj_);

  // Remove original instruction.
  ins->block()->discard(ins);
}

void ObjectMemoryView::visitUnbox(MUnbox* ins) {
  // Skip unrelated unboxes.
  if (ins->input() != obj_) {
    return;
  }
  MOZ_ASSERT(ins->type() == MIRType::Object);

  // Replace the unbox with the object.
  ins->replaceAllUsesWith(obj_);

  // Remove the unbox.
  ins->block()->discard(ins);
}

MDefinition* ObjectMemoryView::functionForCallObject(MDefinition* ins) {
  // Return early when we don't replace MNewCallObject.
  if (!obj_->isNewCallObject()) {
    return nullptr;
  }

  // Unwrap instructions until we found either MLambda or MFunctionWithProto.
  // Return the function instruction if their environment chain matches the
  // MNewCallObject we're about to replace.
  while (true) {
    switch (ins->op()) {
      case MDefinition::Opcode::Lambda: {
        if (ins->toLambda()->environmentChain() == obj_) {
          return ins;
        }
        return nullptr;
      }
      case MDefinition::Opcode::FunctionWithProto: {
        if (ins->toFunctionWithProto()->environmentChain() == obj_) {
          return ins;
        }
        return nullptr;
      }
      case MDefinition::Opcode::FunctionEnvironment:
        ins = ins->toFunctionEnvironment()->function();
        break;
      case MDefinition::Opcode::GuardToFunction:
        ins = ins->toGuardToFunction()->object();
        break;
      case MDefinition::Opcode::GuardFunctionScript:
        ins = ins->toGuardFunctionScript()->function();
        break;
      default:
        return nullptr;
    }
  }
}

void ObjectMemoryView::visitFunctionEnvironment(MFunctionEnvironment* ins) {
  // Skip function environment which are not aliases of the NewCallObject.
  if (!functionForCallObject(ins)) {
    return;
  }

  // Replace the function environment by the scope chain of the lambda.
  ins->replaceAllUsesWith(obj_);

  // Remove original instruction.
  ins->block()->discard(ins);
}

void ObjectMemoryView::visitGuardToFunction(MGuardToFunction* ins) {
  // Skip guards on other objects.
  auto* function = functionForCallObject(ins);
  if (!function) {
    return;
  }

  // Replace the guard by its object.
  ins->replaceAllUsesWith(function);

  // Remove original instruction.
  ins->block()->discard(ins);
}

void ObjectMemoryView::visitGuardFunctionScript(MGuardFunctionScript* ins) {
  // Skip guards on other objects.
  auto* function = functionForCallObject(ins);
  if (!function) {
    return;
  }

  // Replace the guard by its object.
  ins->replaceAllUsesWith(function);

  // Remove original instruction.
  ins->block()->discard(ins);
}

void ObjectMemoryView::visitLambda(MLambda* ins) {
  if (ins->environmentChain() != obj_) {
    return;
  }

  // In order to recover the lambda we need to recover the scope chain, as the
  // lambda is holding it.
  ins->setIncompleteObject();
}

void ObjectMemoryView::visitFunctionWithProto(MFunctionWithProto* ins) {
  if (ins->environmentChain() != obj_) {
    return;
  }

  ins->setIncompleteObject();
}

void ObjectMemoryView::visitPhi(MPhi* ins) {
  // Skip phis on other objects.
  if (!PhiOperandsEqualTo(ins, obj_)) {
    return;
  }

  // Replace the phi by its object.
  ins->replaceAllUsesWith(obj_);

  // Remove original instruction.
  ins->block()->discardPhi(ins);
}

void ObjectMemoryView::visitCompare(MCompare* ins) {
  // Skip unrelated comparisons.
  if (ins->lhs() != obj_ && ins->rhs() != obj_) {
    return;
  }

  // Escape analysis only admitted comparisons which fold (see IsObjectEscaped),
  // so tryFold must succeed here.
  bool folded;
  MOZ_ALWAYS_TRUE(ins->tryFold(&folded));

  auto* cst = MConstant::NewBoolean(alloc_, folded);
  ins->block()->insertBefore(ins, cst);

  // Replace the comparison with a constant.
  ins->replaceAllUsesWith(cst);

  // Remove original instruction.
  ins->block()->discard(ins);
}

void ObjectMemoryView::visitConstantProto(MConstantProto* ins) {
  if (ins->getReceiverObject() != obj_) {
    return;
  }

  auto* cst = ins->protoObject();
  ins->replaceAllUsesWith(cst);
  ins->block()->discard(ins);
}

void ObjectMemoryView::visitIsObject(MIsObject* ins) {
  // Skip unrelated tests.
  if (ins->input() != obj_) {
    return;
  }

  auto* cst = MConstant::NewBoolean(alloc_, true);
  ins->block()->insertBefore(ins, cst);

  // Replace the test with a constant.
  ins->replaceAllUsesWith(cst);

  // Remove original instruction.
  ins->block()->discard(ins);
}

void ObjectMemoryView::visitAssertCanElidePostWriteBarrier(
    MAssertCanElidePostWriteBarrier* ins) {
  if (ins->object() != obj_) {
    return;
  }

  ins->block()->discard(ins);
}

// Extracts the constant element index of a load/store, looking through
// Spectre masks, bounds checks and int32 conversions.  Returns false when the
// index is not a known Int32 constant.
static bool IndexOf(MDefinition* ins, int32_t* res) {
  MOZ_ASSERT(ins->isLoadElement() || ins->isStoreElement());
  MDefinition* indexDef = ins->getOperand(1);  // ins->index();
  if (indexDef->isSpectreMaskIndex()) {
    indexDef = indexDef->toSpectreMaskIndex()->index();
  }
  if (indexDef->isBoundsCheck()) {
    indexDef = indexDef->toBoundsCheck()->index();
  }
  if (indexDef->isToNumberInt32()) {
    indexDef = indexDef->toToNumberInt32()->getOperand(0);
  }
  MConstant* indexDefConst = indexDef->maybeConstantValue();
  if (!indexDefConst || indexDefConst->type() != MIRType::Int32) {
    return false;
  }
  *res = indexDefConst->toInt32();
  return true;
}

// True for array allocations which ScalarReplacementOfArray knows how to
// replace by an MArrayState.
static inline bool IsOptimizableArrayInstruction(MInstruction* ins) {
  return ins->isNewArray() || ins->isNewArrayObject();
}

// We don't support storing holes when doing scalar replacement, so any
// optimizable MNewArrayObject instruction is guaranteed to be packed.
static inline bool IsPackedArray(MInstruction* ins) {
  return ins->isNewArrayObject();
}

// Returns False if the elements is not escaped and if it is optimizable by
// ScalarReplacementOfArray.
//
// Walks every use of the MElements instruction and rejects (returns true)
// anything other than in-bounds, constant-index loads/stores and a small
// set of length/packedness queries.
static bool IsElementEscaped(MDefinition* def, MInstruction* newArray,
                             uint32_t arraySize) {
  MOZ_ASSERT(def->isElements());
  MOZ_ASSERT(IsOptimizableArrayInstruction(newArray));

  JitSpewDef(JitSpew_Escape, "Check elements\n", def);
  JitSpewIndent spewIndent(JitSpew_Escape);

  for (MUseIterator i(def->usesBegin()); i != def->usesEnd(); i++) {
    // The MIRType::Elements cannot be captured in a resume point as
    // it does not represent a value allocation.
    MDefinition* access = (*i)->consumer()->toDefinition();

    switch (access->op()) {
      case MDefinition::Opcode::LoadElement: {
        MOZ_ASSERT(access->toLoadElement()->elements() == def);

        // If the index is not a constant then this index can alias
        // all others. We do not handle this case.
        int32_t index;
        if (!IndexOf(access, &index)) {
          JitSpewDef(JitSpew_Escape,
                     "has a load element with a non-trivial index\n", access);
          return true;
        }
        if (index < 0 || arraySize <= uint32_t(index)) {
          JitSpewDef(JitSpew_Escape,
                     "has a load element with an out-of-bound index\n", access);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::StoreElement: {
        MStoreElement* storeElem = access->toStoreElement();
        MOZ_ASSERT(storeElem->elements() == def);

        // StoreElement must bail out if it stores to a hole, in case
        // there is a setter on the prototype chain. If this StoreElement
        // might store to a hole, we can't scalar-replace it.
        if (storeElem->needsHoleCheck()) {
          JitSpewDef(JitSpew_Escape, "has a store element with a hole check\n",
                     storeElem);
          return true;
        }

        // If the index is not a constant then this index can alias
        // all others. We do not handle this case.
        int32_t index;
        if (!IndexOf(storeElem, &index)) {
          JitSpewDef(JitSpew_Escape,
                     "has a store element with a non-trivial index\n",
                     storeElem);
          return true;
        }
        if (index < 0 || arraySize <= uint32_t(index)) {
          JitSpewDef(JitSpew_Escape,
                     "has a store element with an out-of-bound index\n",
                     storeElem);
          return true;
        }

        // Dense element holes are written using MStoreHoleValueElement instead
        // of MStoreElement.
        MOZ_ASSERT(storeElem->value()->type() != MIRType::MagicHole);
        break;
      }

      case MDefinition::Opcode::SetInitializedLength:
        MOZ_ASSERT(access->toSetInitializedLength()->elements() == def);
        break;

      case MDefinition::Opcode::InitializedLength:
        MOZ_ASSERT(access->toInitializedLength()->elements() == def);
        break;

      case MDefinition::Opcode::ArrayLength:
        MOZ_ASSERT(access->toArrayLength()->elements() == def);
        break;

      case MDefinition::Opcode::ApplyArray:
        MOZ_ASSERT(access->toApplyArray()->getElements() == def);
        if (!IsPackedArray(newArray)) {
          JitSpewDef(JitSpew_Escape, "is not guaranteed to be packed\n",
                     access);
          return true;
        }
        break;

      case MDefinition::Opcode::ConstructArray:
        MOZ_ASSERT(access->toConstructArray()->getElements() == def);
        if (!IsPackedArray(newArray)) {
          JitSpewDef(JitSpew_Escape, "is not guaranteed to be packed\n",
                     access);
          return true;
        }
        break;

      case MDefinition::Opcode::GuardElementsArePacked:
        MOZ_ASSERT(access->toGuardElementsArePacked()->elements() == def);
        if (!IsPackedArray(newArray)) {
          JitSpewDef(JitSpew_Escape, "is not guaranteed to be packed\n",
                     access);
          return true;
        }
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", access);
        return true;
    }
  }
  JitSpew(JitSpew_Escape, "Elements is not escaped");
  return false;
}

// Returns False if the array is not escaped and if it is optimizable by
// ScalarReplacementOfArray.
//
// For the moment, this code is dumb as it only supports arrays which are not
// changing length, with only access with known constants.
//
// Note: recurses through shape/class/packedness guards and unboxes, so a
// chain of guards over the allocation is still considered non-escaping.
static bool IsArrayEscaped(MInstruction* ins, MInstruction* newArray) {
  MOZ_ASSERT(ins->type() == MIRType::Object);
  MOZ_ASSERT(IsOptimizableArrayInstruction(newArray));

  JitSpewDef(JitSpew_Escape, "Check array\n", ins);
  JitSpewIndent spewIndent(JitSpew_Escape);

  const Shape* shape;
  uint32_t length;
  if (newArray->isNewArrayObject()) {
    length = newArray->toNewArrayObject()->length();
    shape = newArray->toNewArrayObject()->shape();
  } else {
    length = newArray->toNewArray()->length();
    JSObject* templateObject = newArray->toNewArray()->templateObject();
    if (!templateObject) {
      JitSpew(JitSpew_Escape, "No template object defined.");
      return true;
    }
    shape = templateObject->shape();
  }

  // Arbitrary cap to keep the per-element state (and recover info) small.
  if (length >= 16) {
    JitSpew(JitSpew_Escape, "Array has too many elements");
    return true;
  }

  // Check if the object is escaped. If the object is not the first argument
  // of either a known Store / Load, then we consider it as escaped. This is a
  // cheap and conservative escape analysis.
  for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
    MNode* consumer = (*i)->consumer();
    if (!consumer->isDefinition()) {
      // Cannot optimize if it is observable from fun.arguments or others.
      if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
        JitSpew(JitSpew_Escape, "Observable array cannot be recovered");
        return true;
      }
      continue;
    }

    MDefinition* def = consumer->toDefinition();
    switch (def->op()) {
      case MDefinition::Opcode::Elements: {
        MElements* elem = def->toElements();
        MOZ_ASSERT(elem->object() == ins);
        if (IsElementEscaped(elem, newArray, length)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", elem);
          return true;
        }

        break;
      }

      case MDefinition::Opcode::GuardShape: {
        MGuardShape* guard = def->toGuardShape();
        if (shape != guard->shape()) {
          JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", guard);
          return true;
        }
        if (IsArrayEscaped(guard, newArray)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }

        break;
      }

      case MDefinition::Opcode::GuardToClass: {
        MGuardToClass* guard = def->toGuardToClass();
        if (shape->getObjectClass() != guard->getClass()) {
          JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", guard);
          return true;
        }
        if (IsArrayEscaped(guard, newArray)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }

        break;
      }

      case MDefinition::Opcode::GuardArrayIsPacked: {
        auto* guard = def->toGuardArrayIsPacked();
        if (!IsPackedArray(newArray)) {
          JitSpewDef(JitSpew_Escape, "is not guaranteed to be packed\n", def);
          return true;
        }
        if (IsArrayEscaped(guard, newArray)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::Unbox: {
        if (def->type() != MIRType::Object) {
          JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
          return true;
        }
        if (IsArrayEscaped(def->toInstruction(), newArray)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      // This instruction is supported for |JSOp::OptimizeSpreadCall|.
      case MDefinition::Opcode::Compare: {
        bool canFold;
        if (!def->toCompare()->tryFold(&canFold)) {
          JitSpewDef(JitSpew_Escape, "has an unsupported compare\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::PostWriteBarrier:
      case MDefinition::Opcode::PostWriteElementBarrier:
        break;

      // This instruction is a no-op used to verify that scalar replacement
      // is working as expected in jit-test.
      case MDefinition::Opcode::AssertRecoveredOnBailout:
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
        return true;
    }
  }

  JitSpew(JitSpew_Escape, "Array is not escaped");
  return false;
}

// This is just a class designed to extract the common elements across
// several different Array replacement strategies to avoid code duplication.
// There is nothing essential or sacred about it, it just felt like this
// was some pretty basic stuff we often want to do when we're replacing a
// true JS array with something which cheaply approximates it. When
// inheriting from this in the future, please validate that each of its
// core visit functions is safe to do in your new context.
// Shared base for array scalar-replacement views: removes guards and
// comparisons that become trivially true once the array allocation is known
// to be non-escaping.
class GenericArrayReplacer : public MDefinitionVisitorDefaultNoop {
 protected:
  TempAllocator& alloc_;
  // The array allocation being scalar-replaced.
  MInstruction* arr_;

  bool isTargetElements(MDefinition* elements);
  void discardInstruction(MInstruction* ins, MDefinition* elements);
  void visitLength(MInstruction* ins, MDefinition* elements);

  GenericArrayReplacer(TempAllocator& alloc, MInstruction* arr)
      : alloc_(alloc), arr_(arr) {}

 public:
  void visitGuardToClass(MGuardToClass* ins);
  void visitGuardShape(MGuardShape* ins);
  void visitGuardArrayIsPacked(MGuardArrayIsPacked* ins);
  void visitUnbox(MUnbox* ins);
  void visitCompare(MCompare* ins);
  void visitGuardElementsArePacked(MGuardElementsArePacked* ins);
};

// True iff |elements| is the MElements of the replaced array.
bool GenericArrayReplacer::isTargetElements(MDefinition* elements) {
  return elements->isElements() && elements->toElements()->object() == arr_;
}

// Discard |ins|, then discard the MElements too once it has no remaining
// live uses.
void GenericArrayReplacer::discardInstruction(MInstruction* ins,
                                              MDefinition* elements) {
  MOZ_ASSERT(elements->isElements());
  ins->block()->discard(ins);
  if (!elements->hasLiveDefUses()) {
    elements->block()->discard(elements->toInstruction());
  }
}

void GenericArrayReplacer::visitGuardToClass(MGuardToClass* ins) {
  // Skip guards on other objects.
  if (ins->object() != arr_) {
    return;
  }
  MOZ_ASSERT(ins->getClass() == &ArrayObject::class_);

  // Replace the guard with the array object.
  ins->replaceAllUsesWith(arr_);

  // Remove the guard.
  ins->block()->discard(ins);
}

void GenericArrayReplacer::visitGuardShape(MGuardShape* ins) {
  // Skip guards on other objects.
  if (ins->object() != arr_) {
    return;
  }

  // Replace the guard with the array object.
  ins->replaceAllUsesWith(arr_);

  // Remove the guard.
  ins->block()->discard(ins);
}

void GenericArrayReplacer::visitGuardArrayIsPacked(MGuardArrayIsPacked* ins) {
  // Skip guards on other objects.
  if (ins->array() != arr_) {
    return;
  }

  // Replace the guard by its object.
  ins->replaceAllUsesWith(arr_);

  // Remove original instruction.
  ins->block()->discard(ins);
}

void GenericArrayReplacer::visitUnbox(MUnbox* ins) {
  // Skip unrelated unboxes.
  if (ins->input() != arr_) {
    return;
  }
  MOZ_ASSERT(ins->type() == MIRType::Object);

  // Replace the unbox with the array object.
  ins->replaceAllUsesWith(arr_);

  // Remove the unbox.
  ins->block()->discard(ins);
}

void GenericArrayReplacer::visitCompare(MCompare* ins) {
  // Skip unrelated comparisons.
  if (ins->lhs() != arr_ && ins->rhs() != arr_) {
    return;
  }

  // The escape analysis only admitted comparisons that tryFold can resolve,
  // so folding must succeed here.
  bool folded;
  MOZ_ALWAYS_TRUE(ins->tryFold(&folded));

  auto* cst = MConstant::NewBoolean(alloc_, folded);
  ins->block()->insertBefore(ins, cst);

  // Replace the comparison with a constant.
  ins->replaceAllUsesWith(cst);

  // Remove original instruction.
  ins->block()->discard(ins);
}

void GenericArrayReplacer::visitGuardElementsArePacked(
    MGuardElementsArePacked* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isTargetElements(elements)) {
    return;
  }

  // Remove original instruction.
  discardInstruction(ins, elements);
}

// This class replaces every MStoreElement and MSetInitializedLength by an
// MArrayState which emulates the content of the array. All MLoadElement,
// MInitializedLength and MArrayLength are replaced by the corresponding value.
//
// In order to restore the value of the array correctly in case of bailouts, we
// replace all reference of the allocation by the MArrayState definition.
class ArrayMemoryView : public GenericArrayReplacer {
 public:
  using BlockState = MArrayState;
  static const char* phaseName;

 private:
  // Lazily-created constants shared across visitors.
  MConstant* undefinedVal_;
  MConstant* length_;
  MBasicBlock* startBlock_;
  // The array state at the current point of the traversal.
  BlockState* state_;

  // Used to improve the memory usage by sharing common modification.
  const MResumePoint* lastResumePoint_;

  bool oom_;

 public:
  ArrayMemoryView(TempAllocator& alloc, MInstruction* arr);

  MBasicBlock* startingBlock();
  bool initStartingState(BlockState** pState);

  void setEntryBlockState(BlockState* state);
  bool mergeIntoSuccessorState(MBasicBlock* curr, MBasicBlock* succ,
                               BlockState** pSuccState);

#ifdef DEBUG
  void assertSuccess();
#else
  void assertSuccess() {}
#endif

  bool oom() const { return oom_; }

 private:
  bool isArrayStateElements(MDefinition* elements);
  void discardInstruction(MInstruction* ins, MDefinition* elements);

 public:
  void visitResumePoint(MResumePoint* rp);
  void visitArrayState(MArrayState* ins);
  void visitStoreElement(MStoreElement* ins);
  void visitLoadElement(MLoadElement* ins);
  void visitSetInitializedLength(MSetInitializedLength* ins);
  void visitInitializedLength(MInitializedLength* ins);
  void visitArrayLength(MArrayLength* ins);
  void visitPostWriteBarrier(MPostWriteBarrier* ins);
  void visitPostWriteElementBarrier(MPostWriteElementBarrier* ins);
  void visitApplyArray(MApplyArray* ins);
  void visitConstructArray(MConstructArray* ins);
};

const char* ArrayMemoryView::phaseName = "Scalar Replacement of Array";

ArrayMemoryView::ArrayMemoryView(TempAllocator& alloc, MInstruction* arr)
    : GenericArrayReplacer(alloc, arr),
      undefinedVal_(nullptr),
      length_(nullptr),
      startBlock_(arr->block()),
      state_(nullptr),
      lastResumePoint_(nullptr),
      oom_(false) {
  // Annotate snapshots RValue such that we recover the store first.
  arr_->setIncompleteObject();

  // Annotate the instruction such that we do not replace it by a
  // Magic(JS_OPTIMIZED_OUT) in case of removed uses.
  arr_->setImplicitlyUsedUnchecked();
}

MBasicBlock* ArrayMemoryView::startingBlock() { return startBlock_; }

bool ArrayMemoryView::initStartingState(BlockState** pState) {
  // Uninitialized elements have an "undefined" value.
  undefinedVal_ = MConstant::NewUndefined(alloc_);
  MConstant* initLength = MConstant::NewInt32(alloc_, 0);
  arr_->block()->insertBefore(arr_, undefinedVal_);
  arr_->block()->insertBefore(arr_, initLength);

  // Create a new block state and insert at it at the location of the new array.
  BlockState* state = BlockState::New(alloc_, arr_, initLength);
  if (!state) {
    return false;
  }

  startBlock_->insertAfter(arr_, state);

  // Initialize the elements of the array state.
  state->initFromTemplateObject(alloc_, undefinedVal_);

  // Hold out of resume point until it is visited.
  state->setInWorklist();

  *pState = state;
  return true;
}

void ArrayMemoryView::setEntryBlockState(BlockState* state) { state_ = state; }

bool ArrayMemoryView::mergeIntoSuccessorState(MBasicBlock* curr,
                                              MBasicBlock* succ,
                                              BlockState** pSuccState) {
  BlockState* succState = *pSuccState;

  // When a block has no state yet, create an empty one for the
  // successor.
  if (!succState) {
    // If the successor is not dominated then the array cannot flow
    // in this basic block without a Phi. We know that no Phi exist
    // in non-dominated successors as the conservative escaped
    // analysis fails otherwise. Such condition can succeed if the
    // successor is a join at the end of a if-block and the array
    // only exists within the branch.
    if (!startBlock_->dominates(succ)) {
      return true;
    }

    // If there is only one predecessor, carry over the last state of the
    // block to the successor. As the block state is immutable, if the
    // current block has multiple successors, they will share the same entry
    // state.
    if (succ->numPredecessors() <= 1 || !state_->numElements()) {
      *pSuccState = state_;
      return true;
    }

    // If we have multiple predecessors, then we allocate one Phi node for
    // each predecessor, and create a new block state which only has phi
    // nodes. These would later be removed by the removal of redundant phi
    // nodes.
    succState = BlockState::Copy(alloc_, state_);
    if (!succState) {
      return false;
    }

    size_t numPreds = succ->numPredecessors();
    for (size_t index = 0; index < state_->numElements(); index++) {
      MPhi* phi = MPhi::New(alloc_.fallible());
      if (!phi || !phi->reserveLength(numPreds)) {
        return false;
      }

      // Fill the input of the successors Phi with undefined
      // values, and each block later fills the Phi inputs.
      for (size_t p = 0; p < numPreds; p++) {
        phi->addInput(undefinedVal_);
      }

      // Add Phi in the list of Phis of the basic block.
      succ->addPhi(phi);
      succState->setElement(index, phi);
    }

    // Insert the newly created block state instruction at the beginning
    // of the successor block, after all the phi nodes. Note that it
    // would be captured by the entry resume point of the successor
    // block.
    succ->insertBefore(succ->safeInsertTop(), succState);
    *pSuccState = succState;
  }

  MOZ_ASSERT_IF(succ == startBlock_, startBlock_->isLoopHeader());
  if (succ->numPredecessors() > 1 && succState->numElements() &&
      succ != startBlock_) {
    // We need to re-compute successorWithPhis as the previous EliminatePhis
    // phase might have removed all the Phis from the successor block.
    size_t currIndex;
    MOZ_ASSERT(!succ->phisEmpty());
    if (curr->successorWithPhis()) {
      MOZ_ASSERT(curr->successorWithPhis() == succ);
      currIndex = curr->positionInPhiSuccessor();
    } else {
      currIndex = succ->indexForPredecessor(curr);
      curr->setSuccessorWithPhis(succ, currIndex);
    }
    MOZ_ASSERT(succ->getPredecessor(currIndex) == curr);

    // Copy the current element states to the index of current block in all
    // the Phi created during the first visit of the successor.
    for (size_t index = 0; index < state_->numElements(); index++) {
      MPhi* phi = succState->getElement(index)->toPhi();
      phi->replaceOperand(currIndex, state_->getElement(index));
    }
  }

  return true;
}

#ifdef DEBUG
void ArrayMemoryView::assertSuccess() { MOZ_ASSERT(!arr_->hasLiveDefUses()); }
#endif

void ArrayMemoryView::visitResumePoint(MResumePoint* rp) {
  // As long as the MArrayState is not yet seen next to the allocation, we do
  // not patch the resume point to recover the side effects.
  if (!state_->isInWorklist()) {
    rp->addStore(alloc_, state_, lastResumePoint_);
    lastResumePoint_ = rp;
  }
}

void ArrayMemoryView::visitArrayState(MArrayState* ins) {
  // The state becomes eligible for resume-point capture once visited.
  if (ins->isInWorklist()) {
    ins->setNotInWorklist();
  }
}

bool ArrayMemoryView::isArrayStateElements(MDefinition* elements) {
  return elements->isElements() && elements->toElements()->object() == arr_;
}

void ArrayMemoryView::discardInstruction(MInstruction* ins,
                                         MDefinition* elements) {
  MOZ_ASSERT(elements->isElements());
  ins->block()->discard(ins);
  if (!elements->hasLiveDefUses()) {
    elements->block()->discard(elements->toInstruction());
  }
}

void ArrayMemoryView::visitStoreElement(MStoreElement* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // Register value of the setter in the state.
  int32_t index;
  MOZ_ALWAYS_TRUE(IndexOf(ins, &index));
  state_ = BlockState::Copy(alloc_, state_);
  if (!state_) {
    oom_ = true;
    return;
  }

  state_->setElement(index, ins->value());
  ins->block()->insertBefore(ins, state_);

  // Remove original instruction.
  discardInstruction(ins, elements);
}

void ArrayMemoryView::visitLoadElement(MLoadElement* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // Replace by the value contained at the index.
  int32_t index;
  MOZ_ALWAYS_TRUE(IndexOf(ins, &index));

  // The only way to store a hole value in a new array is with
  // StoreHoleValueElement, which IsElementEscaped does not allow.
  // Therefore, we do not have to do a hole check.
  MDefinition* element = state_->getElement(index);
  MOZ_ASSERT(element->type() != MIRType::MagicHole);

  ins->replaceAllUsesWith(element);

  // Remove original instruction.
  discardInstruction(ins, elements);
}

void ArrayMemoryView::visitSetInitializedLength(MSetInitializedLength* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // Replace by the new initialized length. Note that the argument of
  // MSetInitializedLength is the last index and not the initialized length.
  // To obtain the length, we need to add 1 to it, and thus we need to create
  // a new constant that we register in the ArrayState.
  state_ = BlockState::Copy(alloc_, state_);
  if (!state_) {
    oom_ = true;
    return;
  }

  int32_t initLengthValue = ins->index()->maybeConstantValue()->toInt32() + 1;
  MConstant* initLength = MConstant::NewInt32(alloc_, initLengthValue);
  ins->block()->insertBefore(ins, initLength);
  ins->block()->insertBefore(ins, state_);
  state_->setInitializedLength(initLength);

  // Remove original instruction.
  discardInstruction(ins, elements);
}

void ArrayMemoryView::visitInitializedLength(MInitializedLength* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // Replace by the value of the length.
  ins->replaceAllUsesWith(state_->initializedLength());

  // Remove original instruction.
  discardInstruction(ins, elements);
}

void ArrayMemoryView::visitArrayLength(MArrayLength* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // Replace by the value of the length.
  // The length constant is created lazily and shared by all MArrayLength
  // replacements of this array.
  if (!length_) {
    length_ = MConstant::NewInt32(alloc_, state_->numElements());
    arr_->block()->insertBefore(arr_, length_);
  }
  ins->replaceAllUsesWith(length_);

  // Remove original instruction.
  discardInstruction(ins, elements);
}

void ArrayMemoryView::visitPostWriteBarrier(MPostWriteBarrier* ins) {
  // Skip barriers on other objects.
  if (ins->object() != arr_) {
    return;
  }

  // Remove original instruction.
  ins->block()->discard(ins);
}

void ArrayMemoryView::visitPostWriteElementBarrier(
    MPostWriteElementBarrier* ins) {
  // Skip barriers on other objects.
  if (ins->object() != arr_) {
    return;
  }

  // Remove original instruction.
  ins->block()->discard(ins);
}

void ArrayMemoryView::visitApplyArray(MApplyArray* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->getElements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  uint32_t numElements = state_->numElements();

  // Turn the apply over the replaced array into a plain call whose
  // arguments are the tracked element values.
  CallInfo callInfo(alloc_, /*constructing=*/false, ins->ignoresReturnValue());
  if (!callInfo.initForApplyArray(ins->getFunction(), ins->getThis(),
                                  numElements)) {
    oom_ = true;
    return;
  }

  for (uint32_t i = 0; i < numElements; i++) {
    auto* element = state_->getElement(i);
    MOZ_ASSERT(element->type() != MIRType::MagicHole);

    callInfo.initArg(i, element);
  }

  auto addUndefined = [this]() { return undefinedVal_; };

  bool needsThisCheck = false;
  bool isDOMCall = false;
  auto* call = MakeCall(alloc_, addUndefined, callInfo, needsThisCheck,
                        ins->getSingleTarget(), isDOMCall);
  if (!call) {
    oom_ = true;
    return;
  }
  if (!ins->maybeCrossRealm()) {
    call->setNotCrossRealm();
  }

  ins->block()->insertBefore(ins, call);
  ins->replaceAllUsesWith(call);

  call->stealResumePoint(ins);

  // Remove original instruction.
  discardInstruction(ins, elements);
}

void ArrayMemoryView::visitConstructArray(MConstructArray* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->getElements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  uint32_t numElements = state_->numElements();

  // Same rewrite as visitApplyArray, but for a constructing call.
  CallInfo callInfo(alloc_, /*constructing=*/true, ins->ignoresReturnValue());
  if (!callInfo.initForConstructArray(ins->getFunction(), ins->getThis(),
                                      ins->getNewTarget(), numElements)) {
    oom_ = true;
    return;
  }

  for (uint32_t i = 0; i < numElements; i++) {
    auto* element = state_->getElement(i);
    MOZ_ASSERT(element->type() != MIRType::MagicHole);

    callInfo.initArg(i, element);
  }

  auto addUndefined = [this]() { return undefinedVal_; };

  bool needsThisCheck = ins->needsThisCheck();
  bool isDOMCall = false;
  auto* call = MakeCall(alloc_, addUndefined, callInfo, needsThisCheck,
                        ins->getSingleTarget(), isDOMCall);
  if (!call) {
    oom_ = true;
    return;
  }
  if (!ins->maybeCrossRealm()) {
    call->setNotCrossRealm();
  }

  ins->block()->insertBefore(ins, call);
  ins->replaceAllUsesWith(call);

  call->stealResumePoint(ins);

  // Remove original instruction.
  discardInstruction(ins, elements);
}

static inline bool IsOptimizableArgumentsInstruction(MInstruction* ins) {
  return ins->isCreateArgumentsObject() ||
         ins->isCreateInlinedArgumentsObject();
}

// Replaces an arguments-object allocation whose uses are all supported
// accessors/guards with direct uses of the underlying argument values.
class ArgumentsReplacer : public MDefinitionVisitorDefaultNoop {
 private:
  const MIRGenerator* mir_;
  MIRGraph& graph_;
  // The MCreateArgumentsObject / MCreateInlinedArgumentsObject being replaced.
  MInstruction* args_;

  bool oom_ = false;

  TempAllocator& alloc() { return graph_.alloc(); }

  bool isInlinedArguments() const {
    return args_->isCreateInlinedArgumentsObject();
  }

  MNewArrayObject* inlineArgsArray(MInstruction* ins, Shape* shape,
                                   uint32_t begin, uint32_t count);

  void visitGuardToClass(MGuardToClass* ins);
  void visitGuardProto(MGuardProto* ins);
  void visitGuardArgumentsObjectFlags(MGuardArgumentsObjectFlags* ins);
  void visitUnbox(MUnbox* ins);
  void visitGetArgumentsObjectArg(MGetArgumentsObjectArg* ins);
  void visitLoadArgumentsObjectArg(MLoadArgumentsObjectArg* ins);
  void visitLoadArgumentsObjectArgHole(MLoadArgumentsObjectArgHole* ins);
  void visitInArgumentsObjectArg(MInArgumentsObjectArg* ins);
  void visitArgumentsObjectLength(MArgumentsObjectLength* ins);
  void visitApplyArgsObj(MApplyArgsObj* ins);
  void visitArrayFromArgumentsObject(MArrayFromArgumentsObject* ins);
  void visitArgumentsSlice(MArgumentsSlice* ins);
  void visitLoadFixedSlot(MLoadFixedSlot* ins);

  bool oom() const { return oom_; }

 public:
  ArgumentsReplacer(const MIRGenerator* mir, MIRGraph& graph,
                    MInstruction* args)
      : mir_(mir), graph_(graph), args_(args) {
    MOZ_ASSERT(IsOptimizableArgumentsInstruction(args_));
  }

  bool escapes(MInstruction* ins, bool guardedForMapped = false);
  bool run();
  void assertSuccess();
};

// Returns false if the arguments object does not escape.
bool ArgumentsReplacer::escapes(MInstruction* ins, bool guardedForMapped) {
  MOZ_ASSERT(ins->type() == MIRType::Object);

  JitSpewDef(JitSpew_Escape, "Check arguments object\n", ins);
  JitSpewIndent spewIndent(JitSpew_Escape);

  // We can replace inlined arguments in scripts with OSR entries, but
  // the outermost arguments object has already been allocated before
  // we enter via OSR and can't be replaced.
  if (ins->isCreateArgumentsObject() && graph_.osrBlock()) {
    JitSpew(JitSpew_Escape, "Can't replace outermost OSR arguments");
    return true;
  }

  // Check all uses to see whether they can be supported without
  // allocating an ArgumentsObject.
  for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
    MNode* consumer = (*i)->consumer();

    // If a resume point can observe this instruction, we can only optimize
    // if it is recoverable.
    if (consumer->isResumePoint()) {
      if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
        JitSpew(JitSpew_Escape, "Observable args object cannot be recovered");
        return true;
      }
      continue;
    }

    MDefinition* def = consumer->toDefinition();
    switch (def->op()) {
      case MDefinition::Opcode::GuardToClass: {
        MGuardToClass* guard = def->toGuardToClass();
        if (!guard->isArgumentsObjectClass()) {
          JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", guard);
          return true;
        }
        bool isMapped = guard->getClass() == &MappedArgumentsObject::class_;
        if (escapes(guard, isMapped)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardProto: {
        if (escapes(def->toInstruction(), guardedForMapped)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardArgumentsObjectFlags: {
        if (escapes(def->toInstruction(), guardedForMapped)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::Unbox: {
        if (def->type() != MIRType::Object) {
          JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
          return true;
        }
        if (escapes(def->toInstruction())) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::LoadFixedSlot: {
        MLoadFixedSlot* load = def->toLoadFixedSlot();

        // We can replace arguments.callee.
        if (load->slot() == ArgumentsObject::CALLEE_SLOT) {
          MOZ_ASSERT(guardedForMapped);
          continue;
        }
        JitSpew(JitSpew_Escape, "is escaped by unsupported LoadFixedSlot\n");
        return true;
      }

      case MDefinition::Opcode::ApplyArgsObj: {
        if (ins == def->toApplyArgsObj()->getThis()) {
          JitSpew(JitSpew_Escape, "is escaped as |this| arg of ApplyArgsObj\n");
          return true;
        }
        MOZ_ASSERT(ins == def->toApplyArgsObj()->getArgsObj());
        break;
      }

      // This is a replaceable consumer.
      case MDefinition::Opcode::ArgumentsObjectLength:
      case MDefinition::Opcode::GetArgumentsObjectArg:
      case MDefinition::Opcode::LoadArgumentsObjectArg:
      case MDefinition::Opcode::LoadArgumentsObjectArgHole:
      case MDefinition::Opcode::InArgumentsObjectArg:
      case MDefinition::Opcode::ArrayFromArgumentsObject:
      case MDefinition::Opcode::ArgumentsSlice:
        break;

      // This instruction is a no-op used to test that scalar replacement
      // is working as expected.
      case MDefinition::Opcode::AssertRecoveredOnBailout:
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
        return true;
    }
  }

  JitSpew(JitSpew_Escape, "ArgumentsObject is not escaped");
  return false;
}

// Replacing the arguments object is simpler than replacing an object
// or array, because the arguments object does not change state.
bool ArgumentsReplacer::run() {
  MBasicBlock* startBlock = args_->block();

  // Iterate over each basic block.
  for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
       block != graph_.rpoEnd(); block++) {
    if (mir_->shouldCancel("Scalar replacement of Arguments Object")) {
      return false;
    }

    // Iterates over phis and instructions.
    // We do not have to visit resume points. Any resume points that capture
    // the argument object will be handled by the Sink pass.
    for (MDefinitionIterator iter(*block); iter;) {
      // Increment the iterator before visiting the instruction, as the
      // visit function might discard itself from the basic block.
      MDefinition* def = *iter++;
      switch (def->op()) {
#define MIR_OP(op)              \
  case MDefinition::Opcode::op: \
    visit##op(def->to##op());   \
    break;
        MIR_OPCODE_LIST(MIR_OP)
#undef MIR_OP
      }
      if (!graph_.alloc().ensureBallast()) {
        return false;
      }
      if (oom()) {
        return false;
      }
    }
  }

  assertSuccess();
  return true;
}

void ArgumentsReplacer::assertSuccess() {
  MOZ_ASSERT(args_->canRecoverOnBailout());
  MOZ_ASSERT(!args_->hasLiveDefUses());
}

void ArgumentsReplacer::visitGuardToClass(MGuardToClass* ins) {
  // Skip guards on other objects.
  if (ins->object() != args_) {
    return;
  }
  MOZ_ASSERT(ins->isArgumentsObjectClass());

  // Replace the guard with the args object.
2061 ins->replaceAllUsesWith(args_); 2062 2063 // Remove the guard. 2064 ins->block()->discard(ins); 2065 } 2066 2067 void ArgumentsReplacer::visitGuardProto(MGuardProto* ins) { 2068 // Skip guards on other objects. 2069 if (ins->object() != args_) { 2070 return; 2071 } 2072 2073 // The prototype can only be changed through explicit operations, for example 2074 // by calling |Reflect.setPrototype|. We have already determined that the args 2075 // object doesn't escape, so its prototype can't be mutated. 2076 2077 // Replace the guard with the args object. 2078 ins->replaceAllUsesWith(args_); 2079 2080 // Remove the guard. 2081 ins->block()->discard(ins); 2082 } 2083 2084 void ArgumentsReplacer::visitGuardArgumentsObjectFlags( 2085 MGuardArgumentsObjectFlags* ins) { 2086 // Skip other arguments objects. 2087 if (ins->argsObject() != args_) { 2088 return; 2089 } 2090 2091 #ifdef DEBUG 2092 // Each *_OVERRIDDEN_BIT can only be set by setting or deleting a 2093 // property of the args object. We have already determined that the 2094 // args object doesn't escape, so its properties can't be mutated. 2095 // 2096 // FORWARDED_ARGUMENTS_BIT is set if any mapped argument is closed 2097 // over, which is an immutable property of the script. Because we 2098 // are replacing the args object for a known script, we can check 2099 // the flag once, which is done when we first attach the CacheIR, 2100 // and rely on it. (Note that this wouldn't be true if we didn't 2101 // know the origin of args_, because it could be passed in from 2102 // another function.) 
  // Flags the guard may legitimately check; anything else would indicate the
  // escape analysis admitted an unsupported guard.
  uint32_t supportedBits = ArgumentsObject::LENGTH_OVERRIDDEN_BIT |
                           ArgumentsObject::ITERATOR_OVERRIDDEN_BIT |
                           ArgumentsObject::ELEMENT_OVERRIDDEN_BIT |
                           ArgumentsObject::CALLEE_OVERRIDDEN_BIT |
                           ArgumentsObject::FORWARDED_ARGUMENTS_BIT;

  MOZ_ASSERT((ins->flags() & ~supportedBits) == 0);
  MOZ_ASSERT_IF(ins->flags() & ArgumentsObject::FORWARDED_ARGUMENTS_BIT,
                !args_->block()->info().anyFormalIsForwarded());
#endif

  // Replace the guard with the args object.
  ins->replaceAllUsesWith(args_);

  // Remove the guard.
  ins->block()->discard(ins);
}

void ArgumentsReplacer::visitUnbox(MUnbox* ins) {
  // Skip unrelated unboxes.
  if (ins->getOperand(0) != args_) {
    return;
  }
  MOZ_ASSERT(ins->type() == MIRType::Object);

  // Replace the unbox with the args object.
  ins->replaceAllUsesWith(args_);

  // Remove the unbox.
  ins->block()->discard(ins);
}

// Replace a read of arguments[argno] (constant index) with either the inlined
// actual argument, or a load of the frame's argument slot.
void ArgumentsReplacer::visitGetArgumentsObjectArg(
    MGetArgumentsObjectArg* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

  // We don't support setting arguments in ArgumentsReplacer::escapes,
  // so we can load the initial value of the argument without worrying
  // about it being stale.
  MDefinition* getArg;
  if (isInlinedArguments()) {
    // Inlined frames have direct access to the actual arguments.
    auto* actualArgs = args_->toCreateInlinedArgumentsObject();
    if (ins->argno() < actualArgs->numActuals()) {
      getArg = actualArgs->getArg(ins->argno());
    } else {
      // Omitted arguments are not mapped to the arguments object, and
      // will always be undefined.
      auto* undef = MConstant::NewUndefined(alloc());
      ins->block()->insertBefore(ins, undef);
      getArg = undef;
    }
  } else {
    // Load the argument from the frame.
    auto* index = MConstant::NewInt32(alloc(), ins->argno());
    ins->block()->insertBefore(ins, index);

    auto* loadArg = MGetFrameArgument::New(alloc(), index);
    ins->block()->insertBefore(ins, loadArg);
    getArg = loadArg;
  }
  ins->replaceAllUsesWith(getArg);

  // Remove original instruction.
  ins->block()->discard(ins);
}

// Replace a dynamically-indexed arguments[i] load with a bounds-checked read
// of either the inlined actual arguments or the frame arguments.
void ArgumentsReplacer::visitLoadArgumentsObjectArg(
    MLoadArgumentsObjectArg* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

  MDefinition* index = ins->index();

  MInstruction* loadArg;
  if (isInlinedArguments()) {
    auto* actualArgs = args_->toCreateInlinedArgumentsObject();

    // Insert bounds check.
    auto* length = MConstant::NewInt32(alloc(), actualArgs->numActuals());
    ins->block()->insertBefore(ins, length);

    MInstruction* check = MBoundsCheck::New(alloc(), index, length);
    check->setBailoutKind(ins->bailoutKind());
    ins->block()->insertBefore(ins, check);

    // Pin the check in place if earlier compilations bailed out on a hoisted
    // bounds check.
    if (mir_->outerInfo().hadBoundsCheckBailout()) {
      check->setNotMovable();
    }

    loadArg = MGetInlinedArgument::New(alloc(), check, actualArgs);
    if (!loadArg) {
      oom_ = true;
      return;
    }
  } else {
    // Insert bounds check.
    auto* length = MArgumentsLength::New(alloc());
    ins->block()->insertBefore(ins, length);

    MInstruction* check = MBoundsCheck::New(alloc(), index, length);
    check->setBailoutKind(ins->bailoutKind());
    ins->block()->insertBefore(ins, check);

    if (mir_->outerInfo().hadBoundsCheckBailout()) {
      check->setNotMovable();
    }

    if (JitOptions.spectreIndexMasking) {
      check = MSpectreMaskIndex::New(alloc(), check, length);
      ins->block()->insertBefore(ins, check);
    }

    loadArg = MGetFrameArgument::New(alloc(), check);
  }
  ins->block()->insertBefore(ins, loadArg);
  ins->replaceAllUsesWith(loadArg);

  // Remove original instruction.
  ins->block()->discard(ins);
}

// Like visitLoadArgumentsObjectArg, but lowers to the *Hole instruction
// variants and inserts no explicit bounds check.
void ArgumentsReplacer::visitLoadArgumentsObjectArgHole(
    MLoadArgumentsObjectArgHole* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

  MDefinition* index = ins->index();

  MInstruction* loadArg;
  if (isInlinedArguments()) {
    auto* actualArgs = args_->toCreateInlinedArgumentsObject();

    loadArg = MGetInlinedArgumentHole::New(alloc(), index, actualArgs);
    if (!loadArg) {
      oom_ = true;
      return;
    }
  } else {
    auto* length = MArgumentsLength::New(alloc());
    ins->block()->insertBefore(ins, length);

    loadArg = MGetFrameArgumentHole::New(alloc(), index, length);
  }
  loadArg->setBailoutKind(ins->bailoutKind());
  ins->block()->insertBefore(ins, loadArg);
  ins->replaceAllUsesWith(loadArg);

  // Remove original instruction.
  ins->block()->discard(ins);
}

// Replace an "is index present in arguments" test with a comparison of the
// (guarded non-negative) index against the argument count.
void ArgumentsReplacer::visitInArgumentsObjectArg(MInArgumentsObjectArg* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

  MDefinition* index = ins->index();

  // Ensure the index is non-negative.
  auto* guardedIndex = MGuardInt32IsNonNegative::New(alloc(), index);
  guardedIndex->setBailoutKind(ins->bailoutKind());
  ins->block()->insertBefore(ins, guardedIndex);

  // The number of actual arguments: a compile-time constant for inlined
  // frames, otherwise read from the physical frame.
  MInstruction* length;
  if (isInlinedArguments()) {
    uint32_t argc = args_->toCreateInlinedArgumentsObject()->numActuals();
    length = MConstant::NewInt32(alloc(), argc);
  } else {
    length = MArgumentsLength::New(alloc());
  }
  ins->block()->insertBefore(ins, length);

  // |index in arguments| holds iff 0 <= index < length.
  auto* compare = MCompare::New(alloc(), guardedIndex, length, JSOp::Lt,
                                MCompare::Compare_Int32);
  ins->block()->insertBefore(ins, compare);
  ins->replaceAllUsesWith(compare);

  // Remove original instruction.
  ins->block()->discard(ins);
}

// Replace arguments.length with a constant (inlined frame) or with
// MArgumentsLength (physical frame).
void ArgumentsReplacer::visitArgumentsObjectLength(
    MArgumentsObjectLength* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

  MInstruction* length;
  if (isInlinedArguments()) {
    uint32_t argc = args_->toCreateInlinedArgumentsObject()->numActuals();
    length = MConstant::NewInt32(alloc(), argc);
  } else {
    length = MArgumentsLength::New(alloc());
  }
  ins->block()->insertBefore(ins, length);
  ins->replaceAllUsesWith(length);

  // Remove original instruction.
  ins->block()->discard(ins);
}

// Replace an apply that consumes the arguments object: for inlined frames,
// build a direct call (via MakeCall) passing the known actuals; otherwise
// lower to MApplyArgs reading the frame's arguments.
void ArgumentsReplacer::visitApplyArgsObj(MApplyArgsObj* ins) {
  // Skip other arguments objects.
  if (ins->getArgsObj() != args_) {
    return;
  }

  MInstruction* newIns;
  if (isInlinedArguments()) {
    auto* actualArgs = args_->toCreateInlinedArgumentsObject();
    CallInfo callInfo(alloc(), /*constructing=*/false,
                      ins->ignoresReturnValue());

    callInfo.initForApplyInlinedArgs(ins->getFunction(), ins->getThis(),
                                     actualArgs->numActuals());
    for (uint32_t i = 0; i < actualArgs->numActuals(); i++) {
      callInfo.initArg(i, actualArgs->getArg(i));
    }

    // Lazily materializes an undefined constant when MakeCall needs one for
    // padding missing formals.
    auto addUndefined = [this, &ins]() -> MConstant* {
      MConstant* undef = MConstant::NewUndefined(alloc());
      ins->block()->insertBefore(ins, undef);
      return undef;
    };

    bool needsThisCheck = false;
    bool isDOMCall = false;
    auto* call = MakeCall(alloc(), addUndefined, callInfo, needsThisCheck,
                          ins->getSingleTarget(), isDOMCall);
    if (!call) {
      oom_ = true;
      return;
    }
    if (!ins->maybeCrossRealm()) {
      call->setNotCrossRealm();
    }
    newIns = call;
  } else {
    auto* numArgs = MArgumentsLength::New(alloc());
    ins->block()->insertBefore(ins, numArgs);

    // TODO: Should we rename MApplyArgs?
    auto* apply = MApplyArgs::New(alloc(), ins->getSingleTarget(),
                                  ins->getFunction(), numArgs, ins->getThis());
    apply->setBailoutKind(ins->bailoutKind());
    if (!ins->maybeCrossRealm()) {
      apply->setNotCrossRealm();
    }
    if (ins->ignoresReturnValue()) {
      apply->setIgnoresReturnValue();
    }
    newIns = apply;
  }

  ins->block()->insertBefore(ins, newIns);
  ins->replaceAllUsesWith(newIns);

  // The call replaces the apply in resume points as well.
  newIns->stealResumePoint(ins);
  ins->block()->discard(ins);
}

// Materialize a plain array holding |count| inlined actual arguments starting
// at |begin|, inserted before |ins|. Used when replacing spread/slice uses of
// the arguments object.
MNewArrayObject* ArgumentsReplacer::inlineArgsArray(MInstruction* ins,
                                                    Shape* shape,
                                                    uint32_t begin,
                                                    uint32_t count) {
  auto* actualArgs = args_->toCreateInlinedArgumentsObject();

  // Contrary to |WarpBuilder::build_Rest()|, we can always create
  // MNewArrayObject, because we're guaranteed to have a shape and all
  // arguments can be stored into fixed elements.
  static_assert(
      gc::CanUseFixedElementsForArray(ArgumentsObject::MaxInlinedArgs));

  gc::Heap heap = gc::Heap::Default;

  // Allocate an array of the correct size.
  auto* shapeConstant = MConstant::NewShape(alloc(), shape);
  ins->block()->insertBefore(ins, shapeConstant);

  auto* newArray = MNewArrayObject::New(alloc(), shapeConstant, count, heap);
  ins->block()->insertBefore(ins, newArray);

  if (count) {
    auto* elements = MElements::New(alloc(), newArray);
    ins->block()->insertBefore(ins, elements);

    // Store each selected actual argument into the new array's elements.
    MConstant* index = nullptr;
    for (uint32_t i = 0; i < count; i++) {
      index = MConstant::NewInt32(alloc(), i);
      ins->block()->insertBefore(ins, index);

      MDefinition* arg = actualArgs->getArg(begin + i);
      auto* store = MStoreElement::NewUnbarriered(alloc(), elements, index, arg,
                                                  /* needsHoleCheck = */ false);
      ins->block()->insertBefore(ins, store);

      auto* barrier = MPostWriteBarrier::New(alloc(), newArray, arg);
      ins->block()->insertBefore(ins, barrier);
    }

    // |index| is the last written slot; initialized length = index + 1.
    auto* initLength = MSetInitializedLength::New(alloc(), elements, index);
    ins->block()->insertBefore(ins, initLength);
  }

  return newArray;
}

void ArgumentsReplacer::visitArrayFromArgumentsObject(
    MArrayFromArgumentsObject* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

  // We can only replace `arguments` because we've verified that the `arguments`
  // object hasn't been modified in any way. This implies that the arguments
  // stored in the stack frame haven't been changed either.
  //
  // The idea to replace `arguments` in spread calls `f(...arguments)` is now as
  // follows:
  // We replace |MArrayFromArgumentsObject| with the identical instructions we
  // emit when building a rest-array object, cf. |WarpBuilder::build_Rest()|. In
  // a next step, scalar replacement will then replace these new instructions
  // themselves.

  Shape* shape = ins->shape();
  MOZ_ASSERT(shape);

  MDefinition* replacement;
  if (isInlinedArguments()) {
    auto* actualArgs = args_->toCreateInlinedArgumentsObject();
    uint32_t numActuals = actualArgs->numActuals();
    MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);

    replacement = inlineArgsArray(ins, shape, 0, numActuals);
  } else {
    // We can use |MRest| to read all arguments, because we've guaranteed that
    // the arguments stored in the stack frame haven't changed; see the comment
    // at the start of this method.

    auto* numActuals = MArgumentsLength::New(alloc());
    ins->block()->insertBefore(ins, numActuals);

    // Set |numFormals| to zero to read all arguments, including any formals.
    uint32_t numFormals = 0;

    auto* rest = MRest::New(alloc(), numActuals, numFormals, shape);
    ins->block()->insertBefore(ins, rest);

    replacement = rest;
  }

  ins->replaceAllUsesWith(replacement);

  // Remove original instruction.
  ins->block()->discard(ins);
}

// Clamp a constant slice boundary into [0, length]; a negative value counts
// back from |length|, matching Array.prototype.slice index normalization.
static uint32_t NormalizeSlice(MDefinition* def, uint32_t length) {
  int32_t value = def->toConstant()->toInt32();
  if (value < 0) {
    return std::max(int32_t(uint32_t(value) + length), 0);
  }
  return std::min(uint32_t(value), length);
}

void ArgumentsReplacer::visitArgumentsSlice(MArgumentsSlice* ins) {
  // Skip other arguments objects.
  if (ins->object() != args_) {
    return;
  }

  // Optimise the common pattern |Array.prototype.slice.call(arguments, begin)|,
  // where |begin| is a non-negative, constant int32.
  //
  // An absent end-index is replaced by |arguments.length|, so we try to match
  // |Array.prototype.slice.call(arguments, begin, arguments.length)|.
  if (isInlinedArguments()) {
    // When this is an inlined arguments, |arguments.length| has been replaced
    // by a constant.
    if (ins->begin()->isConstant() && ins->end()->isConstant()) {
      auto* actualArgs = args_->toCreateInlinedArgumentsObject();
      uint32_t numActuals = actualArgs->numActuals();
      MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);

      // Both bounds are compile-time constants, so the slice can be built
      // eagerly as a fixed-size array.
      uint32_t begin = NormalizeSlice(ins->begin(), numActuals);
      uint32_t end = NormalizeSlice(ins->end(), numActuals);
      uint32_t count = end > begin ? end - begin : 0;
      MOZ_ASSERT(count <= numActuals);

      Shape* shape = ins->templateObj()->shape();
      auto* newArray = inlineArgsArray(ins, shape, begin, count);

      ins->replaceAllUsesWith(newArray);

      // Remove original instruction.
      ins->block()->discard(ins);
      return;
    }
  } else {
    // Otherwise |arguments.length| is emitted as MArgumentsLength.
    if (ins->begin()->isConstant() && ins->end()->isArgumentsLength()) {
      int32_t begin = ins->begin()->toConstant()->toInt32();
      if (begin >= 0) {
        auto* numActuals = MArgumentsLength::New(alloc());
        ins->block()->insertBefore(ins, numActuals);

        // Set |numFormals| to read all arguments starting at |begin|.
        uint32_t numFormals = begin;

        Shape* shape = ins->templateObj()->shape();

        // Use MRest because it can be scalar replaced, which enables further
        // optimizations.
        auto* rest = MRest::New(alloc(), numActuals, numFormals, shape);
        ins->block()->insertBefore(ins, rest);

        ins->replaceAllUsesWith(rest);

        // Remove original instruction.
        ins->block()->discard(ins);
        return;
      }
    }
  }

  // General case: normalize both slice terms at runtime and emit a dedicated
  // slice instruction over the inlined or frame arguments.
  MInstruction* numArgs;
  if (isInlinedArguments()) {
    uint32_t argc = args_->toCreateInlinedArgumentsObject()->numActuals();
    numArgs = MConstant::NewInt32(alloc(), argc);
  } else {
    numArgs = MArgumentsLength::New(alloc());
  }
  ins->block()->insertBefore(ins, numArgs);

  auto* begin = MNormalizeSliceTerm::New(alloc(), ins->begin(), numArgs);
  ins->block()->insertBefore(ins, begin);

  auto* end = MNormalizeSliceTerm::New(alloc(), ins->end(), numArgs);
  ins->block()->insertBefore(ins, end);

  auto* beginMin = MMinMax::NewMin(alloc(), begin, end, MIRType::Int32);
  ins->block()->insertBefore(ins, beginMin);

  // Safe to truncate because both operands are positive and end >= beginMin.
  auto* count = MSub::New(alloc(), end, beginMin, MIRType::Int32);
  count->setTruncateKind(TruncateKind::Truncate);
  ins->block()->insertBefore(ins, count);

  MInstruction* replacement;
  if (isInlinedArguments()) {
    auto* actualArgs = args_->toCreateInlinedArgumentsObject();
    replacement =
        MInlineArgumentsSlice::New(alloc(), beginMin, count, actualArgs,
                                   ins->templateObj(), ins->initialHeap());
    if (!replacement) {
      oom_ = true;
      return;
    }
  } else {
    replacement = MFrameArgumentsSlice::New(
        alloc(), beginMin, count, ins->templateObj(), ins->initialHeap());
  }
  ins->block()->insertBefore(ins, replacement);

  ins->replaceAllUsesWith(replacement);

  // Remove original instruction.
  ins->block()->discard(ins);
}

void ArgumentsReplacer::visitLoadFixedSlot(MLoadFixedSlot* ins) {
  // Skip other arguments objects.
  if (ins->object() != args_) {
    return;
  }

  // Escape analysis only admitted loads of the callee slot
  // (arguments.callee); anything else would have escaped.
  MOZ_ASSERT(ins->slot() == ArgumentsObject::CALLEE_SLOT);

  MDefinition* replacement;
  if (isInlinedArguments()) {
    replacement = args_->toCreateInlinedArgumentsObject()->getCallee();
  } else {
    auto* callee = MCallee::New(alloc());
    ins->block()->insertBefore(ins, callee);
    replacement = callee;
  }
  ins->replaceAllUsesWith(replacement);

  // Remove original instruction.
  ins->block()->discard(ins);
}

// Only MRest instructions are candidates for rest-array scalar replacement.
static inline bool IsOptimizableRestInstruction(MInstruction* ins) {
  return ins->isRest();
}

// Scalar replacement for rest-parameter arrays (MRest): rewrites every
// supported consumer in terms of the frame's arguments so the array
// allocation itself becomes dead.
class RestReplacer : public GenericArrayReplacer {
 private:
  const MIRGenerator* mir_;
  MIRGraph& graph_;

  // The replaced allocation, viewed as the MRest it is guaranteed to be.
  MRest* rest() const { return arr_->toRest(); }
  // Builds the rest array's length expression; see definition for details.
  MDefinition* restLength(MInstruction* ins);

  void visitLength(MInstruction* ins, MDefinition* elements);
  void visitLoadElement(MLoadElement* ins);
  void visitArrayLength(MArrayLength* ins);
  void visitInitializedLength(MInitializedLength* ins);
  void visitApplyArray(MApplyArray* ins);
  void visitConstructArray(MConstructArray* ins);

  // Escape check for the array's elements pointer.
  bool escapes(MElements* ins);

 public:
  RestReplacer(const MIRGenerator* mir, MIRGraph& graph, MInstruction* rest)
      : GenericArrayReplacer(graph.alloc(), rest), mir_(mir), graph_(graph) {
    MOZ_ASSERT(IsOptimizableRestInstruction(arr_));
  }

  bool escapes(MInstruction* ins);
  bool run();
  void assertSuccess();
};

// After a successful run the rest array is recoverable and has no live uses.
void RestReplacer::assertSuccess() {
  MOZ_ASSERT(arr_->canRecoverOnBailout());
  MOZ_ASSERT(!arr_->hasLiveDefUses());
}

// Returns false if the rest array object does not escape.
bool RestReplacer::escapes(MInstruction* ins) {
  MOZ_ASSERT(ins->type() == MIRType::Object);

  JitSpewDef(JitSpew_Escape, "Check rest array\n", ins);
  JitSpewIndent spewIndent(JitSpew_Escape);

  // We can replace rest arrays in scripts with OSR entries, but the outermost
  // rest object has already been allocated before we enter via OSR and can't be
  // replaced.
  // See also the same restriction when replacing |arguments|.
  if (graph_.osrBlock()) {
    JitSpew(JitSpew_Escape, "Can't replace outermost OSR rest array");
    return true;
  }

  // Check all uses to see whether they can be supported without allocating an
  // ArrayObject for the rest parameter.
  for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
    MNode* consumer = (*i)->consumer();

    // If a resume point can observe this instruction, we can only optimize
    // if it is recoverable.
    if (consumer->isResumePoint()) {
      if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
        JitSpew(JitSpew_Escape, "Observable rest array cannot be recovered");
        return true;
      }
      continue;
    }

    MDefinition* def = consumer->toDefinition();
    switch (def->op()) {
      case MDefinition::Opcode::Elements: {
        // The elements pointer is fine as long as its own uses are safe.
        auto* elem = def->toElements();
        MOZ_ASSERT(elem->object() == ins);
        if (escapes(elem)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardShape: {
        // Only a guard on the rest array's own (known) shape is removable.
        const Shape* shape = rest()->shape();
        if (!shape) {
          JitSpew(JitSpew_Escape, "No shape defined.");
          return true;
        }

        auto* guard = def->toGuardShape();
        if (shape != guard->shape()) {
          JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", def);
          return true;
        }
        if (escapes(guard)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardToClass: {
        auto* guard = def->toGuardToClass();
        if (guard->getClass() != &ArrayObject::class_) {
          JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", def);
          return true;
        }
        if (escapes(guard)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardArrayIsPacked: {
        // Rest arrays are always packed as long as they aren't modified.
        auto* guard = def->toGuardArrayIsPacked();
        if (escapes(guard)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::Unbox: {
        if (def->type() != MIRType::Object) {
          JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
          return true;
        }
        if (escapes(def->toInstruction())) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      // This instruction is supported for |JSOp::OptimizeSpreadCall|.
      case MDefinition::Opcode::Compare: {
        bool canFold;
        if (!def->toCompare()->tryFold(&canFold)) {
          JitSpewDef(JitSpew_Escape, "has an unsupported compare\n", def);
          return true;
        }
        break;
      }

      // This instruction is a no-op used to test that scalar replacement is
      // working as expected.
      case MDefinition::Opcode::AssertRecoveredOnBailout:
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
        return true;
    }
  }

  JitSpew(JitSpew_Escape, "Rest array object is not escaped");
  return false;
}

// Escape check for the rest array's elements pointer: only direct element
// reads, length queries, packed-ness guards and apply/construct consumers
// are allowed.
bool RestReplacer::escapes(MElements* ins) {
  JitSpewDef(JitSpew_Escape, "Check rest array elements\n", ins);
  JitSpewIndent spewIndent(JitSpew_Escape);

  for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
    // The MIRType::Elements cannot be captured in a resume point as it does not
    // represent a value allocation.
    MDefinition* def = (*i)->consumer()->toDefinition();

    switch (def->op()) {
      case MDefinition::Opcode::LoadElement:
        MOZ_ASSERT(def->toLoadElement()->elements() == ins);
        break;

      case MDefinition::Opcode::ArrayLength:
        MOZ_ASSERT(def->toArrayLength()->elements() == ins);
        break;

      case MDefinition::Opcode::InitializedLength:
        MOZ_ASSERT(def->toInitializedLength()->elements() == ins);
        break;

      case MDefinition::Opcode::ApplyArray:
        MOZ_ASSERT(def->toApplyArray()->getElements() == ins);
        break;

      case MDefinition::Opcode::ConstructArray:
        MOZ_ASSERT(def->toConstructArray()->getElements() == ins);
        break;

      case MDefinition::Opcode::GuardElementsArePacked:
        MOZ_ASSERT(def->toGuardElementsArePacked()->elements() == ins);
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
        return true;
    }
  }

  JitSpew(JitSpew_Escape, "Rest array object is not escaped");
  return false;
}

// Replacing the rest array object is simpler than replacing an object or array,
// because the rest array object does not change state.
bool RestReplacer::run() {
  MBasicBlock* startBlock = arr_->block();

  // Iterate over each basic block.
  for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
       block != graph_.rpoEnd(); block++) {
    if (mir_->shouldCancel("Scalar replacement of rest array object")) {
      return false;
    }

    // Iterates over phis and instructions.
    // We do not have to visit resume points. Any resume points that capture the
    // rest array object will be handled by the Sink pass.
    for (MDefinitionIterator iter(*block); iter;) {
      // Increment the iterator before visiting the instruction, as the visit
      // function might discard itself from the basic block.
      MDefinition* def = *iter++;
      switch (def->op()) {
#define MIR_OP(op)              \
  case MDefinition::Opcode::op: \
    visit##op(def->to##op());   \
    break;
        MIR_OPCODE_LIST(MIR_OP)
#undef MIR_OP
      }
      if (!alloc_.ensureBallast()) {
        return false;
      }
    }
  }

  assertSuccess();
  return true;
}

// Replace a read of restArray[i] with a frame-argument read at index
// |i + numFormals|.
void RestReplacer::visitLoadElement(MLoadElement* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isTargetElements(elements)) {
    return;
  }

  MDefinition* index = ins->index();

  // Adjust the index to skip any extra formals.
  if (uint32_t formals = rest()->numFormals()) {
    auto* numFormals = MConstant::NewInt32(alloc_, formals);
    ins->block()->insertBefore(ins, numFormals);

    auto* add = MAdd::New(alloc_, index, numFormals, TruncateKind::Truncate);
    ins->block()->insertBefore(ins, add);

    index = add;
  }

  auto* loadArg = MGetFrameArgument::New(alloc_, index);

  ins->block()->insertBefore(ins, loadArg);
  ins->replaceAllUsesWith(loadArg);

  // Remove original instruction.
  discardInstruction(ins, elements);
}

MDefinition* RestReplacer::restLength(MInstruction* ins) {
  // Compute |Math.max(numActuals - numFormals, 0)| for the rest array length.

  auto* numActuals = rest()->numActuals();

  if (uint32_t formals = rest()->numFormals()) {
    auto* numFormals = MConstant::NewInt32(alloc_, formals);
    ins->block()->insertBefore(ins, numFormals);

    auto* length = MSub::New(alloc_, numActuals, numFormals, MIRType::Int32);
    length->setTruncateKind(TruncateKind::Truncate);
    ins->block()->insertBefore(ins, length);

    auto* zero = MConstant::NewInt32(alloc_, 0);
    ins->block()->insertBefore(ins, zero);

    // Clamp at zero: fewer actuals than formals means an empty rest array.
    auto* minmax = MMinMax::NewMax(alloc_, length, zero, MIRType::Int32);
    ins->block()->insertBefore(ins, minmax);

    return minmax;
  }

  // No formals to skip: the rest length is simply the actual-argument count.
  return numActuals;
}

// Common replacement for both length-style queries on the rest array.
void RestReplacer::visitLength(MInstruction* ins, MDefinition* elements) {
  MOZ_ASSERT(ins->isArrayLength() || ins->isInitializedLength());

  // Skip other array objects.
  if (!isTargetElements(elements)) {
    return;
  }

  MDefinition* replacement = restLength(ins);

  ins->replaceAllUsesWith(replacement);

  // Remove original instruction.
  discardInstruction(ins, elements);
}

void RestReplacer::visitArrayLength(MArrayLength* ins) {
  visitLength(ins, ins->elements());
}

void RestReplacer::visitInitializedLength(MInitializedLength* ins) {
  // The initialized length of a rest array is equal to its length.
  visitLength(ins, ins->elements());
}

// Replace an apply consuming the rest array's elements with MApplyArgs
// reading the frame's arguments directly (offset by numFormals).
void RestReplacer::visitApplyArray(MApplyArray* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->getElements();
  if (!isTargetElements(elements)) {
    return;
  }

  auto* numActuals = restLength(ins);

  auto* apply =
      MApplyArgs::New(alloc_, ins->getSingleTarget(), ins->getFunction(),
                      numActuals, ins->getThis(), rest()->numFormals());
  apply->setBailoutKind(ins->bailoutKind());
  if (!ins->maybeCrossRealm()) {
    apply->setNotCrossRealm();
  }
  if (ins->ignoresReturnValue()) {
    apply->setIgnoresReturnValue();
  }
  ins->block()->insertBefore(ins, apply);

  ins->replaceAllUsesWith(apply);

  apply->stealResumePoint(ins);

  // Remove original instruction.
  discardInstruction(ins, elements);
}

// Replace a construct call consuming the rest array's elements with
// MConstructArgs reading the frame's arguments directly.
void RestReplacer::visitConstructArray(MConstructArray* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->getElements();
  if (!isTargetElements(elements)) {
    return;
  }

  auto* numActuals = restLength(ins);

  auto* construct = MConstructArgs::New(
      alloc_, ins->getSingleTarget(), ins->getFunction(), numActuals,
      ins->getThis(), ins->getNewTarget(), rest()->numFormals());
  construct->setBailoutKind(ins->bailoutKind());
  if (!ins->maybeCrossRealm()) {
    construct->setNotCrossRealm();
  }

  ins->block()->insertBefore(ins, construct);
  ins->replaceAllUsesWith(construct);

  construct->stealResumePoint(ins);

  // Remove original instruction.
  discardInstruction(ins, elements);
}

// Only MTypedArraySubarray instructions are candidates for subarray scalar
// replacement.
static inline bool IsOptimizableSubarrayInstruction(MInstruction* ins) {
  return ins->isTypedArraySubarray();
}

// Scalar replacement for typed-array views created by MTypedArraySubarray:
// supported consumers are rewritten in terms of the source typed array plus
// the subarray's start/length operands, so the view allocation becomes dead.
class SubarrayReplacer : public MDefinitionVisitorDefaultNoop {
 private:
  const MIRGenerator* mir_;
  MIRGraph& graph_;
  MInstruction* subarray_;
  // Instruction-id watermark taken at construction time: any instruction with
  // an id >= this value was created by this replacer itself.
  uint32_t initialNumInstrIds_;

  TempAllocator& alloc() { return graph_.alloc(); }
  // The replaced allocation, viewed as the MTypedArraySubarray it must be.
  MTypedArraySubarray* subarray() const {
    return subarray_->toTypedArraySubarray();
  }

  void visitArrayBufferViewByteOffset(MArrayBufferViewByteOffset* ins);
  void visitArrayBufferViewElements(MArrayBufferViewElements* ins);
  void visitArrayBufferViewLength(MArrayBufferViewLength* ins);
  void visitGuardHasAttachedArrayBuffer(MGuardHasAttachedArrayBuffer* ins);
  void visitGuardShape(MGuardShape* ins);
  void visitTypedArrayElementSize(MTypedArrayElementSize* ins);
  void visitTypedArrayFill(MTypedArrayFill* ins);
  void visitTypedArraySet(MTypedArraySet* ins);
  void visitTypedArraySubarray(MTypedArraySubarray* ins);
  void visitUnbox(MUnbox* ins);

  // New instructions created in SubarrayReplacer.
  bool isNewInstruction(MDefinition* ins) const {
    return ins->id() >= initialNumInstrIds_;
  }

  // True for the subarray itself, or for a replacement guard this pass
  // created that forwards to it.
  bool isSubarrayOrGuard(MDefinition* ins) const {
    if (ins == subarray_) {
      return true;
    }

    // GuardHasAttachedArrayBuffer is replaced with a guard on the subarray's
    // object.
    if (ins->isGuardHasAttachedArrayBuffer() && isNewInstruction(ins)) {
      MOZ_ASSERT(ins->toGuardHasAttachedArrayBuffer()->object() ==
                 subarray()->object());
      return true;
    }

    return false;
  }

  MDefinition* toSubarrayObject(MDefinition* ins) const {
    MOZ_ASSERT(isSubarrayOrGuard(ins));
    if (ins == subarray_) {
      return subarray()->object();
    }
    return ins;
  }

  // The subarray's template object, known to be a TypedArrayObject.
  auto* templateObject() const {
    JSObject* obj = subarray()->templateObject();
    MOZ_ASSERT(obj, "missing template object");
    return &obj->as<TypedArrayObject>();
  }

  auto elementType() const { return templateObject()->type(); }

  // Immutable typed arrays cannot be detached/resized, so length/start can be
  // used without re-clamping against the current buffer length.
  bool isImmutable() const {
    return templateObject()->is<ImmutableTypedArrayObject>();
  }

 public:
  SubarrayReplacer(const MIRGenerator* mir, MIRGraph& graph,
                   MInstruction* subarray)
      : mir_(mir),
        graph_(graph),
        subarray_(subarray),
        initialNumInstrIds_(graph.getNumInstructionIds()) {
    MOZ_ASSERT(IsOptimizableSubarrayInstruction(subarray_));
  }

  bool escapes(MInstruction* ins) const;
  bool run();
  void assertSuccess() const;
};

void SubarrayReplacer::visitUnbox(MUnbox* ins) {
  // Skip unbox on other objects.
  if (ins->input() != subarray_) {
    return;
  }
  MOZ_ASSERT(ins->type() == MIRType::Object);

  // Replace the unbox with the subarray object.
  ins->replaceAllUsesWith(subarray_);

  // Remove the unbox.
  ins->block()->discard(ins);
}

void SubarrayReplacer::visitGuardShape(MGuardShape* ins) {
  // Skip guards on other objects.
  if (ins->object() != subarray_) {
    return;
  }

  // Replace the guard with the subarray object.
  ins->replaceAllUsesWith(subarray_);

  // Remove the guard.
  ins->block()->discard(ins);
}

// An attached-buffer guard on the subarray is re-expressed as the same guard
// on the source typed array (they share the underlying buffer).
void SubarrayReplacer::visitGuardHasAttachedArrayBuffer(
    MGuardHasAttachedArrayBuffer* ins) {
  // Skip guards on other objects.
  if (ins->object() != subarray_) {
    return;
  }

  // Create a new guard on the subarray's input argument.
  auto* newGuard =
      MGuardHasAttachedArrayBuffer::New(alloc(), subarray()->object());
  newGuard->setBailoutKind(ins->bailoutKind());
  ins->block()->insertBefore(ins, newGuard);

  // Replace the guard.
  ins->replaceAllUsesWith(newGuard);

  // Remove original instruction.
  ins->block()->discard(ins);
}

void SubarrayReplacer::visitArrayBufferViewLength(MArrayBufferViewLength* ins) {
  // Skip other typed array objects.
  if (!isSubarrayOrGuard(ins->object())) {
    return;
  }

  MDefinition* replacement;
  if (!isImmutable()) {
    // Get length of |subarray->object()|.
    auto* length = MArrayBufferViewLength::New(alloc(), subarray()->object());
    ins->block()->insertBefore(ins, length);

    // Minimum to zero the length if the underlying buffer is now detached.
    auto* minmax =
        MMinMax::NewMin(alloc(), subarray()->length(), length, MIRType::IntPtr);
    ins->block()->insertBefore(ins, minmax);

    replacement = minmax;
  } else {
    replacement = subarray()->length();
  }

  // Replace the instruction.
  ins->replaceAllUsesWith(replacement);

  // Remove original instruction.
  ins->block()->discard(ins);
}

// The subarray's byte offset is the source view's byte offset plus the start
// index scaled by the element size.
void SubarrayReplacer::visitArrayBufferViewByteOffset(
    MArrayBufferViewByteOffset* ins) {
  // Skip other typed array objects.
  if (!isSubarrayOrGuard(ins->object())) {
    return;
  }

  auto* shift = MConstant::NewIntPtr(alloc(), TypedArrayShift(elementType()));
  ins->block()->insertBefore(ins, shift);

  MDefinition* start;
  if (!isImmutable()) {
    // Get length of |subarray->object()|.
    auto* length = MArrayBufferViewLength::New(alloc(), subarray()->object());
    ins->block()->insertBefore(ins, length);

    // Minimum to zero |start| if the underlying buffer is now detached.
    auto* minmax =
        MMinMax::NewMin(alloc(), subarray()->start(), length, MIRType::IntPtr);
    ins->block()->insertBefore(ins, minmax);

    start = minmax;
  } else {
    start = subarray()->start();
  }

  // Shift to convert start index to start byte-offset.
  auto* adjustment = MLsh::New(alloc(), start, shift, MIRType::IntPtr);
  ins->block()->insertBefore(ins, adjustment);

  // Byte-offset of |subarray->object()|.
  auto* byteOffset =
      MArrayBufferViewByteOffset::New(alloc(), subarray()->object());
  ins->block()->insertBefore(ins, byteOffset);

  // Actual byte-offset into the array buffer.
  auto* replacement =
      MAdd::New(alloc(), byteOffset, adjustment, MIRType::IntPtr);
  ins->block()->insertBefore(ins, replacement);

  // Replace the byte-offset.
  ins->replaceAllUsesWith(replacement);

  // Remove original instruction.
  ins->block()->discard(ins);
}

// Element reads through the subarray become offset element accesses into the
// source typed array.
void SubarrayReplacer::visitArrayBufferViewElements(
    MArrayBufferViewElements* ins) {
  // Skip other typed array objects.
  if (!isSubarrayOrGuard(ins->object())) {
    return;
  }

  auto* replacement = MArrayBufferViewElementsWithOffset::New(
      alloc(), subarray()->object(), subarray()->start(), elementType());
  ins->block()->insertBefore(ins, replacement);

  // Replace the elements.
  ins->replaceAllUsesWith(replacement);

  // Remove original instruction.
3201 ins->block()->discard(ins); 3202 } 3203 3204 void SubarrayReplacer::visitTypedArrayElementSize(MTypedArrayElementSize* ins) { 3205 // Skip other typed array objects. 3206 if (!isSubarrayOrGuard(ins->object())) { 3207 return; 3208 } 3209 3210 int32_t bytesPerElement = TypedArrayElemSize(elementType()); 3211 auto* replacement = MConstant::NewInt32(alloc(), bytesPerElement); 3212 ins->block()->insertBefore(ins, replacement); 3213 3214 // Replace the element-size. 3215 ins->replaceAllUsesWith(replacement); 3216 3217 // Remove original instruction. 3218 ins->block()->discard(ins); 3219 } 3220 3221 void SubarrayReplacer::visitTypedArrayFill(MTypedArrayFill* ins) { 3222 // Skip other typed array objects. 3223 if (!isSubarrayOrGuard(ins->object())) { 3224 return; 3225 } 3226 3227 auto* subarrayStart = subarray()->start(); 3228 auto* subarrayLength = subarray()->length(); 3229 3230 // Make |start| and |end| relative to |subarrayLength|. 3231 auto* relativeStart = 3232 MToIntegerIndex::New(alloc(), ins->start(), subarrayLength); 3233 ins->block()->insertBefore(ins, relativeStart); 3234 3235 auto* relativeEnd = MToIntegerIndex::New(alloc(), ins->end(), subarrayLength); 3236 ins->block()->insertBefore(ins, relativeEnd); 3237 3238 // Compute actual start and end indices by adding |subarrayStart|. 3239 auto* actualStart = 3240 MAdd::New(alloc(), relativeStart, subarrayStart, MIRType::IntPtr); 3241 ins->block()->insertBefore(ins, actualStart); 3242 3243 auto* actualEnd = 3244 MAdd::New(alloc(), relativeEnd, subarrayStart, MIRType::IntPtr); 3245 ins->block()->insertBefore(ins, actualEnd); 3246 3247 auto* newFill = 3248 MTypedArrayFill::New(alloc(), subarray()->object(), ins->value(), 3249 actualStart, actualEnd, ins->elementType()); 3250 newFill->setBailoutKind(ins->bailoutKind()); 3251 newFill->stealResumePoint(ins); 3252 ins->block()->insertBefore(ins, newFill); 3253 3254 // Replace the fill. 3255 ins->replaceAllUsesWith(newFill); 3256 3257 // Remove original instruction. 
3258 ins->block()->discard(ins); 3259 } 3260 3261 void SubarrayReplacer::visitTypedArraySet(MTypedArraySet* ins) { 3262 // Skip other typed array objects. 3263 if (!isSubarrayOrGuard(ins->target()) && !isSubarrayOrGuard(ins->source())) { 3264 return; 3265 } 3266 3267 // The replaced |subarray| instruction can be the target, source, or both 3268 // operands of MTypedArraySet: 3269 // 3270 // - Target operand: `ta.subarray(...).set(...)` 3271 // - Source operand: `ta.set(src.subarray(...), ...)` 3272 // - Both operands: `sub = src.subarray(...); sub.set(sub, ...)`. 3273 // 3274 // When |subarray| is the target operand, |subarray->start| needs to be added 3275 // to |ins->offset|. 3276 // 3277 // When |subarray| is the source operand, MTypedArraySet is replaced with 3278 // MTypedArraySetFromSubarray to pass through |subarray->start| and 3279 // |subarray->length|. 3280 // 3281 // When |subarray| is both the target and the source operand, the call is 3282 // either a no-op instruction, or bails out and then throws an exception. 3283 3284 MInstruction* replacement; 3285 if (isSubarrayOrGuard(ins->target()) && isSubarrayOrGuard(ins->source())) { 3286 // Either a no-op when the offset is zero. Or bails out when the offset is 3287 // non-zero. (Bail-out happens through MGuardTypedArraySetOffset.) 3288 replacement = MNop::New(alloc()); 3289 } else if (isSubarrayOrGuard(ins->target())) { 3290 auto* target = toSubarrayObject(ins->target()); 3291 3292 // Addition can't overflow because preceding guards ensure: 3293 // 1. |ins->offset()| and |subarray->start()| are both non-negative. 3294 // 2. |ins->offset()| is a valid index into |subarray|. 3295 // 3. |subarray->start()| is a valid index |subarray->object()|. 
3296 auto* newOffset = 3297 MAdd::New(alloc(), ins->offset(), subarray()->start(), MIRType::IntPtr); 3298 ins->block()->insertBefore(ins, newOffset); 3299 3300 replacement = MTypedArraySet::New(alloc(), target, ins->source(), newOffset, 3301 ins->canUseBitwiseCopy()); 3302 } else { 3303 auto* source = toSubarrayObject(ins->source()); 3304 3305 replacement = MTypedArraySetFromSubarray::New( 3306 alloc(), ins->target(), source, ins->offset(), subarray()->start(), 3307 subarray()->length(), ins->canUseBitwiseCopy()); 3308 } 3309 replacement->stealResumePoint(ins); 3310 ins->block()->insertBefore(ins, replacement); 3311 3312 // Replace the set. 3313 ins->replaceAllUsesWith(replacement); 3314 3315 // Remove original instruction. 3316 ins->block()->discard(ins); 3317 } 3318 3319 void SubarrayReplacer::visitTypedArraySubarray(MTypedArraySubarray* ins) { 3320 // Skip other typed array objects. 3321 if (!isSubarrayOrGuard(ins->object())) { 3322 return; 3323 } 3324 MOZ_ASSERT(!ins->isScalarReplaced()); 3325 3326 // Add both |start| operands to get the adjusted start index. 3327 auto* newStart = 3328 MAdd::New(alloc(), subarray()->start(), ins->start(), MIRType::IntPtr); 3329 ins->block()->insertBefore(ins, newStart); 3330 3331 auto* replacement = MTypedArraySubarray::New( 3332 alloc(), subarray()->object(), newStart, ins->length(), 3333 ins->templateObject(), ins->initialHeap()); 3334 replacement->stealResumePoint(ins); 3335 ins->block()->insertBefore(ins, replacement); 3336 3337 // Replace the subarray. 3338 ins->replaceAllUsesWith(replacement); 3339 3340 // Remove original instruction. 3341 ins->block()->discard(ins); 3342 } 3343 3344 // Returns false if the subarray typed array object does not escape. 
bool SubarrayReplacer::escapes(MInstruction* ins) const {
  MOZ_ASSERT(ins->type() == MIRType::Object);

  JitSpewDef(JitSpew_Escape, "Check subarray typed array\n", ins);
  JitSpewIndent spewIndent(JitSpew_Escape);

  // Check all uses to see whether they can be supported without allocating a
  // TypedArrayObject for the `%TypedArray%.prototype.subarray` call.
  for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
    MNode* consumer = (*i)->consumer();

    // If a resume point can observe this instruction, we can only optimize if
    // it is recoverable.
    if (consumer->isResumePoint()) {
      if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
        JitSpew(JitSpew_Escape, "Observable subarray cannot be recovered");
        return true;
      }
      continue;
    }

    MDefinition* def = consumer->toDefinition();
    switch (def->op()) {
      case MDefinition::Opcode::GuardShape: {
        // Recurse through shape guards that match the template object; their
        // own uses must not escape either.
        auto* guard = def->toGuardShape();
        if (templateObject()->shape() != guard->shape()) {
          JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", def);
          return true;
        }
        if (escapes(guard)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::Unbox: {
        // Recurse through object unboxes.
        if (def->type() != MIRType::Object) {
          JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
          return true;
        }
        if (escapes(def->toInstruction())) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardHasAttachedArrayBuffer: {
        // Recurse through attached-buffer guards; visitGuardHasAttachedArray-
        // Buffer later replaces them with a guard on the source object.
        auto* guard = def->toGuardHasAttachedArrayBuffer();
        if (escapes(guard)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      // Replacable instructions (each has a matching visit method).
      case MDefinition::Opcode::ArrayBufferViewByteOffset:
      case MDefinition::Opcode::ArrayBufferViewElements:
      case MDefinition::Opcode::ArrayBufferViewLength:
      case MDefinition::Opcode::TypedArrayElementSize:
      case MDefinition::Opcode::TypedArrayFill:
      case MDefinition::Opcode::TypedArraySet:
      case MDefinition::Opcode::TypedArraySubarray:
        break;

      // This instruction is a no-op used to test that scalar replacement is
      // working as expected.
      case MDefinition::Opcode::AssertRecoveredOnBailout:
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
        return true;
    }
  }

  JitSpew(JitSpew_Escape, "Subarray typed array object is not escaped");
  return false;
}

bool SubarrayReplacer::run() {
  MBasicBlock* startBlock = subarray_->block();

  // Iterate over each basic block.
  for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
       block != graph_.rpoEnd(); block++) {
    if (mir_->shouldCancel("Scalar replacement of subarray object")) {
      return false;
    }

    // Iterates over phis and instructions.
    // We do not have to visit resume points. Any resume points that capture the
    // subarray typed array object will be handled by the Sink pass.
    for (MDefinitionIterator iter(*block); iter;) {
      // Increment the iterator before visiting the instruction, as the visit
      // function might discard itself from the basic block.
      MDefinition* def = *iter++;
      switch (def->op()) {
#define MIR_OP(op)               \
  case MDefinition::Opcode::op:  \
    visit##op(def->to##op());    \
    break;
        MIR_OPCODE_LIST(MIR_OP)
#undef MIR_OP
      }
      if (!graph_.alloc().ensureBallast()) {
        return false;
      }
    }
  }

  assertSuccess();
  return true;
}

// Note: despite its name this also marks the subarray as scalar-replaced
// before asserting that all of its uses have been rewritten.
void SubarrayReplacer::assertSuccess() const {
  subarray()->setScalarReplaced();
  MOZ_ASSERT(subarray_->canRecoverOnBailout());
  MOZ_ASSERT(!subarray_->hasLiveDefUses());
}

// WebAssembly only supports scalar replacement of structs with only inline
// data for now.
static inline bool IsOptimizableWasmStructInstruction(MInstruction* ins) {
  return ins->isWasmNewStructObject() &&
         !ins->toWasmNewStructObject()->isOutline();
}

// Memory view for scalar replacement of a non-escaping wasm struct
// allocation: field loads/stores are tracked per-block in an MWasmStructState
// and merged across control flow via the EmulateStateOf driver.
class WasmStructMemoryView : public MDefinitionVisitorDefaultNoop {
 public:
  using BlockState = MWasmStructState;
  static const char phaseName[];

 private:
  TempAllocator& alloc_;

  // The MWasmNewStructObject being replaced.
  MInstruction* struct_;

  // Placeholder used to pre-fill phi inputs before the real values are
  // merged in; must be unused when the pass finishes (see assertSuccess).
  MConstant* undefinedVal_;
  MBasicBlock* startBlock_;

  // Entry state of the block currently being visited.
  BlockState* state_;

  bool oom_;

 public:
  WasmStructMemoryView(TempAllocator& alloc, MInstruction* wasmStruct);

  MBasicBlock* startingBlock();
  bool initStartingState(BlockState** pState);

  void setEntryBlockState(BlockState* state);
  bool mergeIntoSuccessorState(MBasicBlock* curr, MBasicBlock* succ,
                               BlockState** pSuccState);

#ifdef DEBUG
  void assertSuccess();
#else
  void assertSuccess() {}
#endif

  bool oom() const { return oom_; }

 public:
  void visitResumePoint(MResumePoint* rp);
  void visitPhi(MPhi* ins);
  void visitWasmStoreField(MWasmStoreField* ins);
  void visitWasmStoreFieldRef(MWasmStoreFieldRef* ins);
  void visitWasmLoadField(MWasmLoadField* ins);
  void visitWasmPostWriteBarrierWholeCell(MWasmPostWriteBarrierWholeCell* ins);
};

void WasmStructMemoryView::setEntryBlockState(BlockState* state) {
  state_ = state;
}

#ifdef DEBUG
void WasmStructMemoryView::assertSuccess() {
  // Make sure that the undefined value used as a placeholder is not used.
  MOZ_ASSERT(!undefinedVal_->hasUses());

  // Make sure that the MWasmNewStruct instruction is not used anymore.
  MOZ_ASSERT(!struct_->hasUses());
}
#endif

MBasicBlock* WasmStructMemoryView::startingBlock() { return startBlock_; }

bool WasmStructMemoryView::initStartingState(BlockState** pState) {
  // We need this undefined value to initialize phi inputs if we create some
  // later.
  undefinedVal_ = MConstant::NewUndefined(alloc_);

  // Create a new block state and insert at it at the location of the new
  // struct.
  BlockState* state = BlockState::New(alloc_, struct_);
  if (!state || !state->init()) {
    return false;
  }

  *pState = state;
  return true;
}

// Return true if all phi operands are equal to |newStruct|.
static bool WasmStructPhiOperandsEqualTo(MPhi* phi, MInstruction* newStruct) {
  MOZ_ASSERT(IsOptimizableWasmStructInstruction(newStruct));

  for (size_t i = 0; i < phi->numOperands(); i++) {
    if (!PhiOperandEqualTo(phi->getOperand(i), newStruct)) {
      return false;
    }
  }
  return true;
}

void WasmStructMemoryView::visitPhi(MPhi* ins) {
  // Skip phis on other objects.
  if (!WasmStructPhiOperandsEqualTo(ins, struct_)) {
    return;
  }

  // Replace the phi by its object.
  ins->replaceAllUsesWith(struct_);

  // Remove original instruction.
  ins->block()->discardPhi(ins);
}

// We need to define this method for the pass to work,
// but we don't have resume points in wasm.
3575 void WasmStructMemoryView::visitResumePoint(MResumePoint* rp) {} 3576 3577 void WasmStructMemoryView::visitWasmStoreField(MWasmStoreField* ins) { 3578 // Skip stores made on other structs. 3579 MDefinition* base = ins->base(); 3580 if (base != struct_) { 3581 return; 3582 } 3583 3584 // Clone the state and update the field value. 3585 state_ = BlockState::Copy(alloc_, state_); 3586 if (!state_) { 3587 oom_ = true; 3588 return; 3589 } 3590 3591 // Update the state 3592 state_->setField(ins->structFieldIndex().value(), ins->value()); 3593 3594 // Remove original instruction. 3595 ins->block()->discard(ins); 3596 } 3597 3598 void WasmStructMemoryView::visitWasmStoreFieldRef(MWasmStoreFieldRef* ins) { 3599 // Skip stores made on other structs. 3600 MDefinition* base = ins->base(); 3601 if (base != struct_) { 3602 return; 3603 } 3604 3605 // Clone the state and update the field value. 3606 state_ = BlockState::Copy(alloc_, state_); 3607 if (!state_) { 3608 oom_ = true; 3609 return; 3610 } 3611 3612 // Update the state 3613 state_->setField(ins->structFieldIndex().value(), ins->value()); 3614 3615 // Remove original instruction. 3616 ins->block()->discard(ins); 3617 } 3618 3619 void WasmStructMemoryView::visitWasmLoadField(MWasmLoadField* ins) { 3620 // Skip loads made on other structs. 3621 MDefinition* base = ins->base(); 3622 if (base != struct_) { 3623 return; 3624 } 3625 3626 MDefinition* value = state_->getField(ins->structFieldIndex().value()); 3627 3628 // Replace load by the field value. 3629 ins->replaceAllUsesWith(value); 3630 3631 // Remove original instruction. 3632 ins->block()->discard(ins); 3633 } 3634 3635 void WasmStructMemoryView::visitWasmPostWriteBarrierWholeCell( 3636 MWasmPostWriteBarrierWholeCell* ins) { 3637 // Skip loads made on other objects. 3638 if (ins->object() != struct_) { 3639 return; 3640 } 3641 3642 // Remove original instruction. 
3643 ins->block()->discard(ins); 3644 } 3645 3646 bool WasmStructMemoryView::mergeIntoSuccessorState(MBasicBlock* curr, 3647 MBasicBlock* succ, 3648 BlockState** pSuccState) { 3649 BlockState* succState = *pSuccState; 3650 3651 // When a block has no state yet, create an empty one for the 3652 // successor. 3653 if (!succState) { 3654 // If the successor is not dominated then the struct cannot flow 3655 // in this basic block without a Phi. We know that no Phi exist 3656 // in non-dominated successors as the conservative escaped 3657 // analysis fails otherwise. Such condition can succeed if the 3658 // successor is a join at the end of a if-block and the struct 3659 // only exists within the branch. 3660 if (!startBlock_->dominates(succ)) { 3661 return true; 3662 } 3663 3664 // If there is only one predecessor, carry over the last state of the 3665 // block to the successor. As the block state is immutable, if the 3666 // current block has multiple successors, they will share the same entry 3667 // state. 3668 if (succ->numPredecessors() <= 1 || !state_->numFields()) { 3669 *pSuccState = state_; 3670 return true; 3671 } 3672 3673 // If we have multiple predecessors, then we allocate one Phi node for 3674 // each predecessor, and create a new block state which only has phi 3675 // nodes. These would later be removed by the removal of redundant phi 3676 // nodes. 3677 succState = BlockState::Copy(alloc_, state_); 3678 if (!succState) { 3679 return false; 3680 } 3681 3682 size_t numPreds = succ->numPredecessors(); 3683 for (size_t index = 0; index < state_->numFields(); index++) { 3684 MPhi* phi = MPhi::New(alloc_.fallible()); 3685 if (!phi || !phi->reserveLength(numPreds)) { 3686 return false; 3687 } 3688 3689 // Fill the input of the successors Phi with undefined 3690 // values, and each block later fills the Phi inputs. 
3691 for (size_t p = 0; p < numPreds; p++) { 3692 phi->addInput(undefinedVal_); 3693 } 3694 3695 // Add Phi in the list of Phis of the basic block. 3696 succ->addPhi(phi); 3697 3698 // Set the result type of this phi depending on the previous fields. 3699 phi->setResultType(succState->getField(index)->type()); 3700 succState->setField(index, phi); 3701 } 3702 3703 *pSuccState = succState; 3704 } 3705 3706 MOZ_ASSERT_IF(succ == startBlock_, startBlock_->isLoopHeader()); 3707 if (succ->numPredecessors() > 1 && succState->numFields() && 3708 succ != startBlock_) { 3709 // We need to re-compute successorWithPhis as the previous EliminatePhis 3710 // phase might have removed all the Phis from the successor block. 3711 size_t currIndex; 3712 MOZ_ASSERT(!succ->phisEmpty()); 3713 if (curr->successorWithPhis()) { 3714 MOZ_ASSERT(curr->successorWithPhis() == succ); 3715 currIndex = curr->positionInPhiSuccessor(); 3716 } else { 3717 currIndex = succ->indexForPredecessor(curr); 3718 curr->setSuccessorWithPhis(succ, currIndex); 3719 } 3720 MOZ_ASSERT(succ->getPredecessor(currIndex) == curr); 3721 3722 // Copy the current element states to the index of current block in all 3723 // the Phi created during the first visit of the successor. 3724 for (size_t index = 0; index < state_->numFields(); index++) { 3725 MPhi* phi = succState->getField(index)->toPhi(); 3726 phi->replaceOperand(currIndex, state_->getField(index)); 3727 } 3728 } 3729 3730 return true; 3731 } 3732 3733 // Returns False if the wasm struct is not escaped and if it is optimizable by 3734 // ScalarReplacementOfStruct. 3735 static bool IsWasmStructEscaped(MDefinition* ins, MInstruction* newStruct) { 3736 MOZ_ASSERT(ins->type() == MIRType::WasmAnyRef); 3737 MOZ_ASSERT(IsOptimizableWasmStructInstruction(newStruct)); 3738 3739 JitSpewDef(JitSpew_Escape, "Check wasm struct\n", ins); 3740 JitSpewIndent spewIndent(JitSpew_Escape); 3741 3742 // Don't do scalar replacement on big structs. 
3743 if (newStruct->isWasmNewStructObject()) { 3744 if (newStruct->toWasmNewStructObject()->structType().fields_.length() > 3745 wasm::MaxFieldsScalarReplacementStructs) { 3746 JitSpew(JitSpew_Escape, "struct too big for scalar replacement\n"); 3747 return true; 3748 } 3749 } 3750 3751 // We don't support defaultable structs for now. 3752 if (newStruct->isWasmNewStructObject() && 3753 newStruct->toWasmNewStructObject()->zeroFields()) { 3754 JitSpew(JitSpew_Escape, "Struct is created with new_default\n"); 3755 return true; 3756 } 3757 3758 // Check if the struct is escaped. If the object is not the first argument 3759 // of either a known Store / Load, then we consider it as escaped. This is a 3760 // cheap and conservative escape analysis. 3761 for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) { 3762 MNode* consumer = (*i)->consumer(); 3763 3764 if (!consumer->isDefinition()) { 3765 JitSpew(JitSpew_Escape, "Wasm struct is escaped"); 3766 return true; 3767 } 3768 3769 MDefinition* def = consumer->toDefinition(); 3770 switch (def->op()) { 3771 // This instruction can only store primitive types. 3772 // Another struct won't be able to escape through it. 3773 case MDefinition::Opcode::WasmStoreField: { 3774 break; 3775 } 3776 case MDefinition::Opcode::WasmStoreFieldRef: { 3777 // Escaped if it's stored into another struct. 3778 if (def->toWasmStoreFieldRef()->value() == newStruct) { 3779 JitSpewDef(JitSpew_Escape, "is escaped by\n", def); 3780 return true; 3781 } 3782 break; 3783 } 3784 // Not escaped if we load into a field of this struct. 3785 case MDefinition::Opcode::WasmLoadField: { 3786 break; 3787 } 3788 // Handle phis. 
3789 case MDefinition::Opcode::Phi: { 3790 auto* phi = def->toPhi(); 3791 if (!WasmStructPhiOperandsEqualTo(phi, newStruct)) { 3792 JitSpewDef(JitSpew_Escape, "has different phi operands\n", def); 3793 return true; 3794 } 3795 if (IsWasmStructEscaped(phi, newStruct)) { 3796 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def); 3797 return true; 3798 } 3799 break; 3800 } 3801 3802 case MDefinition::Opcode::WasmPostWriteBarrierWholeCell: { 3803 break; 3804 } 3805 3806 // By default, we consider the struct as escaped. 3807 default: 3808 JitSpewDef(JitSpew_Escape, "is escaped by\n", def); 3809 return true; 3810 } 3811 } 3812 3813 JitSpew(JitSpew_Escape, "Struct is not escaped"); 3814 return false; 3815 } 3816 3817 /* static */ const char WasmStructMemoryView::phaseName[] = 3818 "Scalar Replacement of wasm structs"; 3819 3820 WasmStructMemoryView::WasmStructMemoryView(TempAllocator& alloc, 3821 MInstruction* wasmStruct) 3822 : alloc_(alloc), 3823 struct_(wasmStruct), 3824 undefinedVal_(nullptr), 3825 startBlock_(wasmStruct->block()), 3826 state_(nullptr), 3827 oom_(false) {} 3828 3829 static inline bool IsOptimizableObjectKeysInstruction(MInstruction* ins) { 3830 return ins->isObjectKeys(); 3831 } 3832 3833 class ObjectKeysReplacer : public GenericArrayReplacer { 3834 private: 3835 const MIRGenerator* mir_; 3836 MIRGraph& graph_; 3837 MObjectToIterator* objToIter_ = nullptr; 3838 3839 MObjectKeys* objectKeys() const { return arr_->toObjectKeys(); } 3840 3841 MDefinition* objectKeysLength(MInstruction* ins); 3842 void visitLength(MInstruction* ins, MDefinition* elements); 3843 3844 void visitLoadElement(MLoadElement* ins); 3845 void visitArrayLength(MArrayLength* ins); 3846 void visitInitializedLength(MInitializedLength* ins); 3847 3848 bool escapes(MElements* ins); 3849 3850 public: 3851 ObjectKeysReplacer(const MIRGenerator* mir, MIRGraph& graph, 3852 MInstruction* arr) 3853 : GenericArrayReplacer(graph.alloc(), arr), mir_(mir), graph_(graph) { 3854 
MOZ_ASSERT(IsOptimizableObjectKeysInstruction(arr_)); 3855 } 3856 3857 bool escapes(MInstruction* ins); 3858 bool run(MInstructionIterator& outerIterator); 3859 void assertSuccess(); 3860 }; 3861 3862 // Returns false if the Object.keys array object does not escape. 3863 bool ObjectKeysReplacer::escapes(MInstruction* ins) { 3864 MOZ_ASSERT(ins->type() == MIRType::Object); 3865 3866 JitSpewDef(JitSpew_Escape, "Check Object.keys array\n", ins); 3867 JitSpewIndent spewIndent(JitSpew_Escape); 3868 3869 // Check all uses to see whether they can be supported without allocating an 3870 // ArrayObject for the Object.keys parameter. 3871 for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) { 3872 MNode* consumer = (*i)->consumer(); 3873 3874 // If a resume point can observe this instruction, we can only optimize 3875 // if it is recoverable. 3876 if (consumer->isResumePoint()) { 3877 if (!consumer->toResumePoint()->isRecoverableOperand(*i)) { 3878 JitSpew(JitSpew_Escape, 3879 "Observable Object.keys array cannot be recovered"); 3880 return true; 3881 } 3882 continue; 3883 } 3884 3885 MDefinition* def = consumer->toDefinition(); 3886 switch (def->op()) { 3887 case MDefinition::Opcode::Elements: { 3888 auto* elem = def->toElements(); 3889 MOZ_ASSERT(elem->object() == ins); 3890 if (escapes(elem)) { 3891 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def); 3892 return true; 3893 } 3894 break; 3895 } 3896 3897 case MDefinition::Opcode::GuardShape: { 3898 const Shape* shape = objectKeys()->resultShape(); 3899 MOZ_DIAGNOSTIC_ASSERT(shape); 3900 auto* guard = def->toGuardShape(); 3901 if (shape != guard->shape()) { 3902 JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", def); 3903 return true; 3904 } 3905 if (escapes(guard)) { 3906 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def); 3907 return true; 3908 } 3909 break; 3910 } 3911 3912 case MDefinition::Opcode::GuardToClass: { 3913 auto* guard = def->toGuardToClass(); 3914 if 
(guard->getClass() != &ArrayObject::class_) { 3915 JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", def); 3916 return true; 3917 } 3918 if (escapes(guard)) { 3919 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def); 3920 return true; 3921 } 3922 break; 3923 } 3924 3925 case MDefinition::Opcode::GuardArrayIsPacked: { 3926 // Object.keys arrays are always packed as long as they aren't modified. 3927 auto* guard = def->toGuardArrayIsPacked(); 3928 if (escapes(guard)) { 3929 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def); 3930 return true; 3931 } 3932 break; 3933 } 3934 3935 case MDefinition::Opcode::Unbox: { 3936 if (def->type() != MIRType::Object) { 3937 JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def); 3938 return true; 3939 } 3940 if (escapes(def->toInstruction())) { 3941 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def); 3942 return true; 3943 } 3944 break; 3945 } 3946 3947 // This instruction is supported for |JSOp::OptimizeSpreadCall|. 3948 case MDefinition::Opcode::Compare: { 3949 bool canFold; 3950 if (!def->toCompare()->tryFold(&canFold)) { 3951 JitSpewDef(JitSpew_Escape, "has an unsupported compare\n", def); 3952 return true; 3953 } 3954 break; 3955 } 3956 3957 // This instruction is a no-op used to test that scalar replacement is 3958 // working as expected. 
3959 case MDefinition::Opcode::AssertRecoveredOnBailout: 3960 break; 3961 3962 default: 3963 JitSpewDef(JitSpew_Escape, "is escaped by\n", def); 3964 return true; 3965 } 3966 } 3967 3968 JitSpew(JitSpew_Escape, "Object.keys array object is not escaped"); 3969 return false; 3970 } 3971 3972 bool ObjectKeysReplacer::escapes(MElements* ins) { 3973 JitSpewDef(JitSpew_Escape, "Check Object.keys array elements\n", ins); 3974 JitSpewIndent spewIndent(JitSpew_Escape); 3975 3976 for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) { 3977 // The MIRType::Elements cannot be captured in a resume point as it does not 3978 // represent a value allocation. 3979 MDefinition* def = (*i)->consumer()->toDefinition(); 3980 3981 switch (def->op()) { 3982 case MDefinition::Opcode::LoadElement: { 3983 MOZ_ASSERT(def->toLoadElement()->elements() == ins); 3984 break; 3985 } 3986 3987 case MDefinition::Opcode::ArrayLength: 3988 MOZ_ASSERT(def->toArrayLength()->elements() == ins); 3989 break; 3990 3991 case MDefinition::Opcode::InitializedLength: 3992 MOZ_ASSERT(def->toInitializedLength()->elements() == ins); 3993 break; 3994 3995 case MDefinition::Opcode::GuardElementsArePacked: 3996 MOZ_ASSERT(def->toGuardElementsArePacked()->elements() == ins); 3997 break; 3998 3999 default: 4000 JitSpewDef(JitSpew_Escape, "is escaped by\n", def); 4001 return true; 4002 } 4003 } 4004 4005 JitSpew(JitSpew_Escape, "Object.keys array object is not escaped"); 4006 return false; 4007 } 4008 4009 bool ObjectKeysReplacer::run(MInstructionIterator& outerIterator) { 4010 MBasicBlock* startBlock = arr_->block(); 4011 4012 objToIter_ = MObjectToIterator::New(alloc_, objectKeys()->object(), nullptr); 4013 objToIter_->setSkipRegistration(true); 4014 arr_->block()->insertBefore(arr_, objToIter_); 4015 4016 // Iterate over each basic block. 
4017 for (ReversePostorderIterator block = graph_.rpoBegin(startBlock); 4018 block != graph_.rpoEnd(); block++) { 4019 if (mir_->shouldCancel("Scalar replacement of Object.keys array object")) { 4020 return false; 4021 } 4022 4023 // Iterates over phis and instructions. 4024 // We do not have to visit resume points. Any resume points that capture the 4025 // Object.keys array object will be handled by the Sink pass. 4026 for (MDefinitionIterator iter(*block); iter;) { 4027 // Increment the iterator before visiting the instruction, as the visit 4028 // function might discard itself from the basic block. 4029 MDefinition* def = *iter++; 4030 switch (def->op()) { 4031 #define MIR_OP(op) \ 4032 case MDefinition::Opcode::op: \ 4033 visit##op(def->to##op()); \ 4034 break; 4035 MIR_OPCODE_LIST(MIR_OP) 4036 #undef MIR_OP 4037 } 4038 if (!graph_.alloc().ensureBallast()) { 4039 return false; 4040 } 4041 } 4042 } 4043 4044 assertSuccess(); 4045 4046 auto* forRecovery = MObjectKeysFromIterator::New(alloc_, objToIter_); 4047 arr_->block()->insertBefore(arr_, forRecovery); 4048 forRecovery->stealResumePoint(arr_); 4049 arr_->replaceAllUsesWith(forRecovery); 4050 4051 // We need to explicitly discard the instruction since it's marked as 4052 // effectful and we stole its resume point, which will trip assertion 4053 // failures later. We can't discard the instruction out from underneath 4054 // the iterator though, and we can't do the trick where we increment the 4055 // iterator at the top of the loop because we might discard the *next* 4056 // instruction, so we do this goofiness. 
  // Step the caller's iterator back before discarding `arr_`, so that the
  // caller's `ins++` lands on the instruction after the discarded one.
  outerIterator--;
  arr_->block()->discard(arr_);

  if (!graph_.alloc().ensureBallast()) {
    return false;
  }

  return true;
}

// Verify the replacement rewrote every live use of the array.
void ObjectKeysReplacer::assertSuccess() {
  MOZ_ASSERT(!arr_->hasLiveDefUses());
}

// Rewrite a load from the Object.keys array's elements as a load of the
// corresponding property name from the iterator.
void ObjectKeysReplacer::visitLoadElement(MLoadElement* ins) {
  if (!isTargetElements(ins->elements())) {
    return;
  }

  auto* load = MLoadIteratorElement::New(alloc_, objToIter_, ins->index());
  ins->block()->insertBefore(ins, load);

  ins->replaceAllUsesWith(load);
  discardInstruction(ins, ins->elements());
}

// Shared rewrite for all length-style queries on the array: replace them with
// the iterator's property count.
void ObjectKeysReplacer::visitLength(MInstruction* ins, MDefinition* elements) {
  if (!isTargetElements(elements)) {
    return;
  }

  auto* newLen = MIteratorLength::New(alloc_, objToIter_);
  ins->block()->insertBefore(ins, newLen);

  ins->replaceAllUsesWith(newLen);
  discardInstruction(ins, elements);
}

void ObjectKeysReplacer::visitArrayLength(MArrayLength* ins) {
  visitLength(ins, ins->elements());
}

void ObjectKeysReplacer::visitInitializedLength(MInitializedLength* ins) {
  // The initialized length of an Object.keys array is equal to its length.
  visitLength(ins, ins->elements());
}

// Entry point of the scalar-replacement pass: for every optimizable
// allocation (object, array, arguments, rest, subarray, wasm struct) that
// does not escape, replace the allocation and all its memory operations by
// scalar values / recover instructions. Returns false on OOM or cancellation.
bool ScalarReplacement(const MIRGenerator* mir, MIRGraph& graph) {
  JitSpew(JitSpew_Escape, "Begin (ScalarReplacement)");

  // Reusable state emulators; each `run` walks the graph with the given
  // memory view (see EmulateStateOf above).
  EmulateStateOf<ObjectMemoryView> replaceObject(mir, graph);
  EmulateStateOf<ArrayMemoryView> replaceArray(mir, graph);
  EmulateStateOf<WasmStructMemoryView> replaceWasmStructs(mir, graph);
  // Tracks whether any view inserted phis, so we can clean them up at the end.
  bool addedPhi = false;

  for (ReversePostorderIterator block = graph.rpoBegin();
       block != graph.rpoEnd(); block++) {
    if (mir->shouldCancel("Scalar Replacement (main loop)")) {
      return false;
    }

    for (MInstructionIterator ins = block->begin(); ins != block->end();
         ins++) {
      // Plain object allocations: replaced via ObjectMemoryView when the
      // escape analysis proves the object never escapes.
      if (IsOptimizableObjectInstruction(*ins) && !IsObjectEscaped(*ins, *ins)) {
        ObjectMemoryView view(graph.alloc(), *ins);
        if (!replaceObject.run(view)) {
          return false;
        }
        view.assertSuccess();
        addedPhi = true;
        continue;
      }

      // Array allocations, same scheme as objects.
      if (IsOptimizableArrayInstruction(*ins) && !IsArrayEscaped(*ins, *ins)) {
        ArrayMemoryView view(graph.alloc(), *ins);
        if (!replaceArray.run(view)) {
          return false;
        }
        view.assertSuccess();
        addedPhi = true;
        continue;
      }

      // `arguments` objects use a dedicated replacer which performs its own
      // escape analysis.
      if (IsOptimizableArgumentsInstruction(*ins)) {
        ArgumentsReplacer replacer(mir, graph, *ins);
        if (replacer.escapes(*ins)) {
          continue;
        }
        if (!replacer.run()) {
          return false;
        }
        continue;
      }

      // Rest-parameter arrays.
      if (IsOptimizableRestInstruction(*ins)) {
        RestReplacer replacer(mir, graph, *ins);
        if (replacer.escapes(*ins)) {
          continue;
        }
        if (!replacer.run()) {
          return false;
        }
        continue;
      }

      // Typed-array subarrays.
      if (IsOptimizableSubarrayInstruction(*ins)) {
        SubarrayReplacer replacer(mir, graph, *ins);
        if (replacer.escapes(*ins)) {
          continue;
        }
        if (!replacer.run()) {
          return false;
        }
        continue;
      }

      if
 (IsOptimizableWasmStructInstruction(*ins) &&
          !IsWasmStructEscaped(*ins, *ins)) {
        // Non-escaping wasm struct allocations, same scheme as JS objects.
        WasmStructMemoryView view(graph.alloc(), *ins);
        if (!replaceWasmStructs.run(view)) {
          return false;
        }
        view.assertSuccess();
        addedPhi = true;
        continue;
      }
    }
  }

  if (addedPhi) {
    // Phis added by Scalar Replacement are only redundant Phis which are
    // not directly captured by any resume point but only by the MDefinition
    // state. The conservative observability only focuses on Phis which are
    // not used as resume points operands.
    AssertExtendedGraphCoherency(graph);
    if (!EliminatePhis(mir, graph, ConservativeObservability)) {
      return false;
    }
  }

  return true;
}

// Entry point of the Object.keys replacement pass: rewrite non-escaping
// Object.keys(...) result arrays in terms of a property iterator (see
// ObjectKeysReplacer above). Returns false on OOM or cancellation.
bool ReplaceObjectKeys(const MIRGenerator* mir, MIRGraph& graph) {
  JitSpew(JitSpew_Escape, "Begin (Object.Keys Replacement)");

  for (ReversePostorderIterator block = graph.rpoBegin();
       block != graph.rpoEnd(); block++) {
    if (mir->shouldCancel("Object.Keys Replacement (main loop)")) {
      return false;
    }

    for (MInstructionIterator ins = block->begin(); ins != block->end();
         ins++) {
      if (IsOptimizableObjectKeysInstruction(*ins)) {
        ObjectKeysReplacer replacer(mir, graph, *ins);
        // Only replace when neither the array nor its elements escape.
        if (replacer.escapes(*ins)) {
          continue;
        }
        // Pass the iterator by reference: run() may discard the current
        // instruction and must rewind the iterator accordingly.
        if (!replacer.run(ins)) {
          return false;
        }
        continue;
      }
    }
  }

  return true;
}

} /* namespace jit */
} /* namespace js */