tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

WasmIonCompile.cpp (363792B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 *
      4 * Copyright 2015 Mozilla Foundation
      5 *
      6 * Licensed under the Apache License, Version 2.0 (the "License");
      7 * you may not use this file except in compliance with the License.
      8 * You may obtain a copy of the License at
      9 *
     10 *     http://www.apache.org/licenses/LICENSE-2.0
     11 *
     12 * Unless required by applicable law or agreed to in writing, software
     13 * distributed under the License is distributed on an "AS IS" BASIS,
     14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     15 * See the License for the specific language governing permissions and
     16 * limitations under the License.
     17 */
     18 
     19 #include "wasm/WasmIonCompile.h"
     20 
     21 #include "mozilla/DebugOnly.h"
     22 #include "mozilla/MathAlgorithms.h"
     23 
     24 #include <algorithm>
     25 
     26 #include "jit/ABIArgGenerator.h"
     27 #include "jit/CodeGenerator.h"
     28 #include "jit/CompileInfo.h"
     29 #include "jit/Ion.h"
     30 #include "jit/IonOptimizationLevels.h"
     31 #include "jit/MIR-wasm.h"
     32 #include "jit/MIR.h"
     33 #include "jit/ShuffleAnalysis.h"
     34 #include "js/GCAPI.h"       // JS::AutoSuppressGCAnalysis
     35 #include "js/ScalarType.h"  // js::Scalar::Type
     36 #include "util/DifferentialTesting.h"
     37 #include "wasm/WasmBaselineCompile.h"
     38 #include "wasm/WasmBuiltinModule.h"
     39 #include "wasm/WasmBuiltins.h"
     40 #include "wasm/WasmCodegenTypes.h"
     41 #include "wasm/WasmGC.h"
     42 #include "wasm/WasmGcObject.h"
     43 #include "wasm/WasmGenerator.h"
     44 #include "wasm/WasmOpIter.h"
     45 #include "wasm/WasmSignalHandlers.h"
     46 #include "wasm/WasmStubs.h"
     47 #include "wasm/WasmValidate.h"
     48 
     49 using namespace js;
     50 using namespace js::jit;
     51 using namespace js::wasm;
     52 
     53 using mozilla::IsPowerOfTwo;
     54 using mozilla::Nothing;
     55 
     56 namespace {
     57 
     58 using UniqueCompileInfo = UniquePtr<CompileInfo>;
     59 using UniqueCompileInfoVector = Vector<UniqueCompileInfo, 1, SystemAllocPolicy>;
     60 
     61 using BlockVector = Vector<MBasicBlock*, 8, SystemAllocPolicy>;
     62 using DefVector = Vector<MDefinition*, 8, SystemAllocPolicy>;
     63 using ControlInstructionVector =
     64    Vector<MControlInstruction*, 8, SystemAllocPolicy>;
     65 
     66 // [SMDOC] WebAssembly Exception Handling in Ion
     67 // =======================================================
     68 //
     69 // ## Throwing instructions
     70 //
     71 // Wasm exceptions can be thrown by either a throw instruction (local throw),
     72 // or by a wasm call.
     73 //
     74 // ## The "catching try control"
     75 //
     76 // We know we are in try-code if there is a surrounding ControlItem with
     77 // LabelKind::Try. The innermost such control is called the
     78 // "catching try control".
     79 //
     80 // ## Throws without a catching try control
     81 //
     82 // Such throws are implemented with an instance call that triggers the exception
     83 // unwinding runtime. The exception unwinding runtime will not return to the
     84 // function.
     85 //
     86 // ## "landing pad" and "pre-pad" blocks
     87 //
     88 // When an exception is thrown, the unwinder will search for the nearest
     89 // enclosing try block and redirect control flow to it. The code that executes
     90 // before any catch blocks is called the 'landing pad'. The 'landing pad' is
     91 // responsible to:
     92 //   1. Consume the pending exception state from
     93 //      Instance::pendingException(Tag)
     94 //   2. Branch to the correct catch block, or else rethrow
     95 //
     96 // There is one landing pad for each try block. The immediate predecessors of
     97 // the landing pad are called 'pre-pad' blocks. There is one pre-pad block per
     98 // throwing instruction.
     99 //
    100 // ## Creating pre-pad blocks
    101 //
    102 // There are two possible sorts of pre-pad blocks, depending on whether we
    103 // are branching after a local throw instruction, or after a wasm call:
    104 //
    105 // - If we encounter a local throw, we create the exception and tag objects,
    106 //   store them to Instance::pendingException(Tag), and then jump to the
    107 //   landing pad.
    108 //
    109 // - If we encounter a wasm call, we construct a MWasmCallCatchable which is a
    110 //   control instruction with either a branch to a fallthrough block or
    111 //   to a pre-pad block.
    112 //
    113 //   The pre-pad block for a wasm call is empty except for a jump to the
    114 //   landing pad. It only exists to avoid critical edges which when split would
    115 //   violate the invariants of MWasmCallCatchable. The pending exception state
    116 //   is taken care of by the unwinder.
    117 //
    118 // Each pre-pad ends with a pending jump to the landing pad. The pending jumps
    119 // to the landing pad are tracked in `tryPadPatches`. These are called
    120 // "pad patches".
    121 //
    122 // ## Creating the landing pad
    123 //
    124 // When we exit try-code, we check if tryPadPatches has captured any control
    125 // instructions (pad patches). If not, we don't compile any catches and we mark
    126 // the rest as dead code.
    127 //
    128 // If there are pre-pad blocks, we join them to create a landing pad (or just
    129 // "pad"). The pad's last two slots are the caught exception, and the
    130 // exception's tag object.
    131 //
    132 // There are three different forms of try-catch/catch_all Wasm instructions,
    133 // which result in different form of landing pad.
    134 //
    135 // 1. A catchless try, so a Wasm instruction of the form "try ... end".
    136 //    - In this case, we end the pad by rethrowing the caught exception.
    137 //
    138 // 2. A single catch_all after a try.
    139 //    - If the first catch after a try is a catch_all, then there won't be
    140 //      any more catches, but we need the exception and its tag object, in
    141 //      case the code in a catch_all contains "rethrow" instructions.
    142 //      - The Wasm instruction "rethrow", gets the exception and tag object to
    143 //        rethrow from the last two slots of the landing pad which, due to
    144 //        validation, is the l'th surrounding ControlItem.
    145 //      - We immediately GoTo to a new block after the pad and pop both the
    146 //        exception and tag object, as we don't need them anymore in this case.
    147 //
    148 // 3. Otherwise, there is one or more catch code blocks following.
    149 //    - In this case, we construct the landing pad by creating a sequence
    150 //      of compare and branch blocks that compare the pending exception tag
    151 //      object to the tag object of the current tagged catch block. This is
    152 //      done incrementally as we visit each tagged catch block in the bytecode
    153 //      stream. At every step, we update the ControlItem's block to point to
    154 //      the next block to be created in the landing pad sequence. The final
    155 //      block will either be a rethrow, if there is no catch_all, or else a
    156 //      jump to a catch_all block.
    157 
    158 struct TryControl {
    159  // Branches to bind to the try's landing pad.
    160  ControlInstructionVector landingPadPatches;
    161  // For `try_table`, the list of tagged catches and labels to branch to.
    162  TryTableCatchVector catches;
    163  // The pending exception for the try's landing pad.
    164  MDefinition* pendingException;
    165  // The pending exception's tag for the try's landing pad.
    166  MDefinition* pendingExceptionTag;
    167  // Whether this try is in the body and should catch any thrown exception.
    168  bool inBody;
    169 
    170  TryControl()
    171      : pendingException(nullptr),
    172        pendingExceptionTag(nullptr),
    173        inBody(false) {}
    174 
    175  // Reset the try control for when it is cached in FunctionCompiler.
    176  void reset() {
    177    landingPadPatches.clearAndFree();
    178    catches.clearAndFree();
    179    inBody = false;
    180  }
    181 };
    182 using UniqueTryControl = UniquePtr<TryControl>;
    183 using VectorUniqueTryControl = Vector<UniqueTryControl, 2, SystemAllocPolicy>;
    184 
    185 struct ControlFlowPatch {
    186  MControlInstruction* ins;
    187  uint32_t index;
    188  ControlFlowPatch(MControlInstruction* ins, uint32_t index)
    189      : ins(ins), index(index) {}
    190 };
    191 
    192 using ControlFlowPatchVector = Vector<ControlFlowPatch, 0, SystemAllocPolicy>;
    193 
    194 struct PendingBlockTarget {
    195  ControlFlowPatchVector patches;
    196  BranchHint hint = BranchHint::Invalid;
    197 };
    198 
    199 using PendingBlockTargetVector =
    200    Vector<PendingBlockTarget, 0, SystemAllocPolicy>;
    201 
    202 // Inlined functions accumulate all returns to be bound to a caller function
    203 // after compilation is finished.
    204 struct PendingInlineReturn {
    205  PendingInlineReturn(MGoto* jump, DefVector&& results)
    206      : jump(jump), results(std::move(results)) {}
    207 
    208  MGoto* jump;
    209  DefVector results;
    210 };
    211 
    212 using PendingInlineReturnVector =
    213    Vector<PendingInlineReturn, 1, SystemAllocPolicy>;
    214 
// CallCompileState describes a call that is being compiled.
struct CallCompileState {
  // A generator object that is passed each argument as it is compiled.
  ABIArgGenerator abi;

  // Whether we pass FP values through GPRs or FPRs. Defaults to true (FPRs,
  // the wasm ABI); for the system ABI this follows the platform setting,
  // see the constructor below.
  bool hardFP = true;

  // The ABI we are using for this call.
  ABIKind abiKind;

  // Accumulates the register arguments while compiling arguments.
  MWasmCallBase::Args regArgs;

  // Reserved argument for passing Instance* to builtin instance method calls.
  ABIArg instanceArg;

  // The stack area in which the callee will write stack return values, or
  // nullptr if no stack results.
  MWasmStackResultArea* stackResultArea = nullptr;

  // Indicates that the call is a return/tail call.
  bool returnCall = false;

  // The landing pad patches for the nearest enclosing try-catch. This is
  // non-null iff the call is catchable.
  ControlInstructionVector* tryLandingPadPatches = nullptr;

  // The index of the try note for a catchable call. UINT32_MAX when the
  // call is not catchable.
  uint32_t tryNoteIndex = UINT32_MAX;

  // The block to take for fallthrough execution for a catchable call.
  MBasicBlock* fallthroughBlock = nullptr;

  // The block to take for exceptional execution for a catchable call.
  MBasicBlock* prePadBlock = nullptr;

  explicit CallCompileState(ABIKind abiKind) : abi(abiKind), abiKind(abiKind) {
    if (abiKind == ABIKind::System) {
      // The system ABI follows the hardFP setting on ARM32.
#if defined(JS_CODEGEN_ARM)
      hardFP = ARMFlags::UseHardFpABI();
      abi.setUseHardFp(hardFP);
#endif
    } else {
#if defined(JS_CODEGEN_ARM)
      // The wasm ABI always passes FP arguments in FP registers, which the
      // in-class initializer of hardFP already assumes.
      MOZ_ASSERT(hardFP, "The WASM ABI passes FP arguments in registers");
#endif
    }
  }

  // True iff an exception thrown by this call can be caught within the
  // function (i.e. there is an enclosing try to land in).
  bool isCatchable() const { return tryLandingPadPatches != nullptr; }
};
    268 
    269 struct Control {
    270  MBasicBlock* block;
    271  UniqueTryControl tryControl;
    272 
    273  Control() : block(nullptr), tryControl(nullptr) {}
    274  Control(Control&&) = default;
    275  Control(const Control&) = delete;
    276 };
    277 
// Policy type plugged into OpIter (see IonOpIter below) that tells the
// generic wasm bytecode iterator what this compiler keeps on its value and
// control stacks.
struct IonCompilePolicy {
  // We store SSA definitions in the value stack.
  using Value = MDefinition*;
  using ValueVector = DefVector;

  // We store loop headers and then/else blocks in the control flow stack.
  // In the case of try-catch control blocks, we collect additional information
  // regarding the possible paths from throws and calls to a landing pad, as
  // well as information on the landing pad's handlers (its catches).
  using ControlItem = Control;
};
    289 
    290 using IonOpIter = OpIter<IonCompilePolicy>;
    291 
// Statistics for inlining (at all depths) into the root function. "Direct"
// counters track statically-targeted calls; "CallRef" counters track
// `call_ref` (indirect) inlinees.
struct InliningStats {
  size_t inlinedDirectBytecodeSize = 0;   // sum of sizes of inlinees
  size_t inlinedDirectFunctions = 0;      // number of inlinees
  size_t inlinedCallRefBytecodeSize = 0;  // sum of sizes of inlinees
  size_t inlinedCallRefFunctions = 0;     // number of inlinees
  bool largeFunctionBackoff = false;      // did large function backoff happen?
};
    300 
// Encapsulates the generation of MIR for a wasm function and any functions
// that become inlined into it.
class RootCompiler {
  // Compile-wide environment and module metadata (shared, read-only).
  const CompilerEnvironment& compilerEnv_;
  const CodeMetadata& codeMeta_;
  const CodeTailMetadata* codeTailMeta_;

  // The root function's locals, bytecode input, and decoder.
  const ValTypeVector& locals_;
  const FuncCompileInput& func_;
  Decoder& decoder_;
  FeatureUsage observedFeatures_;

  // MIR generation state. NOTE: declaration order matters here — mirGen_ is
  // constructed from options_, alloc_, mirGraph_ and compileInfo_, so those
  // members must be declared (and thus initialized) before it.
  CompileInfo compileInfo_;
  const JitCompileOptions options_;
  TempAllocator& alloc_;
  MIRGraph mirGraph_;
  MIRGenerator mirGen_;

  // The current loop depth we're generating inside of. This includes all
  // callee functions when we're generating an inlined function, and so it
  // lives here on the root compiler.
  uint32_t loopDepth_;

  // The current stack of bytecode offsets of the caller functions of the
  // function currently being inlined.
  BytecodeOffsetVector inlinedCallerOffsets_;
  InlinedCallerOffsetIndex inlinedCallerOffsetsIndex_;

  // Compilation statistics for this function.
  CompileStats funcStats_;

  // Accumulated inlining statistics for this function.
  InliningStats inliningStats_;
  // The remaining inlining budget, in terms of bytecode bytes. This may go
  // negative and so is signed.
  int64_t localInliningBudget_;

  // All jit::CompileInfo objects created during this compilation. This must
  // be kept alive for as long as the MIR graph is alive.
  UniqueCompileInfoVector compileInfos_;

  // Cache of TryControl to minimize heap allocations.
  VectorUniqueTryControl tryControlCache_;

  // Reference to masm.tryNotes()
  wasm::TryNoteVector& tryNotes_;

  // Reference to masm.inliningContext()
  wasm::InliningContext& inliningContext_;

 public:
  RootCompiler(const CompilerEnvironment& compilerEnv,
               const CodeMetadata& codeMeta,
               const CodeTailMetadata* codeTailMeta, TempAllocator& alloc,
               const ValTypeVector& locals, const FuncCompileInput& func,
               Decoder& decoder, wasm::TryNoteVector& tryNotes,
               wasm::InliningContext& inliningContext)
      : compilerEnv_(compilerEnv),
        codeMeta_(codeMeta),
        codeTailMeta_(codeTailMeta),
        locals_(locals),
        func_(func),
        decoder_(decoder),
        observedFeatures_(FeatureUsage::None),
        compileInfo_(locals.length()),
        // options_ is default-constructed (absent from this list); it is
        // declared before mirGen_, which consumes it just below.
        alloc_(alloc),
        mirGraph_(&alloc),
        mirGen_(nullptr, options_, &alloc_, &mirGraph_, &compileInfo_,
                IonOptimizations.get(OptimizationLevel::Wasm), &codeMeta),
        loopDepth_(0),
        localInliningBudget_(0),
        tryNotes_(tryNotes),
        inliningContext_(inliningContext) {}

  // Simple accessors for compile-wide state.
  const CompilerEnvironment& compilerEnv() const { return compilerEnv_; }
  const CodeMetadata& codeMeta() const { return codeMeta_; }
  const CodeTailMetadata* codeTailMeta() const { return codeTailMeta_; }
  const FuncCompileInput& func() const { return func_; }
  TempAllocator& alloc() { return alloc_; }
  MIRGraph& mirGraph() { return mirGraph_; }
  MIRGenerator& mirGen() { return mirGen_; }
  int64_t inliningBudget() const { return localInliningBudget_; }
  FeatureUsage observedFeatures() const { return observedFeatures_; }
  const CompileStats& funcStats() const { return funcStats_; }
  void noteLargeFunctionBackoffWasApplied() {
    inliningStats_.largeFunctionBackoff = true;
  }

  // Loop depth tracking; shared across inlinees (see loopDepth_ above).
  uint32_t loopDepth() const { return loopDepth_; }
  void startLoop() { loopDepth_++; }
  void closeLoop() { loopDepth_--; }

  [[nodiscard]] bool generate();

  InlinedCallerOffsetIndex inlinedCallerOffsetsIndex() const {
    return inlinedCallerOffsetsIndex_;
  }

  // Add a compile info for an inlined function. This keeps the inlined
  // function's compile info alive for the outermost function's
  // compilation.
  [[nodiscard]] CompileInfo* startInlineCall(
      uint32_t callerFuncIndex, BytecodeOffset callerOffset,
      uint32_t calleeFuncIndex, uint32_t numLocals, size_t inlineeBytecodeSize,
      InliningHeuristics::CallKind callKind);
  void finishInlineCall();

  // Add a try note and return the index.
  [[nodiscard]] bool addTryNote(uint32_t* tryNoteIndex) {
    if (!tryNotes_.append(wasm::TryNote())) {
      return false;
    }
    *tryNoteIndex = tryNotes_.length() - 1;
    return true;
  }

  // Try to get a free TryControl from the cache, or allocate a new one.
  // NOTE: js_new may return nullptr on OOM; callers must check the result.
  [[nodiscard]] UniqueTryControl newTryControl() {
    if (tryControlCache_.empty()) {
      return UniqueTryControl(js_new<TryControl>());
    }
    UniqueTryControl tryControl = std::move(tryControlCache_.back());
    tryControlCache_.popBack();
    return tryControl;
  }

  // Release the TryControl to the cache.
  void freeTryControl(UniqueTryControl&& tryControl) {
    // Ensure that it's in a consistent state
    tryControl->reset();
    // Ignore any OOM, as we'll fail later
    (void)tryControlCache_.append(std::move(tryControl));
  }
};
    435 
    436 // Encapsulates the generation of MIR for a single function in a wasm module.
    437 class FunctionCompiler {
    438  // The root function compiler we are being compiled within.
    439  RootCompiler& rootCompiler_;
    440 
    441  // The caller function compiler, if any, that we are being inlined into.
    442  // Note that `inliningDepth_` is zero for the first inlinee, one for the
    443  // second inlinee, etc.
    444  const FunctionCompiler* callerCompiler_;
    445  const uint32_t inliningDepth_;
    446 
    447  // Information about this function's bytecode and parsing state
    448  IonOpIter iter_;
    449  uint32_t functionBodyOffset_;
    450  const FuncCompileInput& func_;
    451  const ValTypeVector& locals_;
    452  size_t lastReadCallSite_;
    453  size_t numCallRefs_;
    454  size_t numAllocSites_;
    455 
    456  // CompileInfo for compiling the MIR for this function. Allocated inside of
    457  // RootCompiler::compileInfos, and kept alive for the duration of the
    458  // total compilation.
    459  const jit::CompileInfo& info_;
    460 
    461  MBasicBlock* curBlock_;
    462  uint32_t maxStackArgBytes_;
    463 
    464  // When generating a forward branch we haven't created the basic block that
    465  // the branch needs to target. We handle this by accumulating all the branch
    466  // instructions that want to target a block we have not yet created into
    467  // `pendingBlocks_` and then patching them in `bindBranches`.
    468  //
    469  // For performance reasons we only grow `pendingBlocks_` as needed, never
    470  // shrink it. So the length of the vector has no relation to the current
    471  // nesting depth of wasm blocks. We use `pendingBlockDepth_` to track the
    472  // current wasm block depth. We assert that all entries beyond the current
    473  // block depth are empty.
    474  uint32_t pendingBlockDepth_;
    475  PendingBlockTargetVector pendingBlocks_;
    476  // Control flow patches for exceptions that are caught without a landing
    477  // pad they can directly jump to. This happens when either:
    478  //  (1) `delegate` targets the function body label.
    479  //  (2) A `try` ends without any cases, and there is no enclosing `try`.
    480  //  (3) There is no `try` in this function, but a caller function (when
    481  //      inlining) has a `try`.
    482  //
    483  // These exceptions will be rethrown using `emitBodyRethrowPad`.
    484  ControlInstructionVector bodyRethrowPadPatches_;
    485  // A vector of the returns in this function for use when we're being inlined
    486  // into another function.
    487  PendingInlineReturnVector pendingInlineReturns_;
    488  // A block that all uncaught exceptions in this function will jump to. The
    489  // inline caller will link this to the nearest enclosing catch handler.
    490  MBasicBlock* pendingInlineCatchBlock_;
    491 
    492  // Instance pointer argument to the current function.
    493  MWasmParameter* instancePointer_;
    494  MWasmParameter* stackResultPointer_;
    495 
    496 public:
  // Construct a FunctionCompiler for the root function of a compilation
  FunctionCompiler(RootCompiler& rootCompiler, Decoder& decoder,
                   const FuncCompileInput& func, const ValTypeVector& locals,
                   const CompileInfo& compileInfo)
      : rootCompiler_(rootCompiler),
        callerCompiler_(nullptr),  // the root is not inlined into anything
        inliningDepth_(0),
        iter_(rootCompiler.codeMeta(), decoder, locals),
        functionBodyOffset_(decoder.beginOffset()),
        func_(func),
        locals_(locals),
        lastReadCallSite_(0),
        numCallRefs_(0),
        numAllocSites_(0),
        info_(compileInfo),
        curBlock_(nullptr),
        maxStackArgBytes_(0),
        pendingBlockDepth_(0),
        pendingInlineCatchBlock_(nullptr),
        instancePointer_(nullptr),  // created later, in initRoot()
        stackResultPointer_(nullptr) {}
    518 
  // Construct a FunctionCompiler for an inlined callee of a compilation
  FunctionCompiler(const FunctionCompiler* callerCompiler, Decoder& decoder,
                   const FuncCompileInput& func, const ValTypeVector& locals,
                   const CompileInfo& compileInfo)
      : rootCompiler_(callerCompiler->rootCompiler_),
        callerCompiler_(callerCompiler),
        inliningDepth_(callerCompiler_->inliningDepth() + 1),
        iter_(rootCompiler_.codeMeta(), decoder, locals),
        functionBodyOffset_(decoder.beginOffset()),
        func_(func),
        locals_(locals),
        lastReadCallSite_(0),
        numCallRefs_(0),
        numAllocSites_(0),
        info_(compileInfo),
        curBlock_(nullptr),
        maxStackArgBytes_(0),
        pendingBlockDepth_(0),
        pendingInlineCatchBlock_(nullptr),
        // Inlined code runs in the caller's instance, so share its pointer.
        instancePointer_(callerCompiler_->instancePointer_),
        stackResultPointer_(nullptr) {}
    540 
  RootCompiler& rootCompiler() { return rootCompiler_; }
  const CodeMetadata& codeMeta() const { return rootCompiler_.codeMeta(); }
  const CodeTailMetadata* codeTailMeta() const {
    return rootCompiler_.codeTailMeta();
  }

  IonOpIter& iter() { return iter_; }
  // Current read offset relative to the start of this function's body.
  uint32_t relativeBytecodeOffset() {
    return readBytecodeOffset() - functionBodyOffset_;
  }
  TempAllocator& alloc() const { return rootCompiler_.alloc(); }
  // FIXME(1401675): Replace with BlockType.
  uint32_t funcIndex() const { return func_.index; }
  const FuncType& funcType() const {
    return codeMeta().getFuncType(func_.index);
  }

  // True iff this compiler is generating an inlined callee.
  bool isInlined() const { return callerCompiler_ != nullptr; }
  uint32_t inliningDepth() const { return inliningDepth_; }

  MBasicBlock* getCurBlock() const { return curBlock_; }
  BytecodeOffset bytecodeOffset() const { return iter_.bytecodeOffset(); }
  // Describe the current bytecode position (plus the inline-caller chain)
  // for attachment to a trap site.
  TrapSiteDesc trapSiteDesc() {
    return TrapSiteDesc(wasm::BytecodeOffset(bytecodeOffset()),
                        rootCompiler_.inlinedCallerOffsetsIndex());
  }
  // As above, but keyed on the call-site line/bytecode read from the input.
  TrapSiteDesc trapSiteDescWithCallSiteLineNumber() {
    return TrapSiteDesc(wasm::BytecodeOffset(readCallSiteLineOrBytecode()),
                        rootCompiler_.inlinedCallerOffsetsIndex());
  }
  FeatureUsage featureUsage() const { return iter_.featureUsage(); }
    572 
  // Build the entry block for the root function: materialize MWasmParameter
  // nodes for each ABI argument (including the synthetic stack-result
  // pointer and the hidden instance pointer), and zero-initialize the
  // remaining locals. Returns false on OOM.
  [[nodiscard]] bool initRoot() {
    // We are not being inlined into something
    MOZ_ASSERT(!callerCompiler_);

    // Prepare the entry block for MIR generation:

    const FuncType& ft = funcType();
    const ArgTypeVector args(ft);

    if (!mirGen().ensureBallast()) {
      return false;
    }
    if (!newBlock(/* prev */ nullptr, &curBlock_)) {
      return false;
    }

    for (ABIArgIter i(args, ABIKind::Wasm); !i.done(); i++) {
      // Only real (non-synthetic) arguments can carry a reference type.
      MaybeRefType argRefType;
      if (!args.isSyntheticStackResultPointerArg(i.index())) {
        ValType argType = ft.arg(i.index());
        argRefType = argType.isRefType() ? MaybeRefType(argType.refType())
                                         : MaybeRefType();
      }

      MWasmParameter* ins =
          MWasmParameter::New(alloc(), *i, i.mirType(), argRefType);
      curBlock_->add(ins);
      if (args.isSyntheticStackResultPointerArg(i.index())) {
        // The synthetic stack-result pointer is kept aside, not in a local.
        MOZ_ASSERT(stackResultPointer_ == nullptr);
        stackResultPointer_ = ins;
      } else {
        curBlock_->initSlot(info().localSlot(args.naturalIndex(i.index())),
                            ins);
      }
      if (!mirGen().ensureBallast()) {
        return false;
      }
    }

    // Set up a parameter that receives the hidden instance pointer argument.
    instancePointer_ =
        MWasmParameter::New(alloc(), ABIArg(InstanceReg), MIRType::Pointer);
    curBlock_->add(instancePointer_);
    if (!mirGen().ensureBallast()) {
      return false;
    }

    // Non-argument locals start out zeroed, per wasm semantics.
    for (size_t i = args.lengthWithoutStackResults(); i < locals_.length();
         i++) {
      ValType slotValType = locals_[i];
#ifndef ENABLE_WASM_SIMD
      if (slotValType == ValType::V128) {
        return iter().fail("Ion has no SIMD support yet");
      }
#endif
      MDefinition* zero = constantZeroOfValType(slotValType);
      curBlock_->initSlot(info().localSlot(i), zero);
      if (!mirGen().ensureBallast()) {
        return false;
      }
    }

    return true;
  }
    637 
  // Build the entry block for an inlined callee: link it to the caller's
  // current block, seed argument slots from the caller-supplied values, and
  // zero-initialize the remaining locals. Returns false on OOM.
  [[nodiscard]] bool initInline(const DefVector& argValues) {
    // "This is an inlined-callee FunctionCompiler"
    MOZ_ASSERT(callerCompiler_);

    // Prepare the entry block for MIR generation:
    if (!mirGen().ensureBallast()) {
      return false;
    }
    if (!newBlock(nullptr, &curBlock_)) {
      return false;
    }

    // Flow straight from the caller's current block into the inline entry;
    // a single predecessor means no phis are needed.
    MBasicBlock* pred = callerCompiler_->curBlock_;
    pred->end(MGoto::New(alloc(), curBlock_));
    if (!curBlock_->addPredecessorWithoutPhis(pred)) {
      return false;
    }

    // Set up args slots to point to passed argument values
    const FuncType& type = funcType();
    for (uint32_t argIndex = 0; argIndex < type.args().length(); argIndex++) {
      curBlock_->initSlot(info().localSlot(argIndex), argValues[argIndex]);
    }

    // Set up a parameter that receives the hidden instance pointer argument.
    instancePointer_ = callerCompiler_->instancePointer_;

    // Initialize all local slots to zero value
    for (size_t i = type.args().length(); i < locals_.length(); i++) {
      ValType slotValType = locals_[i];
#ifndef ENABLE_WASM_SIMD
      if (slotValType == ValType::V128) {
        return iter().fail("Ion has no SIMD support yet");
      }
#endif
      MDefinition* zero = constantZeroOfValType(slotValType);
      curBlock_->initSlot(info().localSlot(i), zero);
      if (!mirGen().ensureBallast()) {
        return false;
      }
    }

    return true;
  }
    682 
  // Called once MIR generation for this function is complete. Folds this
  // function's stack-argument requirement into the MIR generator, then
  // asserts that all per-function state was fully consumed (all patches
  // bound, all call sites read, all inline returns resolved).
  void finish() {
    mirGen().accumulateWasmMaxStackArgBytes(maxStackArgBytes_);

    MOZ_ASSERT(pendingBlockDepth_ == 0);
#ifdef DEBUG
    for (PendingBlockTarget& targets : pendingBlocks_) {
      MOZ_ASSERT(targets.patches.empty());
    }
#endif
    MOZ_ASSERT(inDeadCode());
    MOZ_ASSERT(done());
    MOZ_ASSERT(func_.callSiteLineNums.length() == lastReadCallSite_);
    MOZ_ASSERT_IF(
        compilerEnv().mode() == CompileMode::LazyTiering,
        codeTailMeta()->getFuncDefCallRefs(funcIndex()).length == numCallRefs_);
    MOZ_ASSERT_IF(codeTailMeta(),
                  codeTailMeta()->getFuncDefAllocSites(funcIndex()).length ==
                      numAllocSites_);
    MOZ_ASSERT_IF(!isInlined(),
                  pendingInlineReturns_.empty() && !pendingInlineCatchBlock_);
    MOZ_ASSERT(bodyRethrowPadPatches_.empty());
  }
    705 
  /************************* Read-only interface (after local scope setup) */

  MIRGenerator& mirGen() const { return rootCompiler_.mirGen(); }
  MIRGraph& mirGraph() const { return rootCompiler_.mirGraph(); }
  const CompileInfo& info() const { return info_; }
  const CompilerEnvironment& compilerEnv() const {
    return rootCompiler_.compilerEnv();
  }

  // Current SSA definition of local `slot`, or nullptr in dead code.
  MDefinition* getLocalDef(unsigned slot) {
    if (inDeadCode()) {
      return nullptr;
    }
    return curBlock_->getSlot(info().localSlot(slot));
  }

  const ValTypeVector& locals() const { return locals_; }
    723 
    724  /*********************************************************** Constants ***/
    725 
    726  MDefinition* constantF32(float f) {
    727    if (inDeadCode()) {
    728      return nullptr;
    729    }
    730    auto* cst = MWasmFloatConstant::NewFloat32(alloc(), f);
    731    curBlock_->add(cst);
    732    return cst;
    733  }
    734  // Hide all other overloads, to guarantee no implicit argument conversion.
    735  template <typename T>
    736  MDefinition* constantF32(T) = delete;
    737 
    738  MDefinition* constantF64(double d) {
    739    if (inDeadCode()) {
    740      return nullptr;
    741    }
    742    auto* cst = MWasmFloatConstant::NewDouble(alloc(), d);
    743    curBlock_->add(cst);
    744    return cst;
    745  }
    746  template <typename T>
    747  MDefinition* constantF64(T) = delete;
    748 
    749  MDefinition* constantI32(int32_t i) {
    750    if (inDeadCode()) {
    751      return nullptr;
    752    }
    753    MConstant* constant = MConstant::NewInt32(alloc(), i);
    754    curBlock_->add(constant);
    755    return constant;
    756  }
    757  template <typename T>
    758  MDefinition* constantI32(T) = delete;
    759 
    760  MDefinition* constantI64(int64_t i) {
    761    if (inDeadCode()) {
    762      return nullptr;
    763    }
    764    MConstant* constant = MConstant::NewInt64(alloc(), i);
    765    curBlock_->add(constant);
    766    return constant;
    767  }
    768  template <typename T>
    769  MDefinition* constantI64(T) = delete;
    770 
    771  // Produce an MConstant of the machine's target int type (Int32 or Int64).
    772  MDefinition* constantTargetWord(intptr_t n) {
    773    return targetIs64Bit() ? constantI64(int64_t(n)) : constantI32(int32_t(n));
    774  }
    775  template <typename T>
    776  MDefinition* constantTargetWord(T) = delete;
    777 
#ifdef ENABLE_WASM_SIMD
  // Emit a v128 constant into the current block; nullptr in dead code.
  MDefinition* constantV128(V128 v) {
    if (inDeadCode()) {
      return nullptr;
    }
    MWasmFloatConstant* constant = MWasmFloatConstant::NewSimd128(
        alloc(), SimdConstant::CreateSimd128((int8_t*)v.bytes));
    curBlock_->add(constant);
    return constant;
  }
  // Hide all other overloads, to guarantee no implicit argument conversion.
  template <typename T>
  MDefinition* constantV128(T) = delete;
#endif
    791 
    792  MDefinition* constantNullRef(MaybeRefType type) {
    793    if (inDeadCode()) {
    794      return nullptr;
    795    }
    796    // MConstant has a lot of baggage so we don't use that here.
    797    MWasmNullConstant* constant = MWasmNullConstant::New(alloc(), type);
    798    curBlock_->add(constant);
    799    return constant;
    800  }
    801 
  // Produce a zero constant for the specified ValType: 0 for the numeric
  // types, an all-zero v128, or a null for reference types.
  MDefinition* constantZeroOfValType(ValType valType) {
    switch (valType.kind()) {
      case ValType::I32:
        return constantI32(0);
      case ValType::I64:
        return constantI64(int64_t(0));
#ifdef ENABLE_WASM_SIMD
      case ValType::V128:
        return constantV128(V128(0));
#endif
      case ValType::F32:
        return constantF32(0.0f);
      case ValType::F64:
        return constantF64(0.0);
      case ValType::Ref:
        return constantNullRef(MaybeRefType(valType.refType()));
      default:
        MOZ_CRASH();
    }
  }
    823 
    824  /***************************** Code generation (after local scope setup) */
    825 
    826  void fence() {
    827    if (inDeadCode()) {
    828      return;
    829    }
    830    MWasmFence* ins = MWasmFence::New(alloc());
    831    curBlock_->add(ins);
    832  }
    833 
    834  template <class T>
    835  MDefinition* unary(MDefinition* op) {
    836    if (inDeadCode()) {
    837      return nullptr;
    838    }
    839    T* ins = T::New(alloc(), op);
    840    curBlock_->add(ins);
    841    return ins;
    842  }
    843 
    844  template <class T>
    845  MDefinition* unary(MDefinition* op, MIRType type) {
    846    if (inDeadCode()) {
    847      return nullptr;
    848    }
    849    T* ins = T::New(alloc(), op, type);
    850    curBlock_->add(ins);
    851    return ins;
    852  }
    853 
    854  template <class T>
    855  MDefinition* binary(MDefinition* lhs, MDefinition* rhs) {
    856    if (inDeadCode()) {
    857      return nullptr;
    858    }
    859    T* ins = T::New(alloc(), lhs, rhs);
    860    curBlock_->add(ins);
    861    return ins;
    862  }
    863 
    864  template <class T>
    865  MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type) {
    866    if (inDeadCode()) {
    867      return nullptr;
    868    }
    869    T* ins = T::New(alloc(), lhs, rhs, type);
    870    curBlock_->add(ins);
    871    return ins;
    872  }
    873 
    874  template <class T>
    875  MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type,
    876                      MWasmBinaryBitwise::SubOpcode subOpc) {
    877    if (inDeadCode()) {
    878      return nullptr;
    879    }
    880    T* ins = T::New(alloc(), lhs, rhs, type, subOpc);
    881    curBlock_->add(ins);
    882    return ins;
    883  }
    884 
    885  MDefinition* ursh(MDefinition* lhs, MDefinition* rhs, MIRType type) {
    886    if (inDeadCode()) {
    887      return nullptr;
    888    }
    889    auto* ins = MUrsh::NewWasm(alloc(), lhs, rhs, type);
    890    curBlock_->add(ins);
    891    return ins;
    892  }
    893 
    894  MDefinition* add(MDefinition* lhs, MDefinition* rhs, MIRType type) {
    895    if (inDeadCode()) {
    896      return nullptr;
    897    }
    898    auto* ins = MAdd::NewWasm(alloc(), lhs, rhs, type);
    899    curBlock_->add(ins);
    900    return ins;
    901  }
    902 
  // True when `type` is floating point and we are compiling wasm (not
  // asm.js): wasm NaN payloads must be preserved, which forbids folding
  // identities like x - 0.0 and x * 1.0 (see sub()/mul() below).
  bool mustPreserveNaN(MIRType type) {
    return IsFloatingPointType(type) && !codeMeta().isAsmJS();
  }
    906 
    907  MDefinition* sub(MDefinition* lhs, MDefinition* rhs, MIRType type) {
    908    if (inDeadCode()) {
    909      return nullptr;
    910    }
    911 
    912    // wasm can't fold x - 0.0 because of NaN with custom payloads.
    913    MSub* ins = MSub::NewWasm(alloc(), lhs, rhs, type, mustPreserveNaN(type));
    914    curBlock_->add(ins);
    915    return ins;
    916  }
    917 
    918  MDefinition* nearbyInt(MDefinition* input, RoundingMode roundingMode) {
    919    if (inDeadCode()) {
    920      return nullptr;
    921    }
    922 
    923    auto* ins = MNearbyInt::New(alloc(), input, input->type(), roundingMode);
    924    curBlock_->add(ins);
    925    return ins;
    926  }
    927 
  // Emit a floating/integer min or max.  Returns nullptr in dead code.
  MDefinition* minMax(MDefinition* lhs, MDefinition* rhs, MIRType type,
                      bool isMax) {
    if (inDeadCode()) {
      return nullptr;
    }

    if (mustPreserveNaN(type)) {
      // Convert signaling NaN to quiet NaNs.  Subtracting zero is an
      // arithmetic operation and so quiets any sNaN operand without
      // changing other values.
      MDefinition* zero = constantZeroOfValType(ValType::fromMIRType(type));
      lhs = sub(lhs, zero, type);
      rhs = sub(rhs, zero, type);
    }

    MMinMax* ins = MMinMax::NewWasm(alloc(), lhs, rhs, type, isMax);
    curBlock_->add(ins);
    return ins;
  }
    945 
    946  MDefinition* mul(MDefinition* lhs, MDefinition* rhs, MIRType type,
    947                   MMul::Mode mode) {
    948    if (inDeadCode()) {
    949      return nullptr;
    950    }
    951 
    952    // wasm can't fold x * 1.0 because of NaN with custom payloads.
    953    auto* ins =
    954        MMul::NewWasm(alloc(), lhs, rhs, type, mode, mustPreserveNaN(type));
    955    curBlock_->add(ins);
    956    return ins;
    957  }
    958 
  // Emit a division.  For wasm (not asm.js) the node traps on error
  // (divide-by-zero / overflow); asm.js keeps JS semantics instead.
  MDefinition* div(MDefinition* lhs, MDefinition* rhs, MIRType type,
                   bool unsignd) {
    if (inDeadCode()) {
      return nullptr;
    }
    bool trapOnError = !codeMeta().isAsmJS();
    if (!unsignd && type == MIRType::Int32) {
      // Enforce the signedness of the operation by coercing the operands
      // to signed.  Otherwise, operands that "look" unsigned to Ion but
      // are not unsigned to Baldr (eg, unsigned right shifts) may lead to
      // the operation being executed unsigned.  Applies to mod() as well.
      //
      // Do this for Int32 only since Int64 is not subject to the same
      // issues.
      //
      // Note the offsets passed to MWasmBuiltinTruncateToInt32 are wrong here,
      // but it doesn't matter: they're not codegen'd to calls since inputs
      // already are int32.
      auto* lhs2 = createTruncateToInt32(lhs);
      curBlock_->add(lhs2);
      lhs = lhs2;
      auto* rhs2 = createTruncateToInt32(rhs);
      curBlock_->add(rhs2);
      rhs = rhs2;
    }

    // For x86 and arm we implement i64 div via c++ builtin.
    // A call to c++ builtin requires instance pointer.
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
    if (type == MIRType::Int64) {
      auto* ins = MWasmBuiltinDivI64::New(alloc(), lhs, rhs, instancePointer_,
                                          unsignd, trapOnError, trapSiteDesc());
      curBlock_->add(ins);
      return ins;
    }
#endif

    auto* ins = MDiv::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
                          trapSiteDesc(), mustPreserveNaN(type));
    curBlock_->add(ins);
    return ins;
  }
   1001 
   1002  MInstruction* createTruncateToInt32(MDefinition* op) {
   1003    if (op->type() == MIRType::Double || op->type() == MIRType::Float32) {
   1004      return MWasmBuiltinTruncateToInt32::New(alloc(), op, instancePointer_);
   1005    }
   1006 
   1007    return MTruncateToInt32::New(alloc(), op);
   1008  }
   1009 
  // Emit a remainder operation.  Traps on error for wasm; asm.js keeps JS
  // semantics.  Mirrors div() above, including the signedness coercion.
  MDefinition* mod(MDefinition* lhs, MDefinition* rhs, MIRType type,
                   bool unsignd) {
    if (inDeadCode()) {
      return nullptr;
    }
    bool trapOnError = !codeMeta().isAsmJS();
    if (!unsignd && type == MIRType::Int32) {
      // See block comment in div().
      auto* lhs2 = createTruncateToInt32(lhs);
      curBlock_->add(lhs2);
      lhs = lhs2;
      auto* rhs2 = createTruncateToInt32(rhs);
      curBlock_->add(rhs2);
      rhs = rhs2;
    }

    // For x86 and arm we implement i64 mod via c++ builtin.
    // A call to c++ builtin requires instance pointer.
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
    if (type == MIRType::Int64) {
      auto* ins = MWasmBuiltinModI64::New(alloc(), lhs, rhs, instancePointer_,
                                          unsignd, trapOnError, trapSiteDesc());
      curBlock_->add(ins);
      return ins;
    }
#endif

    // Should be handled separately because we call BuiltinThunk for this case
    // and so, need to add the dependency from instancePointer.
    if (type == MIRType::Double) {
      auto* ins = MWasmBuiltinModD::New(alloc(), lhs, rhs, instancePointer_,
                                        type, bytecodeOffset());
      curBlock_->add(ins);
      return ins;
    }

    auto* ins = MMod::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
                          trapSiteDesc());
    curBlock_->add(ins);
    return ins;
  }
   1051 
   1052  MDefinition* bitnot(MDefinition* op, MIRType type) {
   1053    if (inDeadCode()) {
   1054      return nullptr;
   1055    }
   1056    auto* ins = MBitNot::New(alloc(), op, type);
   1057    curBlock_->add(ins);
   1058    return ins;
   1059  }
   1060 
   1061  MDefinition* select(MDefinition* trueExpr, MDefinition* falseExpr,
   1062                      MDefinition* condExpr) {
   1063    if (inDeadCode()) {
   1064      return nullptr;
   1065    }
   1066    auto* ins = MWasmSelect::New(alloc(), trueExpr, falseExpr, condExpr);
   1067    curBlock_->add(ins);
   1068    return ins;
   1069  }
   1070 
   1071  MDefinition* extendI32(MDefinition* op, bool isUnsigned) {
   1072    if (inDeadCode()) {
   1073      return nullptr;
   1074    }
   1075    auto* ins = MExtendInt32ToInt64::New(alloc(), op, isUnsigned);
   1076    curBlock_->add(ins);
   1077    return ins;
   1078  }
   1079 
  // Emit a sign extension from srcSize bytes (1, 2, or 4) up to targetSize
  // bytes (4 or 8).  Any other combination is a compiler bug (MOZ_CRASH).
  MDefinition* signExtend(MDefinition* op, uint32_t srcSize,
                          uint32_t targetSize) {
    if (inDeadCode()) {
      return nullptr;
    }
    MInstruction* ins;
    switch (targetSize) {
      case 4: {
        // Extending into an i32: source is a byte or a halfword.
        MSignExtendInt32::Mode mode;
        switch (srcSize) {
          case 1:
            mode = MSignExtendInt32::Byte;
            break;
          case 2:
            mode = MSignExtendInt32::Half;
            break;
          default:
            MOZ_CRASH("Bad sign extension");
        }
        ins = MSignExtendInt32::New(alloc(), op, mode);
        break;
      }
      case 8: {
        // Extending into an i64: source is a byte, halfword, or word.
        MSignExtendInt64::Mode mode;
        switch (srcSize) {
          case 1:
            mode = MSignExtendInt64::Byte;
            break;
          case 2:
            mode = MSignExtendInt64::Half;
            break;
          case 4:
            mode = MSignExtendInt64::Word;
            break;
          default:
            MOZ_CRASH("Bad sign extension");
        }
        ins = MSignExtendInt64::New(alloc(), op, mode);
        break;
      }
      default: {
        MOZ_CRASH("Bad sign extension");
      }
    }
    curBlock_->add(ins);
    return ins;
  }
   1127 
  // Emit an i64 -> f32/f64 conversion.  On ARM this goes through a builtin
  // call (which needs the instance pointer); elsewhere it is a plain node.
  MDefinition* convertI64ToFloatingPoint(MDefinition* op, MIRType type,
                                         bool isUnsigned) {
    if (inDeadCode()) {
      return nullptr;
    }
#if defined(JS_CODEGEN_ARM)
    auto* ins = MBuiltinInt64ToFloatingPoint::New(
        alloc(), op, instancePointer_, type, bytecodeOffset(), isUnsigned);
#else
    auto* ins = MInt64ToFloatingPoint::New(alloc(), op, type, bytecodeOffset(),
                                           isUnsigned);
#endif
    curBlock_->add(ins);
    return ins;
  }
   1143 
   1144  MDefinition* rotate(MDefinition* input, MDefinition* count, MIRType type,
   1145                      bool left) {
   1146    if (inDeadCode()) {
   1147      return nullptr;
   1148    }
   1149    auto* ins = MRotate::New(alloc(), input, count, type, left);
   1150    curBlock_->add(ins);
   1151    return ins;
   1152  }
   1153 
   1154  template <class T>
   1155  MDefinition* truncate(MDefinition* op, TruncFlags flags) {
   1156    if (inDeadCode()) {
   1157      return nullptr;
   1158    }
   1159    auto* ins = T::New(alloc(), op, flags, trapSiteDesc());
   1160    curBlock_->add(ins);
   1161    return ins;
   1162  }
   1163 
#if defined(JS_CODEGEN_ARM)
  // ARM-only: float->i64 truncation via a builtin call, which requires the
  // instance pointer as an extra operand.
  MDefinition* truncateWithInstance(MDefinition* op, TruncFlags flags) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MWasmBuiltinTruncateToInt64::New(alloc(), op, instancePointer_,
                                                 flags, trapSiteDesc());
    curBlock_->add(ins);
    return ins;
  }
#endif
   1175 
   1176  MDefinition* compare(MDefinition* lhs, MDefinition* rhs, JSOp op,
   1177                       MCompare::CompareType type) {
   1178    if (inDeadCode()) {
   1179      return nullptr;
   1180    }
   1181    auto* ins = MCompare::NewWasm(alloc(), lhs, rhs, op, type);
   1182    curBlock_->add(ins);
   1183    return ins;
   1184  }
   1185 
   1186  void assign(unsigned slot, MDefinition* def) {
   1187    if (inDeadCode()) {
   1188      return;
   1189    }
   1190    curBlock_->setSlot(info().localSlot(slot), def);
   1191  }
   1192 
   1193  MDefinition* compareIsNull(MDefinition* ref, JSOp compareOp) {
   1194    MDefinition* nullVal = constantNullRef(MaybeRefType());
   1195    if (!nullVal) {
   1196      return nullptr;
   1197    }
   1198    return compare(ref, nullVal, compareOp, MCompare::Compare_WasmAnyRef);
   1199  }
   1200 
  // Emit a null check on `ref` that traps when it is null.  Unlike most
  // builders here, callers must not invoke this in dead code.
  [[nodiscard]] MDefinition* refAsNonNull(MDefinition* ref) {
    MOZ_ASSERT(!inDeadCode());
    auto* ins = MWasmRefAsNonNull::New(alloc(), ref, trapSiteDesc());
    if (!ins) {
      return nullptr;
    }
    curBlock_->add(ins);
    return ins;
  }
   1210 
  // Emit br_on_null: branch to the block at `relativeDepth` when `condition`
  // is null, otherwise fall through.  `values` are the operands carried on
  // the branch.  Returns false on OOM.
  [[nodiscard]] bool brOnNull(uint32_t relativeDepth, const DefVector& values,
                              const ResultType& type, MDefinition* condition) {
    if (inDeadCode()) {
      return true;
    }

    MBasicBlock* fallthroughBlock = nullptr;
    if (!newBlock(curBlock_, &fallthroughBlock)) {
      return false;
    }

    MDefinition* check = compareIsNull(condition, JSOp::Eq);
    if (!check) {
      return false;
    }
    // The true (is-null) successor is unknown until the target block is
    // created, so record it as a control-flow patch to be fixed up later.
    MTest* test = MTest::New(alloc(), check, nullptr, fallthroughBlock);
    if (!test ||
        !addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex)) {
      return false;
    }

    // Push the branch operands before ending the block so the target can
    // pick them up.
    if (!pushDefs(values)) {
      return false;
    }

    curBlock_->end(test);
    curBlock_ = fallthroughBlock;
    return true;
  }
   1240 
  // Emit br_on_non_null: branch to the block at `relativeDepth` when
  // `condition` is non-null, otherwise fall through.  Mirrors brOnNull()
  // with the comparison inverted (Ne instead of Eq).
  [[nodiscard]] bool brOnNonNull(uint32_t relativeDepth,
                                 const DefVector& values,
                                 const ResultType& type,
                                 MDefinition* condition) {
    if (inDeadCode()) {
      return true;
    }

    MBasicBlock* fallthroughBlock = nullptr;
    if (!newBlock(curBlock_, &fallthroughBlock)) {
      return false;
    }

    MDefinition* check = compareIsNull(condition, JSOp::Ne);
    if (!check) {
      return false;
    }
    // The taken successor is patched in later once the target block exists.
    MTest* test = MTest::New(alloc(), check, nullptr, fallthroughBlock);
    if (!test ||
        !addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex)) {
      return false;
    }

    if (!pushDefs(values)) {
      return false;
    }

    curBlock_->end(test);
    curBlock_ = fallthroughBlock;
    return true;
  }
   1272 
   1273  MDefinition* refI31(MDefinition* input) {
   1274    auto* ins = MWasmNewI31Ref::New(alloc(), input);
   1275    curBlock_->add(ins);
   1276    return ins;
   1277  }
   1278 
   1279  MDefinition* i31Get(MDefinition* input, FieldWideningOp wideningOp) {
   1280    auto* ins = MWasmI31RefGet::New(alloc(), input, wideningOp);
   1281    curBlock_->add(ins);
   1282    return ins;
   1283  }
   1284 
   1285 #ifdef ENABLE_WASM_SIMD
   1286  // About Wasm SIMD as supported by Ion:
   1287  //
   1288  // The expectation is that Ion will only ever support SIMD on x86 and x64,
   1289  // since ARMv7 will cease to be a tier-1 platform soon, and MIPS64 will never
   1290  // implement SIMD.
   1291  //
   1292  // The division of the operations into MIR nodes reflects that expectation,
   1293  // and is a good fit for x86/x64.  Should the expectation change we'll
   1294  // possibly want to re-architect the SIMD support to be a little more general.
   1295  //
   1296  // Most SIMD operations map directly to a single MIR node that ultimately ends
   1297  // up being expanded in the macroassembler.
   1298  //
   1299  // Some SIMD operations that do have a complete macroassembler expansion are
   1300  // open-coded into multiple MIR nodes here; in some cases that's just
   1301  // convenience, in other cases it may also allow them to benefit from Ion
   1302  // optimizations.  The reason for the expansions will be documented by a
   1303  // comment.
   1304 
   1305  // (v128,v128) -> v128 effect-free binary operations
   1306  MDefinition* binarySimd128(MDefinition* lhs, MDefinition* rhs,
   1307                             bool commutative, SimdOp op) {
   1308    if (inDeadCode()) {
   1309      return nullptr;
   1310    }
   1311 
   1312    MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
   1313               rhs->type() == MIRType::Simd128);
   1314 
   1315    auto* ins = MWasmBinarySimd128::New(alloc(), lhs, rhs, commutative, op);
   1316    curBlock_->add(ins);
   1317    return ins;
   1318  }
   1319 
  // (v128,i32) -> v128 effect-free shift operations
  MDefinition* shiftSimd128(MDefinition* lhs, MDefinition* rhs, SimdOp op) {
    if (inDeadCode()) {
      return nullptr;
    }

    MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
               rhs->type() == MIRType::Int32);

    // Some shift ops require the count to be masked to the lane width; the
    // macroassembler tells us whether (and with what mask) for this op.
    int32_t maskBits;
    if (MacroAssembler::MustMaskShiftCountSimd128(op, &maskBits)) {
      MDefinition* mask = constantI32(maskBits);
      auto* rhs2 = MBitAnd::New(alloc(), rhs, mask, MIRType::Int32);
      curBlock_->add(rhs2);
      rhs = rhs2;
    }

    auto* ins = MWasmShiftSimd128::New(alloc(), lhs, rhs, op);
    curBlock_->add(ins);
    return ins;
  }
   1341 
   1342  // (v128,scalar,imm) -> v128
   1343  MDefinition* replaceLaneSimd128(MDefinition* lhs, MDefinition* rhs,
   1344                                  uint32_t laneIndex, SimdOp op) {
   1345    if (inDeadCode()) {
   1346      return nullptr;
   1347    }
   1348 
   1349    MOZ_ASSERT(lhs->type() == MIRType::Simd128);
   1350 
   1351    auto* ins = MWasmReplaceLaneSimd128::New(alloc(), lhs, rhs, laneIndex, op);
   1352    curBlock_->add(ins);
   1353    return ins;
   1354  }
   1355 
   1356  // (scalar) -> v128 effect-free unary operations
   1357  MDefinition* scalarToSimd128(MDefinition* src, SimdOp op) {
   1358    if (inDeadCode()) {
   1359      return nullptr;
   1360    }
   1361 
   1362    auto* ins = MWasmScalarToSimd128::New(alloc(), src, op);
   1363    curBlock_->add(ins);
   1364    return ins;
   1365  }
   1366 
   1367  // (v128) -> v128 effect-free unary operations
   1368  MDefinition* unarySimd128(MDefinition* src, SimdOp op) {
   1369    if (inDeadCode()) {
   1370      return nullptr;
   1371    }
   1372 
   1373    MOZ_ASSERT(src->type() == MIRType::Simd128);
   1374    auto* ins = MWasmUnarySimd128::New(alloc(), src, op);
   1375    curBlock_->add(ins);
   1376    return ins;
   1377  }
   1378 
   1379  // (v128, imm) -> scalar effect-free unary operations
   1380  MDefinition* reduceSimd128(MDefinition* src, SimdOp op, ValType outType,
   1381                             uint32_t imm = 0) {
   1382    if (inDeadCode()) {
   1383      return nullptr;
   1384    }
   1385 
   1386    MOZ_ASSERT(src->type() == MIRType::Simd128);
   1387    auto* ins =
   1388        MWasmReduceSimd128::New(alloc(), src, op, outType.toMIRType(), imm);
   1389    curBlock_->add(ins);
   1390    return ins;
   1391  }
   1392 
   1393  // (v128, v128, v128) -> v128 effect-free operations
   1394  MDefinition* ternarySimd128(MDefinition* v0, MDefinition* v1, MDefinition* v2,
   1395                              SimdOp op) {
   1396    if (inDeadCode()) {
   1397      return nullptr;
   1398    }
   1399 
   1400    MOZ_ASSERT(v0->type() == MIRType::Simd128 &&
   1401               v1->type() == MIRType::Simd128 &&
   1402               v2->type() == MIRType::Simd128);
   1403 
   1404    auto* ins = MWasmTernarySimd128::New(alloc(), v0, v1, v2, op);
   1405    curBlock_->add(ins);
   1406    return ins;
   1407  }
   1408 
   1409  // (v128, v128, imm_v128) -> v128 effect-free operations
   1410  MDefinition* shuffleSimd128(MDefinition* v1, MDefinition* v2, V128 control) {
   1411    if (inDeadCode()) {
   1412      return nullptr;
   1413    }
   1414 
   1415    MOZ_ASSERT(v1->type() == MIRType::Simd128);
   1416    MOZ_ASSERT(v2->type() == MIRType::Simd128);
   1417    auto* ins = BuildWasmShuffleSimd128(
   1418        alloc(), reinterpret_cast<int8_t*>(control.bytes), v1, v2);
   1419    curBlock_->add(ins);
   1420    return ins;
   1421  }
   1422 
   1423  // Also see below for SIMD memory references
   1424 
   1425 #endif  // ENABLE_WASM_SIMD
   1426 
   1427  /************************************************ Linear memory accesses */
   1428 
   1429  // For detailed information about memory accesses, see "Linear memory
   1430  // addresses and bounds checking" in WasmMemory.cpp.
   1431 
   1432 private:
  // If the platform does not have a HeapReg, load the memory base from
  // instance.  Returns nullptr when memory 0's base lives in HeapReg and no
  // explicit base value is needed.
  MDefinition* maybeLoadMemoryBase(uint32_t memoryIndex) {
#ifdef WASM_HAS_HEAPREG
    if (memoryIndex == 0) {
      return nullptr;
    }
#endif
    return memoryBase(memoryIndex);
  }
   1443 
   1444 public:
  // A value holding the memory base, whether that's HeapReg or some other
  // register.
  MDefinition* memoryBase(uint32_t memoryIndex) {
    // If the memory can move when it grows, the base may change, so the load
    // must alias WasmHeapMeta; otherwise it is effectively constant.
    AliasSet aliases = !codeMeta().memories[memoryIndex].canMovingGrow()
                           ? AliasSet::None()
                           : AliasSet::Load(AliasSet::WasmHeapMeta);
#ifdef WASM_HAS_HEAPREG
    // Memory 0's base is pinned in HeapReg on these platforms.
    if (memoryIndex == 0) {
      MWasmHeapReg* base = MWasmHeapReg::New(alloc(), aliases);
      curBlock_->add(base);
      return base;
    }
#endif
    // Memory 0 has a dedicated slot in the Instance; other memories live in
    // the instance data area at a metadata-computed offset.
    uint32_t offset =
        memoryIndex == 0
            ? Instance::offsetOfMemory0Base()
            : (Instance::offsetInData(
                  codeMeta().offsetOfMemoryInstanceData(memoryIndex) +
                  offsetof(MemoryInstanceData, base)));
    MWasmLoadInstance* base = MWasmLoadInstance::New(
        alloc(), instancePointer_, offset, MIRType::Pointer, aliases);
    curBlock_->add(base);
    return base;
  }
   1469 
   1470 private:
  // If the bounds checking strategy requires it, load the bounds check limit
  // from the instance.  Returns nullptr when huge memory is enabled for this
  // memory (no explicit bounds checks needed).
  MWasmLoadInstance* maybeLoadBoundsCheckLimit(uint32_t memoryIndex,
                                               MIRType type) {
    MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Int64);
    if (codeMeta().hugeMemoryEnabled(memoryIndex)) {
      return nullptr;
    }
    // Memory 0 has a dedicated Instance slot; other memories are in the
    // instance data area.
    uint32_t offset =
        memoryIndex == 0
            ? Instance::offsetOfMemory0BoundsCheckLimit()
            : (Instance::offsetInData(
                  codeMeta().offsetOfMemoryInstanceData(memoryIndex) +
                  offsetof(MemoryInstanceData, boundsCheckLimit)));
    // The limit only changes if the memory can grow by moving.
    AliasSet aliases = !codeMeta().memories[memoryIndex].canMovingGrow()
                           ? AliasSet::None()
                           : AliasSet::Load(AliasSet::WasmHeapMeta);
    auto* load = MWasmLoadInstance::New(alloc(), instancePointer_, offset, type,
                                        aliases);
    curBlock_->add(load);
    return load;
  }
   1493 
   1494  MDefinition* maybeCanonicalizeNaN(Scalar::Type accessType,
   1495                                    MDefinition* value) {
   1496    MOZ_ASSERT(codeMeta().isAsmJS());
   1497 
   1498    // Canonicalize floating point values for differential testing.
   1499    if (Scalar::isFloatingType(accessType) &&
   1500        js::SupportDifferentialTesting()) {
   1501      auto* canonicalize = MCanonicalizeNaN::New(alloc(), value);
   1502      curBlock_->add(canonicalize);
   1503      return canonicalize;
   1504    }
   1505    return value;
   1506  }
   1507 
  // Return true if the access requires an alignment check.  If so, sets
  // *mustAdd to true if the offset must be added to the pointer before
  // checking.
  bool needAlignmentCheck(MemoryAccessDesc* access, MDefinition* base,
                          bool* mustAdd) {
    MOZ_ASSERT(!*mustAdd);

    // asm.js accesses are always aligned and need no checks.
    if (codeMeta().isAsmJS() || !access->isAtomic()) {
      return false;
    }

    // If the EA is known and aligned it will need no checks.
    if (base->isConstant()) {
      // We only care about the low bits, so overflow is OK, as is chopping off
      // the high bits of an i64 pointer.
      uint32_t ptr = 0;
      if (isMem64(access->memoryIndex())) {
        ptr = uint32_t(base->toConstant()->toInt64());
      } else {
        ptr = base->toConstant()->toInt32();
      }
      // Aligned iff (ptr + offset) is a multiple of the access size.
      if (((ptr + access->offset64()) & (access->byteSize() - 1)) == 0) {
        return false;
      }
    }

    // If the offset is aligned then the EA is just the pointer, for
    // the purposes of this check.
    *mustAdd = (access->offset64() & (access->byteSize() - 1)) != 0;
    return true;
  }
   1540 
  // Fold a constant base into the offset and make the base 0, provided the
  // offset stays below the guard limit.  The reason for folding the base into
  // the offset rather than vice versa is that a small offset can be ignored
  // by both explicit bounds checking and bounds check elimination.
  void foldConstantPointer(MemoryAccessDesc* access, MDefinition** base) {
    uint64_t offsetGuardLimit = GetMaxOffsetGuardLimit(
        codeMeta().hugeMemoryEnabled(access->memoryIndex()),
        codeMeta().memories[access->memoryIndex()].pageSize());

    if ((*base)->isConstant()) {
      uint64_t basePtr = 0;
      if (isMem64(access->memoryIndex())) {
        basePtr = uint64_t((*base)->toConstant()->toInt64());
      } else {
        basePtr = uint64_t(int64_t((*base)->toConstant()->toInt32()));
      }

      uint64_t offset = access->offset64();
      // Only fold when the combined offset stays under the guard limit, so
      // the guard pages still catch out-of-bounds accesses.
      if (offset < offsetGuardLimit && basePtr < offsetGuardLimit - offset) {
        offset += basePtr;
        access->setOffset32(uint32_t(offset));
        // Replace the base with a zero of the memory's address width.
        *base = isMem64(access->memoryIndex()) ? constantI64(int64_t(0))
                                               : constantI32(0);
      }
    }
  }
   1567 
  // If the offset must be added because it is large or because the true EA must
  // be checked, compute the effective address, trapping on overflow.
  void maybeComputeEffectiveAddress(MemoryAccessDesc* access,
                                    MDefinition** base, bool mustAddOffset) {
    uint64_t offsetGuardLimit = GetMaxOffsetGuardLimit(
        codeMeta().hugeMemoryEnabled(access->memoryIndex()),
        codeMeta().memories[access->memoryIndex()].pageSize());

    // Fold the offset into the base when it cannot be handled by the guard
    // region (too large), must be part of an alignment check, or offset
    // folding is disabled via jit options.
    if (access->offset64() >= offsetGuardLimit ||
        access->offset64() > UINT32_MAX || mustAddOffset ||
        !JitOptions.wasmFoldOffsets) {
      *base = computeEffectiveAddress(*base, access);
    }
  }
   1582 
  // Decide how wide a bounds check is required for an access to the given
  // memory and return the instance load of the bounds-check limit to compare
  // against.  May return null when no explicit check is needed (presumably
  // huge-memory mode -- see maybeLoadBoundsCheckLimit; TODO confirm).
  MWasmLoadInstance* needBoundsCheck(uint32_t memoryIndex) {
    MOZ_RELEASE_ASSERT(codeMeta().memories[memoryIndex].pageSize() ==
                       PageSize::Standard);
#ifdef JS_64BIT
    // For 32-bit base pointers:
    //
    // If the bounds check uses the full 64 bits of the bounds check limit, then
    // the base pointer must be zero-extended to 64 bits before checking and
    // wrapped back to 32-bits after Spectre masking.  (And it's important that
    // the value we end up with has flowed through the Spectre mask.)
    //
    // If the memory's max size is known to be smaller than 64K pages exactly,
    // we can use a 32-bit check and avoid extension and wrapping.
    bool mem32LimitIs64Bits =
        isMem32(memoryIndex) &&
        !codeMeta().memories[memoryIndex].boundsCheckLimitIsAlways32Bits() &&
        MaxMemoryBytes(codeMeta().memories[memoryIndex].addressType(),
                       codeMeta().memories[memoryIndex].pageSize()) >=
            0x100000000;
#else
    // On 32-bit platforms we have no more than 2GB memory and the limit for a
    // 32-bit base pointer is never a 64-bit value.
    bool mem32LimitIs64Bits = false;
#endif
    // Load the limit as i64 whenever the comparison must span the full
    // 64-bit range (64-bit memories, or 32-bit memories that can reach 4GB).
    return maybeLoadBoundsCheckLimit(memoryIndex,
                                     mem32LimitIs64Bits || isMem64(memoryIndex)
                                         ? MIRType::Int64
                                         : MIRType::Int32);
  }
   1612 
  // Emit a bounds check of *base against the given limit.  When Spectre index
  // masking is enabled, *base is updated to the masked (and, if necessary,
  // re-wrapped) value so later uses flow through the mask.
  void performBoundsCheck(uint32_t memoryIndex, MDefinition** base,
                          MWasmLoadInstance* boundsCheckLimit) {
    // At the outset, actualBase could be the result of pretty much any integer
    // operation, or it could be the load of an integer constant.  If its type
    // is i32, we may assume the value has a canonical representation for the
    // platform, see doc block in MacroAssembler.h.
    MDefinition* actualBase = *base;

    // Extend an i32 index value to perform a 64-bit bounds check if the memory
    // can be 4GB or larger.
    bool extendAndWrapIndex =
        isMem32(memoryIndex) && boundsCheckLimit->type() == MIRType::Int64;
    if (extendAndWrapIndex) {
      auto* extended = MWasmExtendU32Index::New(alloc(), actualBase);
      curBlock_->add(extended);
      actualBase = extended;
    }

    auto* ins = MWasmBoundsCheck::New(alloc(), actualBase, boundsCheckLimit,
                                      trapSiteDesc(), MWasmBoundsCheck::Memory,
                                      memoryIndex);
    curBlock_->add(ins);
    actualBase = ins;

    // If we're masking, then we update *base to create a dependency chain
    // through the masked index.  But we will first need to wrap the index
    // value if it was extended above.
    if (JitOptions.spectreIndexMasking) {
      if (extendAndWrapIndex) {
        auto* wrapped = MWasmWrapU32Index::New(alloc(), actualBase);
        curBlock_->add(wrapped);
        actualBase = wrapped;
      }
      *base = actualBase;
    }
  }
   1649 
  // Perform all necessary checking before a wasm heap access, based on the
  // attributes of the access and base pointer.
  //
  // For 64-bit indices on platforms that are limited to indices that fit into
  // 32 bits (all 32-bit platforms and mips64), this returns a bounds-checked
  // `base` that has type Int32.  Lowering code depends on this and will assert
  // that the base has this type.  See the end of this function.

  void checkOffsetAndAlignmentAndBounds(MemoryAccessDesc* access,
                                        MDefinition** base) {
    MOZ_ASSERT(!inDeadCode());
    // asm.js accesses take the simpler MAsmJS* paths and never come here.
    MOZ_ASSERT(!codeMeta().isAsmJS());

    // Attempt to fold a constant base pointer into the offset so as to simplify
    // the addressing expression. This may update *base.
    foldConstantPointer(access, base);

    // Determine whether an alignment check is needed and whether the offset
    // must be checked too.
    bool mustAddOffsetForAlignmentCheck = false;
    bool alignmentCheck =
        needAlignmentCheck(access, *base, &mustAddOffsetForAlignmentCheck);

    // If bounds checking or alignment checking requires it, compute the
    // effective address: add the offset into the pointer and trap on overflow.
    // This may update *base.
    maybeComputeEffectiveAddress(access, base, mustAddOffsetForAlignmentCheck);

    // Emit the alignment check if necessary; it traps if it fails.
    if (alignmentCheck) {
      curBlock_->add(MWasmAlignmentCheck::New(
          alloc(), *base, access->byteSize(), trapSiteDesc()));
    }

    // Emit the bounds check if necessary; it traps if it fails.  This may
    // update *base.
    MWasmLoadInstance* boundsCheckLimit =
        needBoundsCheck(access->memoryIndex());
    if (boundsCheckLimit) {
      performBoundsCheck(access->memoryIndex(), base, boundsCheckLimit);
    }

#ifndef JS_64BIT
    if (isMem64(access->memoryIndex())) {
      // We must have had an explicit bounds check (or one was elided if it was
      // proved redundant), and on 32-bit systems the index will for sure fit in
      // 32 bits: the max memory is 2GB.  So chop the index down to 32-bit to
      // simplify the back-end.
      MOZ_ASSERT((*base)->type() == MIRType::Int64);
      MOZ_ASSERT(!codeMeta().hugeMemoryEnabled(access->memoryIndex()));
      auto* chopped = MWasmWrapU32Index::New(alloc(), *base);
      MOZ_ASSERT(chopped->type() == MIRType::Int32);
      curBlock_->add(chopped);
      *base = chopped;
    }
#endif
  }
   1707 
   1708  bool isSmallerAccessForI64(ValType result, const MemoryAccessDesc* access) {
   1709    if (result == ValType::I64 && access->byteSize() <= 4) {
   1710      // These smaller accesses should all be zero-extending.
   1711      MOZ_ASSERT(!isSignedIntType(access->type()));
   1712      return true;
   1713    }
   1714    return false;
   1715  }
   1716 
   1717 public:
   1718  bool isMem32(uint32_t memoryIndex) {
   1719    return codeMeta().memories[memoryIndex].addressType() == AddressType::I32;
   1720  }
   1721  bool isMem64(uint32_t memoryIndex) {
   1722    return codeMeta().memories[memoryIndex].addressType() == AddressType::I64;
   1723  }
  // Whether the "huge memory" (guard-region based) scheme applies to the
  // given memory; forwards to the module's code metadata.
  bool hugeMemoryEnabled(uint32_t memoryIndex) {
    return codeMeta().hugeMemoryEnabled(memoryIndex);
  }
   1727 
  // Add the offset into the pointer to yield the EA; trap on overflow. Clears
  // the offset on the memory access as a result.
  MDefinition* computeEffectiveAddress(MDefinition* base,
                                       MemoryAccessDesc* access) {
    if (inDeadCode()) {
      return nullptr;
    }
    uint64_t offset = access->offset64();
    // A zero offset adds nothing; keep the base and leave the access alone.
    if (offset == 0) {
      return base;
    }
    auto* ins = MWasmAddOffset::New(alloc(), base, offset, trapSiteDesc());
    curBlock_->add(ins);
    // The offset is now baked into the EA, so it must not also remain
    // implicit in the access.
    access->clearOffset();
    return ins;
  }
   1744 
  // Emit a load of a `result`-typed value from linear memory.  asm.js uses
  // the simpler MAsmJSLoadHeap node (no offsets); wasm proper emits all
  // offset/alignment/bounds checking first.  Returns null in dead code or if
  // node allocation fails.
  MDefinition* load(MDefinition* base, MemoryAccessDesc* access,
                    ValType result) {
    if (inDeadCode()) {
      return nullptr;
    }

    MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
    MInstruction* load = nullptr;
    if (codeMeta().isAsmJS()) {
      MOZ_ASSERT(access->offset64() == 0);
      MWasmLoadInstance* boundsCheckLimit =
          maybeLoadBoundsCheckLimit(access->memoryIndex(), MIRType::Int32);
      load = MAsmJSLoadHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
                                 access->type());
    } else {
      checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
      // See checkOffsetAndAlignmentAndBounds: 32-bit platforms always end up
      // with an i32 base, which lowering relies on.
      MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
      load = MWasmLoad::New(alloc(), memoryBase, base, *access,
                            result.toMIRType());
    }
    if (!load) {
      return nullptr;
    }
    curBlock_->add(load);
    return load;
  }
   1773 
  // Emit a store of `v` into linear memory.  asm.js uses MAsmJSStoreHeap and
  // canonicalizes NaNs first; wasm proper emits all offset/alignment/bounds
  // checking before the store.  Silently does nothing in dead code or on OOM.
  void store(MDefinition* base, MemoryAccessDesc* access, MDefinition* v) {
    if (inDeadCode()) {
      return;
    }

    MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
    MInstruction* store = nullptr;
    if (codeMeta().isAsmJS()) {
      MOZ_ASSERT(access->offset64() == 0);
      MWasmLoadInstance* boundsCheckLimit =
          maybeLoadBoundsCheckLimit(access->memoryIndex(), MIRType::Int32);
      v = maybeCanonicalizeNaN(access->type(), v);
      store = MAsmJSStoreHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
                                   access->type(), v);
    } else {
      checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
      // See checkOffsetAndAlignmentAndBounds: 32-bit platforms always end up
      // with an i32 base, which lowering relies on.
      MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
      store = MWasmStore::New(alloc(), memoryBase, base, *access, v);
    }
    if (!store) {
      return;
    }
    curBlock_->add(store);
  }
   1800 
  // Emit an atomic compare-exchange on linear memory.  For i64 results backed
  // by sub-8-byte accesses, the operands are first wrapped to i32 and the
  // result is zero-extended back to i64.  Returns null in dead code or on OOM.
  MDefinition* atomicCompareExchangeHeap(MDefinition* base,
                                         MemoryAccessDesc* access,
                                         ValType result, MDefinition* oldv,
                                         MDefinition* newv) {
    if (inDeadCode()) {
      return nullptr;
    }

    checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
    MOZ_ASSERT(base->type() == MIRType::Int32);
#endif

    if (isSmallerAccessForI64(result, access)) {
      auto* cvtOldv =
          MWrapInt64ToInt32::New(alloc(), oldv, /*bottomHalf=*/true);
      curBlock_->add(cvtOldv);
      oldv = cvtOldv;

      auto* cvtNewv =
          MWrapInt64ToInt32::New(alloc(), newv, /*bottomHalf=*/true);
      curBlock_->add(cvtNewv);
      newv = cvtNewv;
    }

    MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
    MInstruction* cas = MWasmCompareExchangeHeap::New(
        alloc(), bytecodeOffset(), memoryBase, base, *access, oldv, newv,
        instancePointer_);
    if (!cas) {
      return nullptr;
    }
    curBlock_->add(cas);

    if (isSmallerAccessForI64(result, access)) {
      // Narrow accesses are zero-extending, hence /*isUnsigned=*/true.
      cas = MExtendInt32ToInt64::New(alloc(), cas, true);
      curBlock_->add(cas);
    }

    return cas;
  }
   1842 
  // Emit an atomic exchange on linear memory.  For i64 results backed by
  // sub-8-byte accesses, the value is wrapped to i32 and the result is
  // zero-extended back to i64.  Returns null in dead code or on OOM.
  MDefinition* atomicExchangeHeap(MDefinition* base, MemoryAccessDesc* access,
                                  ValType result, MDefinition* value) {
    if (inDeadCode()) {
      return nullptr;
    }

    checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
    MOZ_ASSERT(base->type() == MIRType::Int32);
#endif

    if (isSmallerAccessForI64(result, access)) {
      auto* cvtValue =
          MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
      curBlock_->add(cvtValue);
      value = cvtValue;
    }

    MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
    MInstruction* xchg =
        MWasmAtomicExchangeHeap::New(alloc(), bytecodeOffset(), memoryBase,
                                     base, *access, value, instancePointer_);
    if (!xchg) {
      return nullptr;
    }
    curBlock_->add(xchg);

    if (isSmallerAccessForI64(result, access)) {
      // Narrow accesses are zero-extending, hence /*isUnsigned=*/true.
      xchg = MExtendInt32ToInt64::New(alloc(), xchg, true);
      curBlock_->add(xchg);
    }

    return xchg;
  }
   1877 
  // Emit an atomic read-modify-write (`op`) on linear memory.  For i64
  // results backed by sub-8-byte accesses, the value is wrapped to i32 and
  // the result zero-extended back to i64.  Returns null in dead code or OOM.
  MDefinition* atomicBinopHeap(AtomicOp op, MDefinition* base,
                               MemoryAccessDesc* access, ValType result,
                               MDefinition* value) {
    if (inDeadCode()) {
      return nullptr;
    }

    checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
    MOZ_ASSERT(base->type() == MIRType::Int32);
#endif

    if (isSmallerAccessForI64(result, access)) {
      auto* cvtValue =
          MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
      curBlock_->add(cvtValue);
      value = cvtValue;
    }

    MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
    MInstruction* binop =
        MWasmAtomicBinopHeap::New(alloc(), bytecodeOffset(), op, memoryBase,
                                  base, *access, value, instancePointer_);
    if (!binop) {
      return nullptr;
    }
    curBlock_->add(binop);

    if (isSmallerAccessForI64(result, access)) {
      // Narrow accesses are zero-extending, hence /*isUnsigned=*/true.
      binop = MExtendInt32ToInt64::New(alloc(), binop, true);
      curBlock_->add(binop);
    }

    return binop;
  }
   1913 
   1914 #ifdef ENABLE_WASM_SIMD
  // Emit a load-and-splat producing a v128: each lane is filled with the
  // loaded scalar.  Prefers a fused splat-load when the target supports it;
  // otherwise loads a scalar and splats it with a separate SIMD op.
  MDefinition* loadSplatSimd128(Scalar::Type viewType,
                                const LinearMemoryAddress<MDefinition*>& addr,
                                wasm::SimdOp splatOp) {
    if (inDeadCode()) {
      return nullptr;
    }

    MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
                            trapSiteDesc(),
                            hugeMemoryEnabled(addr.memoryIndex));

    // Generate better code (on x86)
    // If AVX2 is enabled, more broadcast operators are available.
    if (viewType == Scalar::Float64
#  if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
        || (js::jit::CPUInfo::IsAVX2Present() &&
            (viewType == Scalar::Uint8 || viewType == Scalar::Uint16 ||
             viewType == Scalar::Float32))
#  endif
    ) {
      access.setSplatSimd128Load();
      return load(addr.base, &access, ValType::V128);
    }

    // Fallback: load the scalar, then splat it into a v128.
    ValType resultType = ValType::I32;
    if (viewType == Scalar::Float32) {
      resultType = ValType::F32;
      splatOp = wasm::SimdOp::F32x4Splat;
    }
    auto* scalar = load(addr.base, &access, resultType);
    // `load` returns null both in dead code and on OOM; only the OOM case
    // (not dead code) is a failure here.
    if (!inDeadCode() && !scalar) {
      return nullptr;
    }
    return scalarToSimd128(scalar, splatOp);
  }
   1950 
  // Emit a load-and-widen producing a v128 (e.g. 8 bytes widened to 8 lanes
  // of i16).  Returns null in dead code or on OOM.
  MDefinition* loadExtendSimd128(const LinearMemoryAddress<MDefinition*>& addr,
                                 wasm::SimdOp op) {
    if (inDeadCode()) {
      return nullptr;
    }

    // Generate better code (on x86) by loading as a double with an
    // operation that sign extends directly.
    MemoryAccessDesc access(addr.memoryIndex, Scalar::Float64, addr.align,
                            addr.offset, trapSiteDesc(),
                            hugeMemoryEnabled(addr.memoryIndex));
    access.setWidenSimd128Load(op);
    return load(addr.base, &access, ValType::V128);
  }
   1965 
  // Emit a load of a 32- or 64-bit value zero-extended into a v128 (the
  // remaining lanes are zero).  Returns null in dead code or on OOM.
  MDefinition* loadZeroSimd128(Scalar::Type viewType, size_t numBytes,
                               const LinearMemoryAddress<MDefinition*>& addr) {
    if (inDeadCode()) {
      return nullptr;
    }

    MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
                            trapSiteDesc(),
                            hugeMemoryEnabled(addr.memoryIndex));
    access.setZeroExtendSimd128Load();
    return load(addr.base, &access, ValType::V128);
  }
   1978 
  // Emit a load of `laneSize` bytes into lane `laneIndex` of the v128 `src`,
  // leaving other lanes unchanged.  Returns null in dead code or on OOM.
  MDefinition* loadLaneSimd128(uint32_t laneSize,
                               const LinearMemoryAddress<MDefinition*>& addr,
                               uint32_t laneIndex, MDefinition* src) {
    if (inDeadCode()) {
      return nullptr;
    }

    MemoryAccessDesc access(addr.memoryIndex, Scalar::Simd128, addr.align,
                            addr.offset, trapSiteDesc(),
                            hugeMemoryEnabled(addr.memoryIndex));
    MDefinition* memoryBase = maybeLoadMemoryBase(access.memoryIndex());
    MDefinition* base = addr.base;
    // SIMD lane accesses are wasm-only; emit the full set of checks.
    MOZ_ASSERT(!codeMeta().isAsmJS());
    checkOffsetAndAlignmentAndBounds(&access, &base);
#  ifndef JS_64BIT
    MOZ_ASSERT(base->type() == MIRType::Int32);
#  endif
    MInstruction* load = MWasmLoadLaneSimd128::New(
        alloc(), memoryBase, base, access, laneSize, laneIndex, src);
    if (!load) {
      return nullptr;
    }
    curBlock_->add(load);
    return load;
  }
   2004 
  // Emit a store of `laneSize` bytes from lane `laneIndex` of the v128 `src`
  // into linear memory.  Silently does nothing in dead code or on OOM.
  void storeLaneSimd128(uint32_t laneSize,
                        const LinearMemoryAddress<MDefinition*>& addr,
                        uint32_t laneIndex, MDefinition* src) {
    if (inDeadCode()) {
      return;
    }
    MemoryAccessDesc access(addr.memoryIndex, Scalar::Simd128, addr.align,
                            addr.offset, trapSiteDesc(),
                            hugeMemoryEnabled(addr.memoryIndex));
    MDefinition* memoryBase = maybeLoadMemoryBase(access.memoryIndex());
    MDefinition* base = addr.base;
    // SIMD lane accesses are wasm-only; emit the full set of checks.
    MOZ_ASSERT(!codeMeta().isAsmJS());
    checkOffsetAndAlignmentAndBounds(&access, &base);
#  ifndef JS_64BIT
    MOZ_ASSERT(base->type() == MIRType::Int32);
#  endif
    MInstruction* store = MWasmStoreLaneSimd128::New(
        alloc(), memoryBase, base, access, laneSize, laneIndex, src);
    if (!store) {
      return;
    }
    curBlock_->add(store);
  }
   2028 #endif  // ENABLE_WASM_SIMD
   2029 
   2030  /************************************************ Global variable accesses */
   2031 
  // Load the value of a wasm global.  Indirect globals live behind an
  // immutable cell pointer in the instance data; direct globals are stored
  // inline in the instance data.  Returns null in dead code.
  MDefinition* loadGlobalVar(const GlobalDesc& global) {
    if (inDeadCode()) {
      return nullptr;
    }

    MInstruction* load;
    if (global.isIndirect()) {
      // Pull a pointer to the value out of Instance::globalArea, then
      // load from that pointer.  Note that the pointer is immutable
      // even though the value it points at may change, hence the use of
      // |true| for the first node's |isConst| value, irrespective of
      // the |isConst| formal parameter to this method.  The latter
      // applies to the denoted value as a whole.
      auto* cellPtr = MWasmLoadInstanceDataField::New(
          alloc(), MIRType::Pointer, global.offset(),
          /*isConst=*/true, instancePointer_);
      curBlock_->add(cellPtr);
      load = MWasmLoadGlobalCell::New(alloc(), global.type().toMIRType(),
                                      cellPtr, global.type());
    } else {
      // Pull the value directly out of Instance::globalArea.
      load = MWasmLoadInstanceDataField::New(
          alloc(), global.type().toMIRType(), global.offset(),
          !global.isMutable(), instancePointer_,
          global.type().toMaybeRefType());
    }
    curBlock_->add(load);
    return load;
  }
   2061 
  // Store `v` into a wasm global.  Ref-typed globals need GC write barriers:
  // the previous value is loaded first, the new value is stored with a
  // pre-barrier, then the precise post-write barrier runs.  Returns false
  // only if emitting the post-barrier call fails.
  [[nodiscard]] bool storeGlobalVar(uint32_t lineOrBytecode,
                                    const GlobalDesc& global, MDefinition* v) {
    if (inDeadCode()) {
      return true;
    }

    if (global.isIndirect()) {
      // Pull a pointer to the value out of Instance::globalArea, then
      // store through that pointer.
      auto* valueAddr = MWasmLoadInstanceDataField::New(
          alloc(), MIRType::Pointer, global.offset(),
          /*isConstant=*/true, instancePointer_);
      curBlock_->add(valueAddr);

      // Handle a store to a ref-typed field specially
      if (global.type().toMIRType() == MIRType::WasmAnyRef) {
        // Load the previous value for the post-write barrier
        MOZ_ASSERT(v->type() == MIRType::WasmAnyRef);
        auto* prevValue = MWasmLoadGlobalCell::New(alloc(), MIRType::WasmAnyRef,
                                                   valueAddr, global.type());
        curBlock_->add(prevValue);

        // Store the new value
        auto* store =
            MWasmStoreRef::New(alloc(), instancePointer_, valueAddr,
                               /*valueOffset=*/0, v, AliasSet::WasmGlobalCell,
                               WasmPreBarrierKind::Normal);
        curBlock_->add(store);

        // Call the post-write barrier
        return postBarrierEdgePrecise(lineOrBytecode, valueAddr, prevValue);
      }

      auto* store = MWasmStoreGlobalCell::New(alloc(), v, valueAddr);
      curBlock_->add(store);
      return true;
    }
    // Or else store the value directly in Instance::globalArea.

    // Handle a store to a ref-typed field specially
    if (global.type().toMIRType() == MIRType::WasmAnyRef) {
      // Compute the address of the ref-typed global
      auto* valueAddr = MWasmDerivedPointer::New(
          alloc(), instancePointer_,
          wasm::Instance::offsetInData(global.offset()));
      curBlock_->add(valueAddr);

      // Load the previous value for the post-write barrier
      MOZ_ASSERT(v->type() == MIRType::WasmAnyRef);
      auto* prevValue = MWasmLoadGlobalCell::New(alloc(), MIRType::WasmAnyRef,
                                                 valueAddr, global.type());
      curBlock_->add(prevValue);

      // Store the new value.  Note the alias set differs from the indirect
      // case above: here the cell lives in the instance data itself.
      auto* store =
          MWasmStoreRef::New(alloc(), instancePointer_, valueAddr,
                             /*valueOffset=*/0, v, AliasSet::WasmInstanceData,
                             WasmPreBarrierKind::Normal);
      curBlock_->add(store);

      // Call the post-write barrier
      return postBarrierEdgePrecise(lineOrBytecode, valueAddr, prevValue);
    }

    auto* store = MWasmStoreInstanceDataField::New(alloc(), global.offset(), v,
                                                   instancePointer_);
    curBlock_->add(store);
    return true;
  }
   2131 
  // Load the slot on the instance where the result of `ref.func` is cached.
  // This may be null if a function reference for this function has not been
  // asked for yet.
  MDefinition* loadCachedRefFunc(uint32_t funcIndex) {
    uint32_t exportedFuncIndex = codeMeta().findFuncExportIndex(funcIndex);
    // The cache slot is immutable from the jit's perspective, hence
    // isConst = true on the instance-data load.
    MWasmLoadInstanceDataField* refFunc = MWasmLoadInstanceDataField::New(
        alloc(), MIRType::WasmAnyRef,
        codeMeta().offsetOfFuncExportInstanceData(exportedFuncIndex) +
            offsetof(FuncExportInstanceData, func),
        true, instancePointer_);
    curBlock_->add(refFunc);
    return refFunc;
  }
   2145 
  // Load a field of the given table's TableInstanceData (at `fieldOffset`
  // within it) out of the instance data area.
  MDefinition* loadTableField(uint32_t tableIndex, unsigned fieldOffset,
                              MIRType type) {
    uint32_t instanceDataOffset = wasm::Instance::offsetInData(
        codeMeta().offsetOfTableInstanceData(tableIndex) + fieldOffset);
    auto* load =
        MWasmLoadInstance::New(alloc(), instancePointer_, instanceDataOffset,
                               type, AliasSet::Load(AliasSet::WasmTableMeta));
    curBlock_->add(load);
    return load;
  }
   2156 
   2157  MDefinition* loadTableLength(uint32_t tableIndex) {
   2158    const TableDesc& table = codeMeta().tables[tableIndex];
   2159    if (table.maximumLength().isSome() &&
   2160        table.initialLength() == *table.maximumLength()) {
   2161      return table.addressType() == AddressType::I64
   2162                 ? constantI64(int64_t(table.initialLength()))
   2163                 : constantI32(int32_t(table.initialLength()));
   2164    }
   2165    return loadTableField(tableIndex, offsetof(TableInstanceData, length),
   2166                          table.addressType() == AddressType::I64
   2167                              ? MIRType::Int64
   2168                              : MIRType::Int32);
   2169  }
   2170 
  // Load the pointer to the table's elements array from the instance data.
  MDefinition* loadTableElements(uint32_t tableIndex) {
    return loadTableField(tableIndex, offsetof(TableInstanceData, elements),
                          MIRType::Pointer);
  }
   2175 
   2176  // Clamps a table address into i32 range. If the value is too large to fit in
   2177  // an i32, it will be replaced with UINT32_MAX so that it will always fail a
   2178  // 32-bit bounds check. Consider using an actual 64-bit bounds check if
   2179  // possible.
   2180  MDefinition* clampTableAddressToI32(AddressType addressType,
   2181                                      MDefinition* address) {
   2182    switch (addressType) {
   2183      case AddressType::I32:
   2184        return address;
   2185      case AddressType::I64:
   2186        auto* clamp = MWasmClampTable64Address::New(alloc(), address);
   2187        if (!clamp) {
   2188          return nullptr;
   2189        }
   2190        curBlock_->add(clamp);
   2191        return clamp;
   2192    }
   2193    MOZ_CRASH("unknown address type");
   2194  }
   2195 
  // Bounds-check a table address against the table's length and return an
  // i32 address suitable for indexing the elements array (64-bit addresses
  // are wrapped to their low 32 bits after the check).
  MDefinition* boundsCheckAndWrapTableAddress(uint32_t tableIndex,
                                              MDefinition* address) {
    const TableDesc& table = codeMeta().tables[tableIndex];

    // Load the table length and perform a bounds check with spectre index
    // masking
    auto* length = loadTableLength(tableIndex);
    auto* check =
        MWasmBoundsCheck::New(alloc(), address, length, trapSiteDesc(),
                              MWasmBoundsCheck::Table, tableIndex);
    curBlock_->add(check);
    if (JitOptions.spectreIndexMasking) {
      // Use the masked value so later uses depend on the mask.
      address = check;
    }

    if (table.addressType() == AddressType::I64) {
      // The check passed, so the address fits in 32 bits; wrap it.
      auto* address32 =
          MWrapInt64ToInt32::New(alloc(), address, /*bottomHalf=*/true);
      curBlock_->add(address32);
      return address32;
    }
    return address;
  };
   2219 
  // Emit `table.get` for an anyref-typed table: bounds-check the address,
  // then load the element from the elements array.
  MDefinition* tableGetAnyRef(uint32_t tableIndex, MDefinition* address) {
    const TableDesc& table = codeMeta().tables[tableIndex];

    auto* address32 = boundsCheckAndWrapTableAddress(tableIndex, address);

    // Load the table elements and load the element
    auto* elements = loadTableElements(tableIndex);
    auto* element = MWasmLoadTableElement::New(alloc(), elements, address32,
                                               table.elemType);
    curBlock_->add(element);
    return element;
  }
   2232 
  // Emit `table.set` for an anyref-typed table: bounds-check the address,
  // store the new value with a pre-barrier, and run the precise post-write
  // barrier (which needs the previous value).  Returns false only if
  // emitting the post-barrier call fails.
  [[nodiscard]] bool tableSetAnyRef(uint32_t tableIndex, MDefinition* address,
                                    MDefinition* value,
                                    uint32_t lineOrBytecode) {
    const TableDesc& table = codeMeta().tables[tableIndex];

    auto* address32 = boundsCheckAndWrapTableAddress(tableIndex, address);

    // Load the table elements
    auto* elements = loadTableElements(tableIndex);

    // Load the previous value
    auto* prevValue = MWasmLoadTableElement::New(alloc(), elements, address32,
                                                 table.elemType);
    curBlock_->add(prevValue);

    // Compute the value's location for the post barrier
    auto* loc = MWasmDerivedIndexPointer::New(alloc(), elements, address32,
                                              ScalePointer);
    curBlock_->add(loc);

    // Store the new value
    auto* store = MWasmStoreRef::New(
        alloc(), instancePointer_, loc, /*valueOffset=*/0, value,
        AliasSet::WasmTableElement, WasmPreBarrierKind::Normal);
    curBlock_->add(store);

    // Perform the post barrier
    return postBarrierEdgePrecise(lineOrBytecode, loc, prevValue);
  }
   2262 
   2263  void addInterruptCheck() {
   2264    if (inDeadCode()) {
   2265      return;
   2266    }
   2267    curBlock_->add(
   2268        MWasmInterruptCheck::New(alloc(), instancePointer_, trapSiteDesc()));
   2269  }
   2270 
   2271  // Perform a post-write barrier to update the generational store buffer. This
   2272  // version stores the entire containing object (e.g. a struct) rather than a
   2273  // single edge.
   2274  [[nodiscard]] bool postBarrierWholeCell(uint32_t lineOrBytecode,
   2275                                          MDefinition* object,
   2276                                          MDefinition* newValue) {
   2277    auto* barrier = MWasmPostWriteBarrierWholeCell::New(
   2278        alloc(), instancePointer_, object, newValue);
   2279    if (!barrier) {
   2280      return false;
   2281    }
   2282    curBlock_->add(barrier);
   2283    return true;
   2284  }
   2285 
  // Perform a post-write barrier to update the generational store buffer. This
  // version tracks a single tenured -> nursery edge, and will remove a previous
  // store buffer entry if it is no longer needed.
  // Implemented as an out-of-line instance call rather than inline MIR.
  [[nodiscard]] bool postBarrierEdgePrecise(uint32_t lineOrBytecode,
                                            MDefinition* valueAddr,
                                            MDefinition* value) {
    return emitInstanceCall2(lineOrBytecode, SASigPostBarrierEdgePrecise,
                             valueAddr, value);
  }
   2295 
  // Perform a post-write barrier to update the generational store buffer. This
  // version does not remove a previous store buffer entry if it is no longer
  // needed.  The barriered address is valueBase + index * scale.
  [[nodiscard]] bool postBarrierEdgeAtIndex(uint32_t lineOrBytecode,
                                            MDefinition* object,
                                            MDefinition* valueBase,
                                            MDefinition* index, uint32_t scale,
                                            MDefinition* newValue) {
    auto* barrier = MWasmPostWriteBarrierEdgeAtIndex::New(
        alloc(), instancePointer_, object, valueBase, index, scale, newValue);
    if (!barrier) {
      return false;
    }
    curBlock_->add(barrier);
    return true;
  }
   2312 
   2313  /***************************************************************** Calls */
   2314 
  // The IonMonkey backend maintains a single stack offset (from the stack
  // pointer to the base of the frame) by adding the total amount of spill
  // space required plus the maximum stack required for argument passing.
  // Since we do not use IonMonkey's MPrepareCall/MPassArg/MCall, we must
  // manually accumulate, for the entire function, the maximum required stack
  // space for argument passing. (This is passed to the CodeGenerator via
  // MIRGenerator::maxWasmStackArgBytes.) This is just the maximum of the
  // stack space required for each individual call (as determined by the call
  // ABI).
   2324 
   2325  [[nodiscard]]
   2326  bool passInstanceCallArg(MIRType instanceType, CallCompileState* callState) {
   2327    if (inDeadCode()) {
   2328      return true;
   2329    }
   2330 
   2331    // Should only pass an instance once.  And it must be a non-GC pointer.
   2332    MOZ_ASSERT(callState->instanceArg == ABIArg());
   2333    MOZ_ASSERT(instanceType == MIRType::Pointer);
   2334    callState->instanceArg = callState->abi.next(MIRType::Pointer);
   2335    return true;
   2336  }
   2337 
  // Do not call this directly.  Call one of the passCallArg() variants instead.
  //
  // Claims the next ABI location for `argDef` (which must already have MIR
  // type `type`) and records how the value is passed: appended to
  // callState->regArgs for register arguments, or stored to the outgoing
  // argument area via MWasmStackArg for stack arguments.  Returns false on
  // OOM.
  [[nodiscard]]
  bool passCallArgWorker(MDefinition* argDef, MIRType type,
                         CallCompileState* callState) {
    MOZ_ASSERT(argDef->type() == type);

    // Calling a softFP function requires moving our floats into GPRs.
    if (!callState->hardFP &&
        (type == MIRType::Double || type == MIRType::Float32)) {
      // Reinterpret the float bits as an integer of the same width.
      MIRType softType =
          (type == MIRType::Double) ? MIRType::Int64 : MIRType::Int32;
      auto* softDef = MReinterpretCast::New(alloc(), argDef, softType);
      if (!softDef) {
        return false;
      }
      curBlock_->add(softDef);
      argDef = softDef;
    }

    ABIArg arg = callState->abi.next(type);
    switch (arg.kind()) {
#ifdef JS_CODEGEN_REGISTER_PAIR
      case ABIArg::GPR_PAIR: {
        // A 64-bit value passed in a pair of 32-bit GPRs: split it into low
        // and high halves and register each half separately.
        auto mirLow =
            MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ true);
        curBlock_->add(mirLow);
        auto mirHigh =
            MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ false);
        curBlock_->add(mirHigh);
        return callState->regArgs.append(
                   MWasmCallBase::Arg(AnyRegister(arg.gpr64().low), mirLow)) &&
               callState->regArgs.append(
                   MWasmCallBase::Arg(AnyRegister(arg.gpr64().high), mirHigh));
      }
#endif
      case ABIArg::GPR:
      case ABIArg::FPU:
        return callState->regArgs.append(MWasmCallBase::Arg(arg.reg(), argDef));
      case ABIArg::Stack: {
        // Passed in memory: emit a store into the outgoing argument area.
        auto* mir =
            MWasmStackArg::New(alloc(), arg.offsetFromArgBase(), argDef);
        curBlock_->add(mir);
        return true;
      }
      case ABIArg::Uninitialized:
        MOZ_ASSERT_UNREACHABLE("Uninitialized ABIArg kind");
    }
    MOZ_CRASH("Unknown ABIArg kind.");
  }
   2387 
   2388  template <typename VecT>
   2389  [[nodiscard]]
   2390  bool passCallArgs(const DefVector& argDefs, const VecT& types,
   2391                    CallCompileState* callState) {
   2392    MOZ_ASSERT(argDefs.length() == types.length());
   2393    for (uint32_t i = 0; i < argDefs.length(); i++) {
   2394      MDefinition* def = argDefs[i];
   2395      ValType type = types[i];
   2396      if (!passCallArg(def, type, callState)) {
   2397        return false;
   2398      }
   2399    }
   2400    return true;
   2401  }
   2402 
   2403  [[nodiscard]]
   2404  bool passCallArg(MDefinition* argDef, MIRType type,
   2405                   CallCompileState* callState) {
   2406    if (inDeadCode()) {
   2407      return true;
   2408    }
   2409    return passCallArgWorker(argDef, type, callState);
   2410  }
   2411 
   2412  [[nodiscard]]
   2413  bool passCallArg(MDefinition* argDef, ValType type,
   2414                   CallCompileState* callState) {
   2415    if (inDeadCode()) {
   2416      return true;
   2417    }
   2418    return passCallArgWorker(argDef, type.toMIRType(), callState);
   2419  }
   2420 
  // If the call returns results on the stack, prepare a stack area to receive
  // them, and pass the address of the stack area to the callee as an additional
  // argument.  Returns false on OOM; trivially succeeds in dead code or when
  // all results fit in registers.
  [[nodiscard]]
  bool passStackResultAreaCallArg(const ResultType& resultType,
                                  CallCompileState* callState) {
    if (inDeadCode()) {
      return true;
    }
    // Skip past the leading results that are returned in registers.
    ABIResultIter iter(resultType);
    while (!iter.done() && iter.cur().inRegister()) {
      iter.next();
    }
    if (iter.done()) {
      // No stack results.
      return true;
    }

    // The builtin ABI only supports a single result value, so it doesn't
    // use stack results.
    MOZ_ASSERT(callState->abiKind == ABIKind::Wasm);

    auto* stackResultArea = MWasmStackResultArea::New(alloc());
    if (!stackResultArea) {
      return false;
    }
    if (!stackResultArea->init(alloc(), iter.remaining())) {
      return false;
    }
    // Record the offset and MIR type of each remaining (stack) result in the
    // area, indexed from 0.
    for (uint32_t base = iter.index(); !iter.done(); iter.next()) {
      MWasmStackResultArea::StackResult loc(iter.cur().stackOffset(),
                                            iter.cur().type().toMIRType());
      stackResultArea->initResult(iter.index() - base, loc);
    }
    curBlock_->add(stackResultArea);
    // For tail calls, pass the caller-provided stack result pointer instead
    // of the area created above.
    MDefinition* def = callState->returnCall ? (MDefinition*)stackResultPointer_
                                             : (MDefinition*)stackResultArea;
    if (!passCallArg(def, MIRType::StackResults, callState)) {
      return false;
    }
    callState->stackResultArea = stackResultArea;
    return true;
  }
   2464 
   2465  [[nodiscard]]
   2466  bool finishCallArgs(CallCompileState* callState) {
   2467    if (inDeadCode()) {
   2468      return true;
   2469    }
   2470 
   2471    if (!callState->regArgs.append(
   2472            MWasmCallBase::Arg(AnyRegister(InstanceReg), instancePointer_))) {
   2473      return false;
   2474    }
   2475 
   2476    uint32_t stackBytes = callState->abi.stackBytesConsumedSoFar();
   2477 
   2478    maxStackArgBytes_ = std::max(maxStackArgBytes_, stackBytes);
   2479    return true;
   2480  }
   2481 
   2482  [[nodiscard]]
   2483  bool emitCallArgs(const FuncType& funcType, const DefVector& args,
   2484                    CallCompileState* callState) {
   2485    for (size_t i = 0, n = funcType.args().length(); i < n; ++i) {
   2486      if (!mirGen().ensureBallast()) {
   2487        return false;
   2488      }
   2489      if (!passCallArg(args[i], funcType.args()[i], callState)) {
   2490        return false;
   2491      }
   2492    }
   2493 
   2494    ResultType resultType = ResultType::Vector(funcType.results());
   2495    if (!passStackResultAreaCallArg(resultType, callState)) {
   2496      return false;
   2497    }
   2498 
   2499    return finishCallArgs(callState);
   2500  }
   2501 
  // Collect the (single) result of a builtin call from the ABI-fixed return
  // register for `type` and store the defining instruction in *result.
  // MIRType::None means the builtin returns nothing; `result` must then be
  // null and no instruction is created.  Returns false on OOM.
  [[nodiscard]]
  bool collectBuiltinCallResult(MIRType type, MDefinition** result,
                                CallCompileState* callState) {
    MInstruction* def;
    switch (type) {
      case MIRType::Int32:
        def = MWasmRegisterResult::New(alloc(), MIRType::Int32, ReturnReg);
        break;
      case MIRType::Int64:
        def = MWasmRegister64Result::New(alloc(), ReturnReg64);
        break;
      case MIRType::Float32: {
        // System-ABI float results need special handling; the hardFP flag
        // tells the instruction how the value is actually returned.
        if (callState->abiKind == ABIKind::System) {
          def = MWasmSystemFloatRegisterResult::New(
              alloc(), type, ReturnFloat32Reg, callState->hardFP);
        } else {
          def = MWasmFloatRegisterResult::New(alloc(), MIRType::Float32,
                                              ReturnFloat32Reg);
        }
        break;
      }
      case MIRType::Double: {
        if (callState->abiKind == ABIKind::System) {
          def = MWasmSystemFloatRegisterResult::New(
              alloc(), type, ReturnDoubleReg, callState->hardFP);
        } else {
          def = MWasmFloatRegisterResult::New(alloc(), MIRType::Double,
                                              ReturnDoubleReg);
        }
        break;
      }
#ifdef ENABLE_WASM_SIMD
      case MIRType::Simd128:
        MOZ_CRASH("SIMD128 not supported in builtin ABI");
#endif
      case MIRType::WasmAnyRef:
        def = MWasmRegisterResult::New(alloc(), MIRType::WasmAnyRef, ReturnReg);
        break;
      case MIRType::None:
        MOZ_ASSERT(result == nullptr, "Not expecting any results created");
        return true;
      default:
        MOZ_CRASH("unexpected MIRType result for builtin call");
    }

    if (!def) {
      return false;
    }

    curBlock_->add(def);
    *result = def;

    return true;
  }
   2556 
  // Collect all results of a wasm-ABI call into *results, in push order.
  // Register results are read from their ABI-assigned registers; stack
  // results are read from the stack result area recorded in `callState`.
  // Returns false on OOM.
  [[nodiscard]]
  bool collectWasmCallResults(const ResultType& type,
                              CallCompileState* callState, DefVector* results) {
    // This function uses wasm::ABIResultIter which does not handle the system
    // ABI. Use collectBuiltinCallResult instead for builtin calls.
    MOZ_ASSERT(callState->abiKind == ABIKind::Wasm);
    MOZ_ASSERT(callState->hardFP);

    if (!results->reserve(type.length())) {
      return false;
    }

    // The result iterator goes in the order in which results would be popped
    // off; we want the order in which they would be pushed.
    ABIResultIter iter(type);
    // First pass: count the stack results so their indices can be assigned
    // in reverse during the backwards walk below.
    uint32_t stackResultCount = 0;
    while (!iter.done()) {
      if (iter.cur().onStack()) {
        stackResultCount++;
      }
      iter.next();
    }

    // Second pass: walk the results backwards (i.e. in push order) and
    // materialize a MIR definition for each one.
    for (iter.switchToPrev(); !iter.done(); iter.prev()) {
      if (!mirGen().ensureBallast()) {
        return false;
      }
      const ABIResult& result = iter.cur();
      MInstruction* def;
      if (result.inRegister()) {
        switch (result.type().kind()) {
          case wasm::ValType::I32:
            def =
                MWasmRegisterResult::New(alloc(), MIRType::Int32, result.gpr());
            break;
          case wasm::ValType::I64:
            def = MWasmRegister64Result::New(alloc(), result.gpr64());
            break;
          case wasm::ValType::F32:
            def = MWasmFloatRegisterResult::New(alloc(), MIRType::Float32,
                                                result.fpr());
            break;
          case wasm::ValType::F64:
            def = MWasmFloatRegisterResult::New(alloc(), MIRType::Double,
                                                result.fpr());
            break;
          case wasm::ValType::Ref:
            def = MWasmRegisterResult::New(alloc(), MIRType::WasmAnyRef,
                                           result.gpr(),
                                           result.type().toMaybeRefType());
            break;
          case wasm::ValType::V128:
#ifdef ENABLE_WASM_SIMD
            def = MWasmFloatRegisterResult::New(alloc(), MIRType::Simd128,
                                                result.fpr());
#else
            return this->iter().fail("Ion has no SIMD support yet");
#endif
        }
      } else {
        // Stack result: indices count down since we iterate in reverse.
        MOZ_ASSERT(callState->stackResultArea);
        MOZ_ASSERT(stackResultCount);
        uint32_t idx = --stackResultCount;
        def = MWasmStackResult::New(alloc(), callState->stackResultArea, idx);
      }

      if (!def) {
        return false;
      }
      curBlock_->add(def);
      results->infallibleAppend(def);
    }

    MOZ_ASSERT(results->length() == type.length());

    return true;
  }
   2634 
   2635  [[nodiscard]]
   2636  bool call(CallCompileState* callState, const CallSiteDesc& desc,
   2637            const CalleeDesc& callee, const ArgTypeVector& argTypes,
   2638            MDefinition* addressOrRef = nullptr) {
   2639    if (!beginCatchableCall(callState)) {
   2640      return false;
   2641    }
   2642 
   2643    MInstruction* ins;
   2644    if (callState->isCatchable()) {
   2645      ins = MWasmCallCatchable::New(
   2646          alloc(), desc, callee, callState->regArgs,
   2647          StackArgAreaSizeUnaligned(argTypes, callState->abiKind),
   2648          callState->tryNoteIndex, callState->fallthroughBlock,
   2649          callState->prePadBlock, addressOrRef);
   2650    } else {
   2651      ins = MWasmCallUncatchable::New(
   2652          alloc(), desc, callee, callState->regArgs,
   2653          StackArgAreaSizeUnaligned(argTypes, callState->abiKind),
   2654          addressOrRef);
   2655    }
   2656    if (!ins) {
   2657      return false;
   2658    }
   2659    curBlock_->add(ins);
   2660 
   2661    return finishCatchableCall(callState);
   2662  }
   2663 
  // Filter the inlining candidates in `hints` down to the subset approved
  // for inlining (possibly empty), preserving their order.  Rejection
  // criteria, in order: compile mode, empty hints, exhausted per-root
  // inlining budget, imported callees, tail-calling callees, and the size
  // heuristics in InliningHeuristics::isSmallEnoughToInline.
  [[nodiscard]]
  CallRefHint auditInlineableCallees(InliningHeuristics::CallKind kind,
                                     CallRefHint hints) {
    // Takes candidates for inlining as provided in `hints`, and returns a
    // subset (or all) of them for which inlining is approved.  To indicate
    // that they are all disallowed, return an empty CallRefHint.

    MOZ_ASSERT_IF(kind == InliningHeuristics::CallKind::Direct,
                  hints.length() == 1);

    // We only support inlining when lazy tiering. This is currently a
    // requirement because we need the full module bytecode and function
    // definition ranges, which are not available in other modes.
    if (compilerEnv().mode() != CompileMode::LazyTiering) {
      return CallRefHint();
    }

    // We don't support asm.js and inlining. asm.js also doesn't support
    // baseline, which is required for lazy tiering, so we should never get
    // here. The biggest complication for asm.js is getting correct stack
    // traces with inlining.
    MOZ_ASSERT(!codeMeta().isAsmJS());

    // If we were given no candidates, give up now.
    if (hints.empty()) {
      return CallRefHint();
    }

    // We can't inline if we've exceeded our per-root-function inlining
    // budget.
    //
    // This logic will cause `availableBudget` to be driven slightly negative
    // if a budget overshoot happens, so we will have performed slightly more
    // inlining than allowed by the initial setting of `availableBudget`.  The
    // size of this overshoot is however very limited -- it can't exceed the
    // size of three function bodies that are inlined (3 because that's what
    // CallRefHint can hold).  And the max size of an inlineable function body
    // is limited by InliningHeuristics::isSmallEnoughToInline.
    if (rootCompiler_.inliningBudget() < 0) {
      return CallRefHint();
    }

    // Check each candidate in turn, and add all acceptable ones to `filtered`.
    // It is important that `filtered` retains the same ordering as `hints`.
    CallRefHint filtered;
    for (uint32_t i = 0; i < hints.length(); i++) {
      uint32_t funcIndex = hints.get(i);

      // We can't inline an imported function.
      if (codeMeta().funcIsImport(funcIndex)) {
        continue;
      }

      // We do not support inlining a callee which uses tail calls
      FeatureUsage funcFeatureUsage =
          codeTailMeta()->funcDefFeatureUsage(funcIndex);
      if (funcFeatureUsage & FeatureUsage::ReturnCall) {
        continue;
      }

      // Ask the heuristics system if we're allowed to inline a function of
      // this size and kind at the current inlining depth.
      uint32_t inlineeBodySize = codeTailMeta()->funcDefRange(funcIndex).size();
      uint32_t rootFunctionBodySize = rootCompiler_.func().bytecodeSize();
      bool largeFunctionBackoff;
      bool smallEnough = InliningHeuristics::isSmallEnoughToInline(
          kind, inliningDepth(), inlineeBodySize, rootFunctionBodySize,
          &largeFunctionBackoff);
      if (largeFunctionBackoff) {
        rootCompiler_.noteLargeFunctionBackoffWasApplied();
      }
      if (!smallEnough) {
        continue;
      }

      filtered.append(funcIndex);
    }

    // Whatever ends up in `filtered` is approved for inlining.
    return filtered;
  }
   2745 
  // Stitch an inlined callee's compiled MIR back into this (caller)
  // function.  This (1) routes the callee's pending catch block, if any, to
  // the caller's nearest enclosing try landing pad, (2) joins all of the
  // callee's pending returns into a fresh join block, and (3) exposes the
  // callee's return values to the caller through phis appended to *results.
  // If the callee never returns, the caller continues in dead code.
  // Returns false on OOM.
  [[nodiscard]]
  bool finishInlinedCallDirect(FunctionCompiler& calleeCompiler,
                               DefVector* results) {
    const PendingInlineReturnVector& calleeReturns =
        calleeCompiler.pendingInlineReturns_;
    MBasicBlock* calleeCatchBlock = calleeCompiler.pendingInlineCatchBlock_;
    const FuncType& calleeFuncType = calleeCompiler.funcType();
    MBasicBlock* lastBlockBeforeCall = curBlock_;

    // Add the observed features from the inlined function to this function
    iter_.addFeatureUsage(calleeCompiler.featureUsage());

    // Create a block, if needed, to handle exceptions from the callee function
    if (calleeCatchBlock) {
      ControlInstructionVector* tryLandingPadPatches;
      bool inTryCode = inTryBlock(&tryLandingPadPatches);

      // The callee compiler should never create a catch block unless we have
      // a landing pad for it
      MOZ_RELEASE_ASSERT(inTryCode);

      // Create a block in our function to jump to the nearest try block. We
      // cannot just use the callee's catch block for this, as the slots on it
      // are set up for all the locals from that function. We need to create a
      // new block in our function with the slots for this function, that then
      // does the jump to the landing pad. Ion should be able to optimize this
      // away using jump threading.
      MBasicBlock* callerCatchBlock = nullptr;
      if (!newBlock(nullptr, &callerCatchBlock)) {
        return false;
      }

      // Our catch block inherits all of the locals state from immediately
      // before the inlined call
      callerCatchBlock->inheritSlots(lastBlockBeforeCall);

      // The callee catch block jumps to our catch block
      calleeCatchBlock->end(MGoto::New(alloc(), callerCatchBlock));

      // Our catch block has the callee rethrow block as a predecessor, but
      // ignores all phi's, because we use our own locals state.
      if (!callerCatchBlock->addPredecessorWithoutPhis(calleeCatchBlock)) {
        return false;
      }

      // Our catch block ends with a patch to jump to the enclosing try block.
      MBasicBlock* prevBlock = curBlock_;
      curBlock_ = callerCatchBlock;
      if (!endWithPadPatch(tryLandingPadPatches)) {
        return false;
      }
      curBlock_ = prevBlock;
    }

    // If there were no returns, then we are now in dead code
    if (calleeReturns.empty()) {
      curBlock_ = nullptr;
      return true;
    }

    // Create a block to join all of the returns from the inlined function
    MBasicBlock* joinAfterCall = nullptr;
    if (!newBlock(nullptr, &joinAfterCall)) {
      return false;
    }

    // The join block inherits all of the locals state from immediately before
    // the inlined call
    joinAfterCall->inheritSlots(lastBlockBeforeCall);

    // The join block has a phi node for every result of the inlined function
    // type. Each phi node has an operand for each of the returns of the
    // inlined function.
    for (uint32_t i = 0; i < calleeFuncType.results().length(); i++) {
      MPhi* phi = MPhi::New(alloc(), calleeFuncType.results()[i].toMIRType());
      if (!phi || !phi->reserveLength(calleeReturns.length())) {
        return false;
      }
      joinAfterCall->addPhi(phi);
      if (!results->append(phi)) {
        return false;
      }
    }

    // Bind every return from the inlined function to go to the join block, and
    // add the results for the return to the phi nodes.
    for (size_t i = 0; i < calleeReturns.length(); i++) {
      const PendingInlineReturn& calleeReturn = calleeReturns[i];

      // Setup the predecessor and successor relationship
      MBasicBlock* pred = calleeReturn.jump->block();
      if (!joinAfterCall->addPredecessorWithoutPhis(pred)) {
        return false;
      }
      calleeReturn.jump->replaceSuccessor(MGoto::TargetIndex, joinAfterCall);

      // For each result in this return, add it to the corresponding phi node
      for (uint32_t resultIndex = 0;
           resultIndex < calleeFuncType.results().length(); resultIndex++) {
        MDefinition* result = (*results)[resultIndex];
        ((MPhi*)(result))->addInput(calleeReturn.results[resultIndex]);
      }
    }

    // Continue MIR generation starting in the join block
    curBlock_ = joinAfterCall;

    return true;
  }
   2855 
   2856  [[nodiscard]]
   2857  bool callDirect(const FuncType& funcType, uint32_t funcIndex,
   2858                  uint32_t lineOrBytecode, const DefVector& args,
   2859                  DefVector* results) {
   2860    MOZ_ASSERT(!inDeadCode());
   2861 
   2862    CallCompileState callState(ABIKind::Wasm);
   2863    CallSiteDesc desc(lineOrBytecode, rootCompiler_.inlinedCallerOffsetsIndex(),
   2864                      CallSiteKind::Func);
   2865    ResultType resultType = ResultType::Vector(funcType.results());
   2866    auto callee = CalleeDesc::function(funcIndex);
   2867    ArgTypeVector argTypes(funcType);
   2868 
   2869    return emitCallArgs(funcType, args, &callState) &&
   2870           call(&callState, desc, callee, argTypes) &&
   2871           collectWasmCallResults(resultType, &callState, results);
   2872  }
   2873 
   2874  [[nodiscard]]
   2875  bool returnCallDirect(const FuncType& funcType, uint32_t funcIndex,
   2876                        uint32_t lineOrBytecode, const DefVector& args,
   2877                        DefVector* results) {
   2878    MOZ_ASSERT(!inDeadCode());
   2879 
   2880    // We do not support tail calls in inlined functions.
   2881    MOZ_RELEASE_ASSERT(!isInlined());
   2882 
   2883    CallCompileState callState(ABIKind::Wasm);
   2884    callState.returnCall = true;
   2885    CallSiteDesc desc(lineOrBytecode, CallSiteKind::ReturnFunc);
   2886    auto callee = CalleeDesc::function(funcIndex);
   2887    ArgTypeVector argTypes(funcType);
   2888 
   2889    if (!emitCallArgs(funcType, args, &callState)) {
   2890      return false;
   2891    }
   2892 
   2893    auto ins = MWasmReturnCall::New(
   2894        alloc(), desc, callee, callState.regArgs,
   2895        StackArgAreaSizeUnaligned(argTypes, callState.abiKind), nullptr);
   2896    if (!ins) {
   2897      return false;
   2898    }
   2899    curBlock_->end(ins);
   2900    curBlock_ = nullptr;
   2901    return true;
   2902  }
   2903 
   2904  [[nodiscard]]
   2905  bool returnCallImport(unsigned globalDataOffset, uint32_t lineOrBytecode,
   2906                        const FuncType& funcType, const DefVector& args,
   2907                        DefVector* results) {
   2908    MOZ_ASSERT(!inDeadCode());
   2909 
   2910    // We do not support tail calls in inlined functions.
   2911    MOZ_RELEASE_ASSERT(!isInlined());
   2912 
   2913    CallCompileState callState(ABIKind::Wasm);
   2914    callState.returnCall = true;
   2915    CallSiteDesc desc(lineOrBytecode, CallSiteKind::Import);
   2916    auto callee = CalleeDesc::import(globalDataOffset);
   2917    ArgTypeVector argTypes(funcType);
   2918 
   2919    if (!emitCallArgs(funcType, args, &callState)) {
   2920      return false;
   2921    }
   2922 
   2923    auto* ins = MWasmReturnCall::New(
   2924        alloc(), desc, callee, callState.regArgs,
   2925        StackArgAreaSizeUnaligned(argTypes, callState.abiKind), nullptr);
   2926    if (!ins) {
   2927      return false;
   2928    }
   2929    curBlock_->end(ins);
   2930    curBlock_ = nullptr;
   2931    return true;
   2932  }
   2933 
   2934  [[nodiscard]]
   2935  bool returnCallIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
   2936                          MDefinition* address, uint32_t lineOrBytecode,
   2937                          const DefVector& args, DefVector* results) {
   2938    MOZ_ASSERT(!inDeadCode());
   2939 
   2940    // We do not support tail calls in inlined functions.
   2941    MOZ_RELEASE_ASSERT(!isInlined());
   2942 
   2943    const FuncType& funcType = (*codeMeta().types)[funcTypeIndex].funcType();
   2944    CallIndirectId callIndirectId =
   2945        CallIndirectId::forFuncType(codeMeta(), funcTypeIndex);
   2946 
   2947    CallCompileState callState(ABIKind::Wasm);
   2948    callState.returnCall = true;
   2949    CalleeDesc callee;
   2950    MOZ_ASSERT(callIndirectId.kind() != CallIndirectIdKind::AsmJS);
   2951    const TableDesc& table = codeMeta().tables[tableIndex];
   2952    callee =
   2953        CalleeDesc::wasmTable(codeMeta(), table, tableIndex, callIndirectId);
   2954 
   2955    CallSiteDesc desc(lineOrBytecode, CallSiteKind::Indirect);
   2956    ArgTypeVector argTypes(funcType);
   2957 
   2958    if (!emitCallArgs(funcType, args, &callState)) {
   2959      return false;
   2960    }
   2961 
   2962    auto* address32 = boundsCheckAndWrapTableAddress(tableIndex, address);
   2963    if (!address32) {
   2964      return false;
   2965    }
   2966 
   2967    auto* ins = MWasmReturnCall::New(
   2968        alloc(), desc, callee, callState.regArgs,
   2969        StackArgAreaSizeUnaligned(argTypes, callState.abiKind), address32);
   2970    if (!ins) {
   2971      return false;
   2972    }
   2973    curBlock_->end(ins);
   2974    curBlock_ = nullptr;
   2975    return true;
   2976  }
   2977 
   2978  [[nodiscard]]
   2979  bool callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
   2980                    MDefinition* address, uint32_t lineOrBytecode,
   2981                    const DefVector& args, DefVector* results) {
   2982    MOZ_ASSERT(!inDeadCode());
   2983 
   2984    CallCompileState callState(ABIKind::Wasm);
   2985    const FuncType& funcType = (*codeMeta().types)[funcTypeIndex].funcType();
   2986    CallIndirectId callIndirectId =
   2987        CallIndirectId::forFuncType(codeMeta(), funcTypeIndex);
   2988 
   2989    CalleeDesc callee;
   2990    if (codeMeta().isAsmJS()) {
   2991      MOZ_ASSERT(tableIndex == 0);
   2992      MOZ_ASSERT(callIndirectId.kind() == CallIndirectIdKind::AsmJS);
   2993      uint32_t tableIndex = codeMeta().asmJSSigToTableIndex[funcTypeIndex];
   2994      const TableDesc& table = codeMeta().tables[tableIndex];
   2995      // ensured by asm.js validation
   2996      MOZ_ASSERT(table.initialLength() <= UINT32_MAX);
   2997      MOZ_ASSERT(IsPowerOfTwo(table.initialLength()));
   2998 
   2999      MDefinition* mask = constantI32(int32_t(table.initialLength() - 1));
   3000      MBitAnd* maskedAddress =
   3001          MBitAnd::New(alloc(), address, mask, MIRType::Int32);
   3002      curBlock_->add(maskedAddress);
   3003 
   3004      address = maskedAddress;
   3005      callee = CalleeDesc::asmJSTable(codeMeta(), tableIndex);
   3006    } else {
   3007      MOZ_ASSERT(callIndirectId.kind() != CallIndirectIdKind::AsmJS);
   3008      const TableDesc& table = codeMeta().tables[tableIndex];
   3009      callee =
   3010          CalleeDesc::wasmTable(codeMeta(), table, tableIndex, callIndirectId);
   3011      address = boundsCheckAndWrapTableAddress(tableIndex, address);
   3012      if (!address) {
   3013        return false;
   3014      }
   3015    }
   3016 
   3017    CallSiteDesc desc(lineOrBytecode, rootCompiler_.inlinedCallerOffsetsIndex(),
   3018                      CallSiteKind::Indirect);
   3019    ArgTypeVector argTypes(funcType);
   3020    ResultType resultType = ResultType::Vector(funcType.results());
   3021 
   3022    return emitCallArgs(funcType, args, &callState) &&
   3023           call(&callState, desc, callee, argTypes, address) &&
   3024           collectWasmCallResults(resultType, &callState, results);
   3025  }
   3026 
 // Emits a call to an imported function.  `instanceDataOffset` is the offset
 // of the import's entry in the instance data (see CalleeDesc::import).
 // `args` must match `funcType`; the call's result definitions are written
 // to `*results`.  Returns false on OOM.
 [[nodiscard]]
 bool callImport(unsigned instanceDataOffset, uint32_t lineOrBytecode,
                 const FuncType& funcType, const DefVector& args,
                 DefVector* results) {
   MOZ_ASSERT(!inDeadCode());

   // Imported functions are called with the wasm ABI.
   CallCompileState callState(ABIKind::Wasm);
   CallSiteDesc desc(lineOrBytecode, rootCompiler_.inlinedCallerOffsetsIndex(),
                     CallSiteKind::Import);
   auto callee = CalleeDesc::import(instanceDataOffset);
   ArgTypeVector argTypes(funcType);
   ResultType resultType = ResultType::Vector(funcType.results());

   // Lower the arguments, emit the call, then gather its results.
   return emitCallArgs(funcType, args, &callState) &&
          call(&callState, desc, callee, argTypes) &&
          collectWasmCallResults(resultType, &callState, results);
 }
   3044 
 // Emits a call to the infallible builtin described by `builtin`.  The
 // arguments must already have been passed into `callState` (via
 // passCallArg/finishCallArgs).  On success the builtin's result (if any,
 // per builtin.retType) is written to `*result`.  Returns false on OOM.
 [[nodiscard]]
 bool builtinCall(CallCompileState* callState,
                  const SymbolicAddressSignature& builtin,
                  uint32_t lineOrBytecode, MDefinition** result) {
   if (inDeadCode()) {
     *result = nullptr;
     return true;
   }

   // Only infallible builtins go through this path; fallible ones use
   // instanceCall, which knows how to emit catchable calls.
   MOZ_ASSERT(builtin.failureMode == FailureMode::Infallible);

   CallSiteDesc desc(lineOrBytecode, rootCompiler_.inlinedCallerOffsetsIndex(),
                     CallSiteKind::Symbolic);
   auto callee = CalleeDesc::builtin(builtin.identity);

   // Infallible builtins cannot throw, so an uncatchable call suffices.
   auto* ins = MWasmCallUncatchable::New(
       alloc(), desc, callee, callState->regArgs,
       StackArgAreaSizeUnaligned(builtin, callState->abiKind));
   if (!ins) {
     return false;
   }
   curBlock_->add(ins);

   return collectBuiltinCallResult(builtin.retType, result, callState);
 }
   3070 
   3071  [[nodiscard]]
   3072  bool builtinCall1(const SymbolicAddressSignature& builtin,
   3073                    uint32_t lineOrBytecode, MDefinition* arg,
   3074                    MDefinition** result) {
   3075    CallCompileState callState(ABIForBuiltin(builtin.identity));
   3076    return passCallArg(arg, builtin.argTypes[0], &callState) &&
   3077           finishCallArgs(&callState) &&
   3078           builtinCall(&callState, builtin, lineOrBytecode, result);
   3079  }
   3080 
   3081  [[nodiscard]]
   3082  bool builtinCall2(const SymbolicAddressSignature& builtin,
   3083                    uint32_t lineOrBytecode, MDefinition* arg1,
   3084                    MDefinition* arg2, MDefinition** result) {
   3085    CallCompileState callState(ABIForBuiltin(builtin.identity));
   3086    return passCallArg(arg1, builtin.argTypes[0], &callState) &&
   3087           passCallArg(arg2, builtin.argTypes[1], &callState) &&
   3088           finishCallArgs(&callState) &&
   3089           builtinCall(&callState, builtin, lineOrBytecode, result);
   3090  }
   3091 
   3092  [[nodiscard]]
   3093  bool builtinCall5(const SymbolicAddressSignature& builtin,
   3094                    uint32_t lineOrBytecode, MDefinition* arg1,
   3095                    MDefinition* arg2, MDefinition* arg3, MDefinition* arg4,
   3096                    MDefinition* arg5, MDefinition** result) {
   3097    CallCompileState callState(ABIForBuiltin(builtin.identity));
   3098    return passCallArg(arg1, builtin.argTypes[0], &callState) &&
   3099           passCallArg(arg2, builtin.argTypes[1], &callState) &&
   3100           passCallArg(arg3, builtin.argTypes[2], &callState) &&
   3101           passCallArg(arg4, builtin.argTypes[3], &callState) &&
   3102           passCallArg(arg5, builtin.argTypes[4], &callState) &&
   3103           finishCallArgs(&callState) &&
   3104           builtinCall(&callState, builtin, lineOrBytecode, result);
   3105  }
   3106 
   3107  [[nodiscard]]
   3108  bool builtinCall6(const SymbolicAddressSignature& builtin,
   3109                    uint32_t lineOrBytecode, MDefinition* arg1,
   3110                    MDefinition* arg2, MDefinition* arg3, MDefinition* arg4,
   3111                    MDefinition* arg5, MDefinition* arg6,
   3112                    MDefinition** result) {
   3113    CallCompileState callState(ABIForBuiltin(builtin.identity));
   3114    return passCallArg(arg1, builtin.argTypes[0], &callState) &&
   3115           passCallArg(arg2, builtin.argTypes[1], &callState) &&
   3116           passCallArg(arg3, builtin.argTypes[2], &callState) &&
   3117           passCallArg(arg4, builtin.argTypes[3], &callState) &&
   3118           passCallArg(arg5, builtin.argTypes[4], &callState) &&
   3119           passCallArg(arg6, builtin.argTypes[5], &callState) &&
   3120           finishCallArgs(&callState) &&
   3121           builtinCall(&callState, builtin, lineOrBytecode, result);
   3122  }
   3123 
 // Emits a call to the Instance builtin described by `builtin`.  The
 // arguments (including the instance pointer) must already have been passed
 // into `callState`.  If the builtin is fallible, the call is wrapped in the
 // catchable-call machinery (begin/finishCatchableCall) so a failure can
 // unwind to an enclosing try.  `result` may be null only for builtins that
 // return nothing.  Returns false on OOM.
 [[nodiscard]]
 bool instanceCall(CallCompileState* callState,
                   const SymbolicAddressSignature& builtin,
                   uint32_t lineOrBytecode, MDefinition** result = nullptr) {
   MOZ_ASSERT_IF(!result, builtin.retType == MIRType::None);
   if (inDeadCode()) {
     if (result) {
       *result = nullptr;
     }
     return true;
   }

   CallSiteDesc desc(lineOrBytecode, rootCompiler_.inlinedCallerOffsetsIndex(),
                     CallSiteKind::Symbolic);
   // Fallible builtins need the catchable-call setup (try note, landing pad
   // blocks); infallible ones skip it.
   if (builtin.failureMode != FailureMode::Infallible &&
       !beginCatchableCall(callState)) {
     return false;
   }

   MInstruction* ins;
   if (callState->isCatchable()) {
     // Catchable variant: carries the try note and the fallthrough/pre-pad
     // blocks set up by beginCatchableCall.
     ins = MWasmCallCatchable::NewBuiltinInstanceMethodCall(
         alloc(), desc, builtin.identity, builtin.failureMode,
         builtin.failureTrap, callState->instanceArg, callState->regArgs,
         StackArgAreaSizeUnaligned(builtin, callState->abiKind),
         callState->tryNoteIndex, callState->fallthroughBlock,
         callState->prePadBlock);
   } else {
     ins = MWasmCallUncatchable::NewBuiltinInstanceMethodCall(
         alloc(), desc, builtin.identity, builtin.failureMode,
         builtin.failureTrap, callState->instanceArg, callState->regArgs,
         StackArgAreaSizeUnaligned(builtin, callState->abiKind));
   }
   if (!ins) {
     return false;
   }
   curBlock_->add(ins);

   if (!finishCatchableCall(callState)) {
     return false;
   }

   if (!result) {
     return true;
   }
   return collectBuiltinCallResult(builtin.retType, result, callState);
 }
   3171 
   3172  /*********************************************** Instance call helpers ***/
   3173 
 // Do not call this function directly -- it offers no protection against
 // mis-counting of arguments.  Instead call one of
 // ::emitInstanceCall{0,1,2,3,4,5,6}.
 //
 // Emits a call to the Instance function indicated by `callee`.  This is
 // assumed to take an Instance pointer as its first argument.  The remaining
 // args are taken from `args`, which is assumed to hold `numArgs` entries.
 // If `result` is non-null, the MDefinition* holding the return value is
 // written to `*result`.
 [[nodiscard]]
 bool emitInstanceCallN(uint32_t lineOrBytecode,
                        const SymbolicAddressSignature& callee,
                        MDefinition** args, size_t numArgs,
                        MDefinition** result = nullptr) {
   // Check that the first formal parameter is plausibly an Instance pointer.
   MOZ_ASSERT(callee.numArgs > 0);
   MOZ_ASSERT(callee.argTypes[0] == MIRType::Pointer);
   // Check we agree on the number of args.
   MOZ_ASSERT(numArgs + 1 /* the instance pointer */ == callee.numArgs);
   // Check we agree on whether a value is returned.
   MOZ_ASSERT((result == nullptr) == (callee.retType == MIRType::None));

   // If we are in dead code, it can happen that some of the `args` entries
   // are nullptr, which will look like an OOM to the logic below.  So exit
   // at this point.  `passInstanceCallArg`, `passCallArg`, `finishCallArgs`
   // and `instanceCall` all do nothing in dead code, so it's valid
   // to exit here.
   if (inDeadCode()) {
     if (result) {
       *result = nullptr;
     }
     return true;
   }

   // Check all args for signs of OOMness before attempting to allocating any
   // more memory.
   for (size_t i = 0; i < numArgs; i++) {
     if (!args[i]) {
       if (result) {
         *result = nullptr;
       }
       return false;
     }
   }

   // Finally, construct the call.  The instance pointer is passed first,
   // then the caller-supplied args in order.
   CallCompileState callState(ABIForBuiltin(callee.identity));
   if (!passInstanceCallArg(callee.argTypes[0], &callState)) {
     return false;
   }
   for (size_t i = 0; i < numArgs; i++) {
     if (!passCallArg(args[i], callee.argTypes[i + 1], &callState)) {
       return false;
     }
   }
   if (!finishCallArgs(&callState)) {
     return false;
   }
   return instanceCall(&callState, callee, lineOrBytecode, result);
 }
   3234 
   3235  [[nodiscard]]
   3236  bool emitInstanceCall0(uint32_t lineOrBytecode,
   3237                         const SymbolicAddressSignature& callee,
   3238                         MDefinition** result = nullptr) {
   3239    MDefinition* args[0] = {};
   3240    return emitInstanceCallN(lineOrBytecode, callee, args, 0, result);
   3241  }
   3242  [[nodiscard]]
   3243  bool emitInstanceCall1(uint32_t lineOrBytecode,
   3244                         const SymbolicAddressSignature& callee,
   3245                         MDefinition* arg1, MDefinition** result = nullptr) {
   3246    MDefinition* args[1] = {arg1};
   3247    return emitInstanceCallN(lineOrBytecode, callee, args, 1, result);
   3248  }
   3249  [[nodiscard]]
   3250  bool emitInstanceCall2(uint32_t lineOrBytecode,
   3251                         const SymbolicAddressSignature& callee,
   3252                         MDefinition* arg1, MDefinition* arg2,
   3253                         MDefinition** result = nullptr) {
   3254    MDefinition* args[2] = {arg1, arg2};
   3255    return emitInstanceCallN(lineOrBytecode, callee, args, 2, result);
   3256  }
   3257  [[nodiscard]]
   3258  bool emitInstanceCall3(uint32_t lineOrBytecode,
   3259                         const SymbolicAddressSignature& callee,
   3260                         MDefinition* arg1, MDefinition* arg2,
   3261                         MDefinition* arg3, MDefinition** result = nullptr) {
   3262    MDefinition* args[3] = {arg1, arg2, arg3};
   3263    return emitInstanceCallN(lineOrBytecode, callee, args, 3, result);
   3264  }
   3265  [[nodiscard]]
   3266  bool emitInstanceCall4(uint32_t lineOrBytecode,
   3267                         const SymbolicAddressSignature& callee,
   3268                         MDefinition* arg1, MDefinition* arg2,
   3269                         MDefinition* arg3, MDefinition* arg4,
   3270                         MDefinition** result = nullptr) {
   3271    MDefinition* args[4] = {arg1, arg2, arg3, arg4};
   3272    return emitInstanceCallN(lineOrBytecode, callee, args, 4, result);
   3273  }
   3274  [[nodiscard]]
   3275  bool emitInstanceCall5(uint32_t lineOrBytecode,
   3276                         const SymbolicAddressSignature& callee,
   3277                         MDefinition* arg1, MDefinition* arg2,
   3278                         MDefinition* arg3, MDefinition* arg4,
   3279                         MDefinition* arg5, MDefinition** result = nullptr) {
   3280    MDefinition* args[5] = {arg1, arg2, arg3, arg4, arg5};
   3281    return emitInstanceCallN(lineOrBytecode, callee, args, 5, result);
   3282  }
   3283  [[nodiscard]]
   3284  bool emitInstanceCall6(uint32_t lineOrBytecode,
   3285                         const SymbolicAddressSignature& callee,
   3286                         MDefinition* arg1, MDefinition* arg2,
   3287                         MDefinition* arg3, MDefinition* arg4,
   3288                         MDefinition* arg5, MDefinition* arg6,
   3289                         MDefinition** result = nullptr) {
   3290    MDefinition* args[6] = {arg1, arg2, arg3, arg4, arg5, arg6};
   3291    return emitInstanceCallN(lineOrBytecode, callee, args, 6, result);
   3292  }
   3293 
   3294  [[nodiscard]] MDefinition* stackSwitch(MDefinition* suspender,
   3295                                         MDefinition* fn, MDefinition* data,
   3296                                         StackSwitchKind kind) {
   3297    MOZ_ASSERT(!inDeadCode());
   3298 
   3299    MInstruction* ins;
   3300    switch (kind) {
   3301      case StackSwitchKind::SwitchToMain:
   3302        ins = MWasmStackSwitchToMain::New(alloc(), instancePointer_, suspender,
   3303                                          fn, data);
   3304        break;
   3305      case StackSwitchKind::SwitchToSuspendable:
   3306        ins = MWasmStackSwitchToSuspendable::New(alloc(), instancePointer_,
   3307                                                 suspender, fn, data);
   3308        break;
   3309      case StackSwitchKind::ContinueOnSuspendable:
   3310        ins = MWasmStackContinueOnSuspendable::New(alloc(), instancePointer_,
   3311                                                   suspender, data);
   3312        break;
   3313    }
   3314    if (!ins) {
   3315      return nullptr;
   3316    }
   3317 
   3318    curBlock_->add(ins);
   3319 
   3320    return ins;
   3321  }
   3322 
 // Emits an indirect call through a function reference `ref` (call_ref).
 // `args` must match `funcType`; the call's result definitions are written
 // to `*results`.  Returns false on OOM.
 [[nodiscard]]
 bool callRef(const FuncType& funcType, MDefinition* ref,
              uint32_t lineOrBytecode, const DefVector& args,
              DefVector* results) {
   MOZ_ASSERT(!inDeadCode());

   CallCompileState callState(ABIKind::Wasm);
   CalleeDesc callee = CalleeDesc::wasmFuncRef();
   CallSiteDesc desc(lineOrBytecode, rootCompiler_.inlinedCallerOffsetsIndex(),
                     CallSiteKind::FuncRef);
   ArgTypeVector argTypes(funcType);
   ResultType resultType = ResultType::Vector(funcType.results());

   // The funcref is passed to call() as the dynamic callee value.
   return emitCallArgs(funcType, args, &callState) &&
          call(&callState, desc, callee, argTypes, ref) &&
          collectWasmCallResults(resultType, &callState, results);
 }
   3340 
 // Emits a tail call (return_call_ref) through a function reference `ref`.
 // The current block ends at the call: a return call transfers control (and
 // its results) directly to our caller, so `results` is never populated
 // here.  Return calls are not supported while inlining, hence the
 // !isInlined() assert.  Returns false on OOM.
 [[nodiscard]]
 bool returnCallRef(const FuncType& funcType, MDefinition* ref,
                    uint32_t lineOrBytecode, const DefVector& args,
                    DefVector* results) {
   MOZ_ASSERT(!inDeadCode());
   MOZ_ASSERT(!isInlined());

   CallCompileState callState(ABIKind::Wasm);
   callState.returnCall = true;
   CalleeDesc callee = CalleeDesc::wasmFuncRef();
   CallSiteDesc desc(lineOrBytecode, CallSiteKind::FuncRef);
   ArgTypeVector argTypes(funcType);

   if (!emitCallArgs(funcType, args, &callState)) {
     return false;
   }

   auto* ins = MWasmReturnCall::New(
       alloc(), desc, callee, callState.regArgs,
       StackArgAreaSizeUnaligned(argTypes, callState.abiKind), ref);
   if (!ins) {
     return false;
   }
   // The return call terminates this block; everything after is dead code.
   curBlock_->end(ins);
   curBlock_ = nullptr;
   return true;
 }
   3368 
   3369  [[nodiscard]] MDefinition* stringCast(MDefinition* string) {
   3370    auto* ins = MWasmTrapIfAnyRefIsNotJSString::New(
   3371        alloc(), string, wasm::Trap::BadCast, trapSiteDesc());
   3372    if (!ins) {
   3373      return ins;
   3374    }
   3375    curBlock_->add(ins);
   3376    return ins;
   3377  }
   3378 
   3379  [[nodiscard]] MDefinition* stringTest(MDefinition* string) {
   3380    auto* ins = MWasmAnyRefIsJSString::New(alloc(), string);
   3381    if (!ins) {
   3382      return nullptr;
   3383    }
   3384    curBlock_->add(ins);
   3385    return ins;
   3386  }
   3387 
   3388  [[nodiscard]] MDefinition* stringLength(MDefinition* string) {
   3389    auto* ins = MWasmAnyRefJSStringLength::New(
   3390        alloc(), string, wasm::Trap::BadCast, trapSiteDesc());
   3391    if (!ins) {
   3392      return nullptr;
   3393    }
   3394    curBlock_->add(ins);
   3395    return ins;
   3396  }
   3397 
 // Emits inline MIR for a builtin-module function instead of an out-of-line
 // instance call.  Must only be called for builtins whose inlineOp() is not
 // None.  Sets the operation's result on the iterator.  Returns false on OOM.
 [[nodiscard]] bool dispatchInlineBuiltinModuleFunc(
     const BuiltinModuleFunc& builtinModuleFunc, const DefVector& params) {
   BuiltinInlineOp inlineOp = builtinModuleFunc.inlineOp();
   MOZ_ASSERT(inlineOp != BuiltinInlineOp::None);
   switch (inlineOp) {
     case BuiltinInlineOp::StringCast: {
       MOZ_ASSERT(params.length() == 1);
       MDefinition* string = params[0];
       // stringCast only emits a trap guard; the result of the cast is the
       // original value, so `cast` is checked purely for OOM.
       MDefinition* cast = stringCast(string);
       if (!cast) {
         return false;
       }
       iter().setResult(string);
       return true;
     }
     case BuiltinInlineOp::StringTest: {
       MOZ_ASSERT(params.length() == 1);
       MDefinition* string = params[0];
       MDefinition* test = stringTest(string);
       if (!test) {
         return false;
       }
       iter().setResult(test);
       return true;
     }
     case BuiltinInlineOp::StringLength: {
       MOZ_ASSERT(params.length() == 1);
       MDefinition* string = params[0];
       MDefinition* length = stringLength(string);
       if (!length) {
         return false;
       }
       iter().setResult(length);
       return true;
     }
     case BuiltinInlineOp::None:
     case BuiltinInlineOp::Limit:
       break;
   }
   // All inlinable ops are handled above.
   MOZ_CRASH();
 }
   3439 
 // Emits a call to a builtin-module function: either inline MIR (when the
 // builtin supports it) or an instance call.  `params` are the wasm-level
 // arguments.  The result, if the builtin has one, is set on the iterator.
 // Returns false on OOM.
 [[nodiscard]] bool callBuiltinModuleFunc(
     const BuiltinModuleFunc& builtinModuleFunc, const DefVector& params) {
   MOZ_ASSERT(!inDeadCode());

   // Prefer the inline lowering when one exists.
   BuiltinInlineOp inlineOp = builtinModuleFunc.inlineOp();
   if (inlineOp != BuiltinInlineOp::None) {
     return dispatchInlineBuiltinModuleFunc(builtinModuleFunc, params);
   }

   // It's almost possible to use FunctionCompiler::emitInstanceCallN here.
   // Unfortunately not currently possible though, since ::emitInstanceCallN
   // expects an array of arguments along with a size, and that's not what is
   // available here.  It would be possible if we were prepared to copy
   // `builtinModuleFunc->params` into a fixed-sized (16 element?) array, add
   // `memoryBase`, and make the call.
   const SymbolicAddressSignature& callee = *builtinModuleFunc.sig();

   // Instance pointer first, then the wasm-level arguments.
   CallCompileState callState(ABIForBuiltin(callee.identity));
   if (!passInstanceCallArg(callee.argTypes[0], &callState) ||
       !passCallArgs(params, builtinModuleFunc.funcType()->args(),
                     &callState)) {
     return false;
   }

   // Memory-using builtins additionally receive the memory base as a
   // trailing argument.
   if (builtinModuleFunc.usesMemory()) {
     if (!passCallArg(memoryBase(0), MIRType::Pointer, &callState)) {
       return false;
     }
   }

   if (!finishCallArgs(&callState)) {
     return false;
   }

   // Only request a result definition when the func type declares one.
   bool hasResult = !builtinModuleFunc.funcType()->results().empty();
   MDefinition* result = nullptr;
   MDefinition** resultOutParam = hasResult ? &result : nullptr;
   if (!instanceCall(&callState, callee, readBytecodeOffset(),
                     resultOutParam)) {
     return false;
   }

   if (hasResult) {
     iter().setResult(result);
   }
   return true;
 }
   3487 
   3488  /*********************************************** Control flow generation */
   3489 
 // We are in dead code when there is no current block to append to.
 inline bool inDeadCode() const { return curBlock_ == nullptr; }
   3491 
 // Emits a return of `values` from the current function and marks the
 // current position as dead code.  When inlined, the return is recorded as
 // a pending jump to be patched into the caller instead.  Returns false on
 // OOM.
 [[nodiscard]] bool returnValues(DefVector&& values) {
   if (inDeadCode()) {
     return true;
   }

   // If we're inlined into another function, we must accumulate the returns
   // so that they can be patched into the caller function.
   if (isInlined()) {
     MGoto* jump = MGoto::New(alloc());
     if (!jump) {
       return false;
     }
     curBlock_->end(jump);
     curBlock_ = nullptr;
     return pendingInlineReturns_.emplaceBack(
         PendingInlineReturn(jump, std::move(values)));
   }

   if (values.empty()) {
     curBlock_->end(MWasmReturnVoid::New(alloc(), instancePointer_));
   } else {
     ResultType resultType = ResultType::Vector(funcType().results());
     ABIResultIter iter(resultType);
     // Switch to iterate in FIFO order instead of the default LIFO.
     while (!iter.done()) {
       iter.next();
     }
     iter.switchToPrev();
     for (uint32_t i = 0; !iter.done(); iter.prev(), i++) {
       if (!mirGen().ensureBallast()) {
         return false;
       }
       const ABIResult& result = iter.cur();
       if (result.onStack()) {
         // Stack results are stored through the stack-result pointer; only
         // the final (register) result actually ends the block.
         MOZ_ASSERT(iter.remaining() > 1);
         auto* store = MWasmStoreStackResult::New(
             alloc(), stackResultPointer_, result.stackOffset(), values[i]);
         curBlock_->add(store);
       } else {
         MOZ_ASSERT(iter.remaining() == 1);
         MOZ_ASSERT(i + 1 == values.length());
         curBlock_->end(
             MWasmReturn::New(alloc(), values[i], instancePointer_));
       }
     }
   }
   curBlock_ = nullptr;
   return true;
 }
   3541 
   3542  void unreachableTrap() {
   3543    if (inDeadCode()) {
   3544      return;
   3545    }
   3546 
   3547    auto* ins =
   3548        MWasmTrap::New(alloc(), wasm::Trap::Unreachable, trapSiteDesc());
   3549    curBlock_->end(ins);
   3550    curBlock_ = nullptr;
   3551  }
   3552 
   3553 private:
 // Number of values `block` has pushed beyond its first stack slot.
 static uint32_t numPushed(MBasicBlock* block) {
   return block->stackDepth() - block->info().firstStackSlot();
 }
   3557 
   3558 public:
 // Push `defs` onto the current block's value stack, which must currently
 // hold no pushed values.  Returns false on OOM.
 [[nodiscard]] bool pushDefs(const DefVector& defs) {
   if (inDeadCode()) {
     return true;
   }
   MOZ_ASSERT(numPushed(curBlock_) == 0);
   if (!curBlock_->ensureHasSlots(defs.length())) {
     return false;
   }
   for (MDefinition* def : defs) {
     MOZ_ASSERT(def->type() != MIRType::None);
     curBlock_->push(def);
   }
   return true;
 }
   3573 
   3574  [[nodiscard]] bool popPushedDefs(DefVector* defs) {
   3575    size_t n = numPushed(curBlock_);
   3576    if (!defs->resizeUninitialized(n)) {
   3577      return false;
   3578    }
   3579    for (; n > 0; n--) {
   3580      MDefinition* def = curBlock_->pop();
   3581      MOZ_ASSERT(def->type() != MIRType::Value);
   3582      (*defs)[n - 1] = def;
   3583    }
   3584    return true;
   3585  }
   3586 
   3587 private:
   3588  [[nodiscard]] bool addJoinPredecessor(const DefVector& defs,
   3589                                        MBasicBlock** joinPred) {
   3590    *joinPred = curBlock_;
   3591    if (inDeadCode()) {
   3592      return true;
   3593    }
   3594    return pushDefs(defs);
   3595  }
   3596 
   3597 public:
 // Begin an if: branch on `cond`, enter the 'then' arm, and hand the 'else'
 // block back via `*elseBlock` (nullptr in dead code) for switchToElse.
 // Returns false on OOM.
 [[nodiscard]] bool branchAndStartThen(MDefinition* cond,
                                       MBasicBlock** elseBlock) {
   if (inDeadCode()) {
     *elseBlock = nullptr;
   } else {
     MBasicBlock* thenBlock;
     if (!newBlock(curBlock_, &thenBlock)) {
       return false;
     }
     if (!newBlock(curBlock_, elseBlock)) {
       return false;
     }

     curBlock_->end(MTest::New(alloc(), cond, thenBlock, *elseBlock));

     curBlock_ = thenBlock;
     mirGraph().moveBlockToEnd(curBlock_);
   }

   // The 'then' arm opens a new pending block scope either way.
   return startBlock();
 }
   3619 
 // Finish the 'then' arm of an if and switch to its 'else' arm.
 // `elseBlock` is the block produced by branchAndStartThen; the end of the
 // 'then' arm is handed back via `*thenJoinPred` (nullptr if there is no
 // else block) for joinIfElse.  Returns false on OOM.
 [[nodiscard]] bool switchToElse(MBasicBlock* elseBlock,
                                 MBasicBlock** thenJoinPred) {
   DefVector values;
   if (!finishBlock(&values)) {
     return false;
   }

   if (!elseBlock) {
     *thenJoinPred = nullptr;
   } else {
     // Leave the 'then' values pushed for the eventual join.
     if (!addJoinPredecessor(values, thenJoinPred)) {
       return false;
     }

     curBlock_ = elseBlock;
     mirGraph().moveBlockToEnd(curBlock_);
   }

   // The 'else' arm opens its own pending block scope.
   return startBlock();
 }
   3640 
 // Finish an if/else: join the 'then' arm (via `thenJoinPred`, from
 // switchToElse) with the current 'else' arm, and pop the joined values
 // into `*defs`.  Returns false on OOM.
 [[nodiscard]] bool joinIfElse(MBasicBlock* thenJoinPred, DefVector* defs) {
   DefVector values;
   if (!finishBlock(&values)) {
     return false;
   }

   // Both arms dead: nothing to join.
   if (!thenJoinPred && inDeadCode()) {
     return true;
   }

   MBasicBlock* elseJoinPred;
   if (!addJoinPredecessor(values, &elseJoinPred)) {
     return false;
   }

   // Collect the live predecessors (either arm may be dead).
   mozilla::Array<MBasicBlock*, 2> blocks;
   size_t numJoinPreds = 0;
   if (thenJoinPred) {
     blocks[numJoinPreds++] = thenJoinPred;
   }
   if (elseJoinPred) {
     blocks[numJoinPreds++] = elseJoinPred;
   }

   if (numJoinPreds == 0) {
     return true;
   }

   MBasicBlock* join;
   if (!goToNewBlock(blocks[0], &join)) {
     return false;
   }
   for (size_t i = 1; i < numJoinPreds; ++i) {
     if (!goToExistingBlock(blocks[i], join)) {
       return false;
     }
   }

   curBlock_ = join;
   return popPushedDefs(defs);
 }
   3682 
 // Open a new pending block scope (a branch target for bindBranches).
 [[nodiscard]] bool startBlock() {
   // The slot we are about to reuse must not have leftover branch patches.
   MOZ_ASSERT_IF(pendingBlockDepth_ < pendingBlocks_.length(),
                 pendingBlocks_[pendingBlockDepth_].patches.empty());
   pendingBlockDepth_++;
   return true;
 }
   3689 
   3690  [[nodiscard]] bool finishBlock(DefVector* defs) {
   3691    MOZ_ASSERT(pendingBlockDepth_);
   3692    uint32_t topLabel = --pendingBlockDepth_;
   3693    return bindBranches(topLabel, defs);
   3694  }
   3695 
 // Begin a wasm loop: create the (pending) loop header with phis for the
 // `paramCount` loop parameters and enter the loop body.  `*loopHeader` is
 // nullptr in dead code.  Returns false on OOM.
 [[nodiscard]] bool startLoop(MBasicBlock** loopHeader, size_t paramCount) {
   *loopHeader = nullptr;

   // Depths are bumped even in dead code so closeLoop stays balanced.
   pendingBlockDepth_++;
   rootCompiler_.startLoop();

   if (inDeadCode()) {
     return true;
   }

   // Create the loop header.
   MOZ_ASSERT(curBlock_->loopDepth() == rootCompiler_.loopDepth() - 1);
   *loopHeader = MBasicBlock::New(mirGraph(), info(), curBlock_,
                                  MBasicBlock::PENDING_LOOP_HEADER);
   if (!*loopHeader) {
     return false;
   }

   (*loopHeader)->setLoopDepth(rootCompiler_.loopDepth());
   mirGraph().addBlock(*loopHeader);
   curBlock_->end(MGoto::New(alloc(), *loopHeader));

   DefVector loopParams;
   if (!iter().getResults(paramCount, &loopParams)) {
     return false;
   }

   // Eagerly create a phi for all loop params. setLoopBackedge will remove
   // any that were not necessary.
   for (size_t i = 0; i < paramCount; i++) {
     MPhi* phi = MPhi::New(alloc(), loopParams[i]->type());
     if (!phi) {
       return false;
     }
     // Two inputs: loop entry now, the backedge later (setLoopBackedge).
     if (!phi->reserveLength(2)) {
       return false;
     }
     (*loopHeader)->addPhi(phi);
     phi->addInput(loopParams[i]);
     loopParams[i] = phi;
   }
   iter().setResults(paramCount, loopParams);

   MBasicBlock* body;
   if (!goToNewBlock(*loopHeader, &body)) {
     return false;
   }
   curBlock_ = body;
   return true;
 }
   3746 
   3747 private:
 // Replace, in `b`'s stack slots, any phi marked 'unused' (i.e. found
 // redundant by setLoopBackedge) with the phi's loop-entry operand.
 void fixupRedundantPhis(MBasicBlock* b) {
   for (size_t i = 0, depth = b->stackDepth(); i < depth; i++) {
     MDefinition* def = b->getSlot(i);
     if (def->isUnused()) {
       // Only redundant loop phis are flagged unused; operand 0 is the
       // definition flowing into the loop header.
       b->setSlot(i, def->toPhi()->getOperand(0));
     }
   }
 }
   3756 
 // Close a loop by attaching `backedge` to `loopEntry`, then eliminate the
 // loop-header phis that turned out to be redundant.  Returns false on OOM.
 [[nodiscard]] bool setLoopBackedge(MBasicBlock* loopEntry,
                                    MBasicBlock* loopBody,
                                    MBasicBlock* backedge, size_t paramCount) {
   // Complete the pending loop header by wiring in the backedge.
   if (!loopEntry->setBackedgeWasm(backedge, paramCount)) {
     return false;
   }

   // Entering a loop will eagerly create a phi node for all locals and loop
   // params. Now that we've closed the loop we can check which phi nodes
   // were actually needed by checking if the SSA definition flowing into the
   // loop header (operand 0) is different than the SSA definition coming from
   // the loop backedge (operand 1). If they are the same definition, the phi
   // is redundant and can be removed.
   //
   // To do this we mark all redundant phis as 'unused', then remove the phi's
   // from places in ourself the phis may have flowed into, then replace all
   // uses of the phi's in the MIR graph with the original SSA definition.
   for (MPhiIterator phi = loopEntry->phisBegin(); phi != loopEntry->phisEnd();
        phi++) {
     MOZ_ASSERT(phi->numOperands() == 2);
     if (phi->getOperand(0) == phi->getOperand(1)) {
       phi->setUnused();
     }
   }

   // Fix up phis stored in the slots Vector of pending blocks.
   for (PendingBlockTarget& pendingBlockTarget : pendingBlocks_) {
     for (ControlFlowPatch& p : pendingBlockTarget.patches) {
       MBasicBlock* block = p.ins->block();
       if (block->loopDepth() >= loopEntry->loopDepth()) {
         fixupRedundantPhis(block);
       }
     }
   }

   // The loop body, if any, might be referencing recycled phis too.
   if (loopBody) {
     fixupRedundantPhis(loopBody);
   }

   // Pending jumps to an enclosing try-catch may reference the recycled phis.
   // We have to search above all enclosing try blocks, as a delegate may move
   // patches around.
   for (uint32_t depth = 0; depth < iter().controlStackDepth(); depth++) {
     LabelKind kind = iter().controlKind(depth);
     if (kind != LabelKind::Try && kind != LabelKind::TryTable &&
         kind != LabelKind::Body) {
       continue;
     }
     Control& control = iter().controlItem(depth);
     if (!control.tryControl) {
       continue;
     }
     for (MControlInstruction* patch : control.tryControl->landingPadPatches) {
       MBasicBlock* block = patch->block();
       if (block->loopDepth() >= loopEntry->loopDepth()) {
         fixupRedundantPhis(block);
       }
     }
   }
   for (MControlInstruction* patch : bodyRethrowPadPatches_) {
     MBasicBlock* block = patch->block();
     if (block->loopDepth() >= loopEntry->loopDepth()) {
       fixupRedundantPhis(block);
     }
   }

   // If we're inlined into another function we are accumulating return values
   // in a vector, search through the results to see if any refer to a
   // redundant phi.
   for (PendingInlineReturn& pendingReturn : pendingInlineReturns_) {
     for (uint32_t resultIndex = 0;
          resultIndex < pendingReturn.results.length(); resultIndex++) {
       MDefinition** pendingResult = &pendingReturn.results[resultIndex];
       if ((*pendingResult)->isUnused()) {
         *pendingResult = (*pendingResult)->toPhi()->getOperand(0);
       }
     }
   }

   // Discard redundant phis and add to the free list.
   for (MPhiIterator phi = loopEntry->phisBegin();
        phi != loopEntry->phisEnd();) {
     MPhi* entryDef = *phi++;
     if (!entryDef->isUnused()) {
       continue;
     }

     entryDef->justReplaceAllUsesWith(entryDef->getOperand(0));
     loopEntry->discardPhi(entryDef);
     mirGraph().addPhiToFreeList(entryDef);
   }

   return true;
 }
   3852 
   3853 public:
  // Close a `loop` construct: bind all pending branches back to the loop
  // header, install the single Ion backedge, and resume compilation after
  // the loop. `loopHeader` is null when the loop entry itself was dead code;
  // `loopResults` receives the loop's fallthrough result values.
  [[nodiscard]] bool closeLoop(MBasicBlock* loopHeader,
                               DefVector* loopResults) {
    MOZ_ASSERT(pendingBlockDepth_ >= 1);
    MOZ_ASSERT(rootCompiler_.loopDepth());

    uint32_t headerLabel = pendingBlockDepth_ - 1;

    if (!loopHeader) {
      // The loop was never reachable; there must be no pending branches
      // targeting it.
      MOZ_ASSERT(inDeadCode());
      MOZ_ASSERT(headerLabel >= pendingBlocks_.length() ||
                 pendingBlocks_[headerLabel].patches.empty());
      pendingBlockDepth_--;
      rootCompiler_.closeLoop();
      return true;
    }

    // Op::Loop doesn't have an implicit backedge so temporarily set
    // aside the end of the loop body to bind backedges.
    MBasicBlock* loopBody = curBlock_;
    curBlock_ = nullptr;

    // As explained in bug 1253544, Ion apparently has an invariant that
    // there is only one backedge to loop headers. To handle wasm's ability
    // to have multiple backedges to the same loop header, we bind all those
    // branches as forward jumps to a single backward jump. This is
    // unfortunate but the optimizer is able to fold these into single jumps
    // to backedges.
    DefVector backedgeValues;
    if (!bindBranches(headerLabel, &backedgeValues)) {
      return false;
    }

    MOZ_ASSERT(loopHeader->loopDepth() == rootCompiler_.loopDepth());

    if (curBlock_) {
      // We're on the loop backedge block, created by bindBranches.
      // Replace whatever is currently on the slots with the values flowing
      // along the backedge.
      for (size_t i = 0, n = numPushed(curBlock_); i != n; i++) {
        curBlock_->pop();
      }

      if (!pushDefs(backedgeValues)) {
        return false;
      }

      MOZ_ASSERT(curBlock_->loopDepth() == rootCompiler_.loopDepth());
      curBlock_->end(MGoto::New(alloc(), loopHeader));
      if (!setLoopBackedge(loopHeader, loopBody, curBlock_,
                           backedgeValues.length())) {
        return false;
      }
    }

    curBlock_ = loopBody;

    rootCompiler_.closeLoop();

    // If the loop depth is still at the inner loop body, correct it.
    if (curBlock_ && curBlock_->loopDepth() != rootCompiler_.loopDepth()) {
      MBasicBlock* out;
      if (!goToNewBlock(curBlock_, &out)) {
        return false;
      }
      curBlock_ = out;
    }

    pendingBlockDepth_ -= 1;
    return inDeadCode() || popPushedDefs(loopResults);
  }
   3922 
   3923  [[nodiscard]] bool addControlFlowPatch(
   3924      MControlInstruction* ins, uint32_t relative, uint32_t index,
   3925      BranchHint branchHint = BranchHint::Invalid) {
   3926    MOZ_ASSERT(relative < pendingBlockDepth_);
   3927    uint32_t absolute = pendingBlockDepth_ - 1 - relative;
   3928 
   3929    if (absolute >= pendingBlocks_.length() &&
   3930        !pendingBlocks_.resize(absolute + 1)) {
   3931      return false;
   3932    }
   3933 
   3934    pendingBlocks_[absolute].hint = branchHint;
   3935    return pendingBlocks_[absolute].patches.append(
   3936        ControlFlowPatch(ins, index));
   3937  }
   3938 
   3939  [[nodiscard]] bool br(uint32_t relativeDepth, const DefVector& values) {
   3940    if (inDeadCode()) {
   3941      return true;
   3942    }
   3943 
   3944    MGoto* jump = MGoto::New(alloc());
   3945    if (!addControlFlowPatch(jump, relativeDepth, MGoto::TargetIndex)) {
   3946      return false;
   3947    }
   3948 
   3949    if (!pushDefs(values)) {
   3950      return false;
   3951    }
   3952 
   3953    curBlock_->end(jump);
   3954    curBlock_ = nullptr;
   3955    return true;
   3956  }
   3957 
   3958  [[nodiscard]] bool brIf(uint32_t relativeDepth, const DefVector& values,
   3959                          MDefinition* condition, BranchHint branchHint) {
   3960    if (inDeadCode()) {
   3961      return true;
   3962    }
   3963 
   3964    MBasicBlock* joinBlock = nullptr;
   3965    if (!newBlock(curBlock_, &joinBlock)) {
   3966      return false;
   3967    }
   3968 
   3969    MTest* test = MTest::New(alloc(), condition, nullptr, joinBlock);
   3970    if (!addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex,
   3971                             branchHint)) {
   3972      return false;
   3973    }
   3974 
   3975    if (!pushDefs(values)) {
   3976      return false;
   3977    }
   3978 
   3979    curBlock_->end(test);
   3980    curBlock_ = joinBlock;
   3981 
   3982    return true;
   3983  }
   3984 
  // Compile a br_table: a switch on `operand` that branches to one of the
  // labels in `depths` (or `defaultDepth` when out of range), carrying
  // `values` to the chosen target.
  [[nodiscard]] bool brTable(MDefinition* operand, uint32_t defaultDepth,
                             const Uint32Vector& depths,
                             const DefVector& values) {
    if (inDeadCode()) {
      return true;
    }

    size_t numCases = depths.length();
    MOZ_ASSERT(numCases <= INT32_MAX);
    MOZ_ASSERT(numCases);

    MTableSwitch* table =
        MTableSwitch::New(alloc(), operand, 0, int32_t(numCases - 1));

    // The default target gets its own successor slot, patched like any other
    // pending branch.
    size_t defaultIndex;
    if (!table->addDefault(nullptr, &defaultIndex)) {
      return false;
    }
    if (!addControlFlowPatch(table, defaultDepth, defaultIndex)) {
      return false;
    }

    // Deduplicate successors: all case entries that branch to the same
    // relative depth share a single successor slot on the MTableSwitch.
    using IndexToCaseMap =
        HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>;

    IndexToCaseMap indexToCase;
    if (!indexToCase.put(defaultDepth, defaultIndex)) {
      return false;
    }

    for (size_t i = 0; i < numCases; i++) {
      if (!mirGen().ensureBallast()) {
        return false;
      }

      uint32_t depth = depths[i];

      size_t caseIndex;
      IndexToCaseMap::AddPtr p = indexToCase.lookupForAdd(depth);
      if (!p) {
        // First time this depth is seen: allocate a successor slot and
        // record a patch so it is bound once the target block exists.
        if (!table->addSuccessor(nullptr, &caseIndex)) {
          return false;
        }
        if (!addControlFlowPatch(table, depth, caseIndex)) {
          return false;
        }
        if (!indexToCase.add(p, depth, caseIndex)) {
          return false;
        }
      } else {
        caseIndex = p->value();
      }

      if (!table->addCase(caseIndex)) {
        return false;
      }
    }

    // Push the branch operands for the targets to consume.
    if (!pushDefs(values)) {
      return false;
    }

    curBlock_->end(table);
    // Code after a table switch is dead.
    curBlock_ = nullptr;

    return true;
  }
   4052 
   4053  /********************************************************** Exceptions ***/
   4054 
   4055  bool inTryBlockFrom(uint32_t fromRelativeDepth,
   4056                      uint32_t* tryRelativeDepth) const {
   4057    uint32_t relativeDepth;
   4058    if (iter_.controlFindInnermostFrom(
   4059            [](LabelKind kind, const Control& control) {
   4060              return control.tryControl != nullptr &&
   4061                     control.tryControl->inBody;
   4062            },
   4063            fromRelativeDepth, &relativeDepth)) {
   4064      *tryRelativeDepth = relativeDepth;
   4065      return true;
   4066    }
   4067 
   4068    if (callerCompiler_ && callerCompiler_->inTryCode()) {
   4069      *tryRelativeDepth = iter_.controlStackDepth() - 1;
   4070      return true;
   4071    }
   4072 
   4073    return false;
   4074  }
   4075 
   4076  bool inTryBlockFrom(uint32_t fromRelativeDepth,
   4077                      ControlInstructionVector** landingPadPatches) {
   4078    uint32_t tryRelativeDepth;
   4079    if (!inTryBlockFrom(fromRelativeDepth, &tryRelativeDepth)) {
   4080      return false;
   4081    }
   4082 
   4083    if (tryRelativeDepth == iter().controlStackDepth() - 1) {
   4084      *landingPadPatches = &bodyRethrowPadPatches_;
   4085    } else {
   4086      *landingPadPatches =
   4087          &iter().controlItem(tryRelativeDepth).tryControl->landingPadPatches;
   4088    }
   4089    return true;
   4090  }
   4091 
   4092  bool inTryBlock(ControlInstructionVector** landingPadPatches) {
   4093    return inTryBlockFrom(0, landingPadPatches);
   4094  }
   4095 
   4096  bool inTryCode() const {
   4097    uint32_t tryRelativeDepth;
   4098    return inTryBlockFrom(0, &tryRelativeDepth);
   4099  }
   4100 
   4101  MDefinition* loadTag(uint32_t tagIndex) {
   4102    MWasmLoadInstanceDataField* tag = MWasmLoadInstanceDataField::New(
   4103        alloc(), MIRType::WasmAnyRef,
   4104        codeMeta().offsetOfTagInstanceData(tagIndex), true, instancePointer_);
   4105    curBlock_->add(tag);
   4106    return tag;
   4107  }
   4108 
   4109  void loadPendingExceptionState(MDefinition** pendingException,
   4110                                 MDefinition** pendingExceptionTag) {
   4111    auto* exception = MWasmLoadInstance::New(
   4112        alloc(), instancePointer_, wasm::Instance::offsetOfPendingException(),
   4113        MIRType::WasmAnyRef, AliasSet::Load(AliasSet::WasmPendingException));
   4114    curBlock_->add(exception);
   4115    *pendingException = exception;
   4116 
   4117    auto* tag = MWasmLoadInstance::New(
   4118        alloc(), instancePointer_,
   4119        wasm::Instance::offsetOfPendingExceptionTag(), MIRType::WasmAnyRef,
   4120        AliasSet::Load(AliasSet::WasmPendingException));
   4121    curBlock_->add(tag);
   4122    *pendingExceptionTag = tag;
   4123  }
   4124 
   4125  [[nodiscard]] bool setPendingExceptionState(MDefinition* exception,
   4126                                              MDefinition* tag) {
   4127    // Set the pending exception object
   4128    auto* exceptionAddr = MWasmDerivedPointer::New(
   4129        alloc(), instancePointer_, Instance::offsetOfPendingException());
   4130    curBlock_->add(exceptionAddr);
   4131    auto* setException = MWasmStoreRef::New(
   4132        alloc(), instancePointer_, exceptionAddr, /*valueOffset=*/0, exception,
   4133        AliasSet::WasmPendingException, WasmPreBarrierKind::Normal);
   4134    curBlock_->add(setException);
   4135    if (!postBarrierEdgePrecise(/*lineOrBytecode=*/0, exceptionAddr,
   4136                                exception)) {
   4137      return false;
   4138    }
   4139 
   4140    // Set the pending exception tag object
   4141    auto* exceptionTagAddr = MWasmDerivedPointer::New(
   4142        alloc(), instancePointer_, Instance::offsetOfPendingExceptionTag());
   4143    curBlock_->add(exceptionTagAddr);
   4144    auto* setExceptionTag = MWasmStoreRef::New(
   4145        alloc(), instancePointer_, exceptionTagAddr, /*valueOffset=*/0, tag,
   4146        AliasSet::WasmPendingException, WasmPreBarrierKind::Normal);
   4147    curBlock_->add(setExceptionTag);
   4148    return postBarrierEdgePrecise(/*lineOrBytecode=*/0, exceptionTagAddr, tag);
   4149  }
   4150 
   4151  [[nodiscard]] bool endWithPadPatch(
   4152      ControlInstructionVector* tryLandingPadPatches) {
   4153    MGoto* jumpToLandingPad = MGoto::New(alloc());
   4154    curBlock_->end(jumpToLandingPad);
   4155    return tryLandingPadPatches->emplaceBack(jumpToLandingPad);
   4156  }
   4157 
   4158  [[nodiscard]] bool delegatePadPatches(const ControlInstructionVector& patches,
   4159                                        uint32_t relativeDepth) {
   4160    if (patches.empty()) {
   4161      return true;
   4162    }
   4163 
   4164    // Find where we are delegating the pad patches to.
   4165    ControlInstructionVector* targetPatches;
   4166    if (!inTryBlockFrom(relativeDepth, &targetPatches)) {
   4167      MOZ_ASSERT(relativeDepth <= pendingBlockDepth_ - 1);
   4168      targetPatches = &bodyRethrowPadPatches_;
   4169    }
   4170 
   4171    // Append the delegate's pad patches to the target's.
   4172    for (MControlInstruction* ins : patches) {
   4173      if (!targetPatches->emplaceBack(ins)) {
   4174        return false;
   4175      }
   4176    }
   4177    return true;
   4178  }
   4179 
   4180  [[nodiscard]]
   4181  bool beginCatchableCall(CallCompileState* callState) {
   4182    if (!inTryBlock(&callState->tryLandingPadPatches)) {
   4183      MOZ_ASSERT(!callState->isCatchable());
   4184      return true;
   4185    }
   4186    MOZ_ASSERT(callState->isCatchable());
   4187 
   4188    // Allocate a try note
   4189    if (!rootCompiler_.addTryNote(&callState->tryNoteIndex)) {
   4190      return false;
   4191    }
   4192 
   4193    // Allocate blocks for fallthrough and exceptions
   4194    return newBlock(curBlock_, &callState->fallthroughBlock) &&
   4195           newBlock(curBlock_, &callState->prePadBlock);
   4196  }
   4197 
   4198  [[nodiscard]]
   4199  bool finishCatchableCall(CallCompileState* callState) {
   4200    if (!callState->tryLandingPadPatches) {
   4201      return true;
   4202    }
   4203 
   4204    // Switch to the prePadBlock
   4205    MBasicBlock* callBlock = curBlock_;
   4206    curBlock_ = callState->prePadBlock;
   4207 
   4208    // Mark this as the landing pad for the call
   4209    curBlock_->add(MWasmCallLandingPrePad::New(alloc(), callBlock,
   4210                                               callState->tryNoteIndex));
   4211 
   4212    // End with a pending jump to the landing pad
   4213    if (!endWithPadPatch(callState->tryLandingPadPatches)) {
   4214      return false;
   4215    }
   4216 
   4217    // Compilation continues in the fallthroughBlock.
   4218    curBlock_ = callState->fallthroughBlock;
   4219    return true;
   4220  }
   4221 
   4222  // Create a landing pad for a try block. This is also used for the implicit
   4223  // rethrow landing pad used for delegate instructions that target the
   4224  // outermost label.
   4225  [[nodiscard]]
   4226  bool createTryLandingPad(ControlInstructionVector& landingPadPatches,
   4227                           MBasicBlock** landingPad) {
   4228    MOZ_ASSERT(!landingPadPatches.empty());
   4229 
   4230    // Bind the branches from exception throwing code to a new landing pad
   4231    // block. This is done similarly to what is done in bindBranches.
   4232    MControlInstruction* ins = landingPadPatches[0];
   4233    MBasicBlock* pred = ins->block();
   4234    if (!newBlock(pred, landingPad)) {
   4235      return false;
   4236    }
   4237    ins->replaceSuccessor(MGoto::TargetIndex, *landingPad);
   4238    for (size_t i = 1; i < landingPadPatches.length(); i++) {
   4239      ins = landingPadPatches[i];
   4240      pred = ins->block();
   4241      if (!(*landingPad)->addPredecessor(alloc(), pred)) {
   4242        return false;
   4243      }
   4244      ins->replaceSuccessor(MGoto::TargetIndex, *landingPad);
   4245    }
   4246 
   4247    // Clear the now bound pad patches.
   4248    landingPadPatches.clear();
   4249    return true;
   4250  }
   4251 
  // Build the landing pad for a try_table: consume the pending exception
  // from the instance, then compare its tag against each catch clause in
  // order, branching to the clause's label on a match and rethrowing if no
  // catch_all terminates the list.
  [[nodiscard]]
  bool createTryTableLandingPad(TryControl* tryControl) {
    // If there were no patches, then there were no throwing instructions and
    // we don't need to do anything.
    if (tryControl->landingPadPatches.empty()) {
      return true;
    }

    // Create the landing pad block and bind all the throwing instructions
    MBasicBlock* landingPad;
    if (!createTryLandingPad(tryControl->landingPadPatches, &landingPad)) {
      return false;
    }

    // Get the pending exception from the instance
    MDefinition* pendingException;
    MDefinition* pendingExceptionTag;
    if (!consumePendingException(&landingPad, &pendingException,
                                 &pendingExceptionTag)) {
      return false;
    }

    // Emit the dispatch chain into the landing pad; restore curBlock_ after.
    MBasicBlock* originalBlock = curBlock_;
    curBlock_ = landingPad;

    bool hadCatchAll = false;
    for (const TryTableCatch& tryTableCatch : tryControl->catches) {
      // Handle a catch_all by jumping to the target block
      if (tryTableCatch.tagIndex == CatchAllIndex) {
        // Capture the exnref value if we need to
        DefVector values;
        if (tryTableCatch.captureExnRef && !values.append(pendingException)) {
          return false;
        }

        // Branch to the catch_all code
        if (!br(tryTableCatch.labelRelativeDepth, values)) {
          return false;
        }

        // Break from the loop and skip the implicit rethrow that's needed
        // if we didn't have a catch_all
        hadCatchAll = true;
        break;
      }

      // Handle a tagged catch by doing a compare and branch on the tag index,
      // jumping to a catch block if they match, or else to a fallthrough block
      // to continue the landing pad.
      MBasicBlock* catchBlock = nullptr;
      MBasicBlock* fallthroughBlock = nullptr;
      if (!newBlock(curBlock_, &catchBlock) ||
          !newBlock(curBlock_, &fallthroughBlock)) {
        return false;
      }

      // Branch to the catch block if the exception's tag matches this catch
      // block's tag.
      MDefinition* catchTag = loadTag(tryTableCatch.tagIndex);
      MDefinition* matchesCatchTag =
          compare(pendingExceptionTag, catchTag, JSOp::Eq,
                  MCompare::Compare_WasmAnyRef);
      curBlock_->end(
          MTest::New(alloc(), matchesCatchTag, catchBlock, fallthroughBlock));

      // Set up the catch block by extracting the values from the exception
      // object.
      curBlock_ = catchBlock;

      // Extract the exception values for the catch block
      DefVector values;
      if (!loadExceptionValues(pendingException, tryTableCatch.tagIndex,
                               &values)) {
        return false;
      }
      if (tryTableCatch.captureExnRef && !values.append(pendingException)) {
        return false;
      }

      if (!br(tryTableCatch.labelRelativeDepth, values)) {
        return false;
      }

      curBlock_ = fallthroughBlock;
    }

    // If there was no catch_all, we must rethrow this exception.
    if (!hadCatchAll) {
      if (!throwFrom(pendingException, pendingExceptionTag)) {
        return false;
      }
    }

    curBlock_ = originalBlock;
    return true;
  }
   4348 
   4349  // Consume the pending exception state from instance. This will clear out the
   4350  // previous value.
   4351  [[nodiscard]]
   4352  bool consumePendingException(MBasicBlock** landingPad,
   4353                               MDefinition** pendingException,
   4354                               MDefinition** pendingExceptionTag) {
   4355    MBasicBlock* prevBlock = curBlock_;
   4356    curBlock_ = *landingPad;
   4357 
   4358    // Load the pending exception and tag
   4359    loadPendingExceptionState(pendingException, pendingExceptionTag);
   4360 
   4361    // Clear the pending exception and tag
   4362    auto* null = constantNullRef(MaybeRefType());
   4363    if (!setPendingExceptionState(null, null)) {
   4364      return false;
   4365    }
   4366 
   4367    // The landing pad may have changed from loading and clearing the pending
   4368    // exception state.
   4369    *landingPad = curBlock_;
   4370 
   4371    curBlock_ = prevBlock;
   4372    return true;
   4373  }
   4374 
   4375  [[nodiscard]] bool startTry() {
   4376    Control& control = iter().controlItem();
   4377    control.block = curBlock_;
   4378    control.tryControl = rootCompiler_.newTryControl();
   4379    if (!control.tryControl) {
   4380      return false;
   4381    }
   4382    control.tryControl->inBody = true;
   4383    return startBlock();
   4384  }
   4385 
   4386  [[nodiscard]] bool startTryTable(TryTableCatchVector&& catches) {
   4387    Control& control = iter().controlItem();
   4388    control.block = curBlock_;
   4389    control.tryControl = rootCompiler_.newTryControl();
   4390    if (!control.tryControl) {
   4391      return false;
   4392    }
   4393    control.tryControl->inBody = true;
   4394    control.tryControl->catches = std::move(catches);
   4395    return startBlock();
   4396  }
   4397 
   4398  [[nodiscard]] bool joinTryOrCatchBlock(Control& control) {
   4399    // If the try or catch block ended with dead code, there is no need to
   4400    // do any control flow join.
   4401    if (inDeadCode()) {
   4402      return true;
   4403    }
   4404 
   4405    // This is a split path which we'll need to join later, using a control
   4406    // flow patch.
   4407    MOZ_ASSERT(!curBlock_->hasLastIns());
   4408    MGoto* jump = MGoto::New(alloc());
   4409    if (!addControlFlowPatch(jump, 0, MGoto::TargetIndex)) {
   4410      return false;
   4411    }
   4412 
   4413    // Finish the current block with the control flow patch instruction.
   4414    curBlock_->end(jump);
   4415    return true;
   4416  }
   4417 
  // Finish the previous block (either a try or catch block) and then set up
  // a new catch block. `fromKind` says which arm we are leaving; the first
  // transition out of the try (fromKind == LabelKind::Try) builds the landing
  // pad that all subsequent catch arms dispatch from. `tagIndex` is the tag
  // of the new catch arm, or CatchAllIndex for catch_all.
  [[nodiscard]] bool switchToCatch(Control& control, LabelKind fromKind,
                                   uint32_t tagIndex) {
    // Mark this control node as being no longer in the body of the try
    control.tryControl->inBody = false;

    // If there is no control block, then either:
    //   - the entry of the try block is dead code, or
    //   - there is no landing pad for the try-catch.
    // In either case, any catch will be dead code.
    if (!control.block) {
      MOZ_ASSERT(inDeadCode());
      return true;
    }

    // Join the previous try or catch block with a patch to the future join of
    // the whole try-catch block.
    if (!joinTryOrCatchBlock(control)) {
      return false;
    }

    // If we are switching from the try block, create the landing pad. This is
    // guaranteed to happen once and only once before processing catch blocks.
    if (fromKind == LabelKind::Try) {
      if (!control.tryControl->landingPadPatches.empty()) {
        // Create the landing pad block and bind all the throwing instructions
        MBasicBlock* padBlock = nullptr;
        if (!createTryLandingPad(control.tryControl->landingPadPatches,
                                 &padBlock)) {
          return false;
        }

        // Store the pending exception and tag on the control item for future
        // use in catch handlers.
        if (!consumePendingException(
                &padBlock, &control.tryControl->pendingException,
                &control.tryControl->pendingExceptionTag)) {
          return false;
        }

        // Set the control block for this try-catch to the landing pad.
        control.block = padBlock;
      } else {
        // Nothing in the try body could throw: no landing pad is needed and
        // every catch arm is unreachable.
        control.block = nullptr;
      }
    }

    // If there is no landing pad, then this and following catches are dead
    // code.
    if (!control.block) {
      curBlock_ = nullptr;
      return true;
    }

    // Switch to the landing pad.
    curBlock_ = control.block;

    // We should have a pending exception and tag if we were able to create a
    // landing pad.
    MOZ_ASSERT(control.tryControl->pendingException);
    MOZ_ASSERT(control.tryControl->pendingExceptionTag);

    // Handle a catch_all by immediately jumping to a new block. We require a
    // new block (as opposed to just emitting the catch_all code in the current
    // block) because rethrow requires the exception/tag to be present in the
    // landing pad's slots, while the catch_all block must not have the
    // exception/tag in slots.
    if (tagIndex == CatchAllIndex) {
      MBasicBlock* catchAllBlock = nullptr;
      if (!goToNewBlock(curBlock_, &catchAllBlock)) {
        return false;
      }
      // Compilation will continue in the catch_all block.
      curBlock_ = catchAllBlock;
      return true;
    }

    // Handle a tagged catch by doing a compare and branch on the tag index,
    // jumping to a catch block if they match, or else to a fallthrough block
    // to continue the landing pad.
    MBasicBlock* catchBlock = nullptr;
    MBasicBlock* fallthroughBlock = nullptr;
    if (!newBlock(curBlock_, &catchBlock) ||
        !newBlock(curBlock_, &fallthroughBlock)) {
      return false;
    }

    // Branch to the catch block if the exception's tag matches this catch
    // block's tag.
    MDefinition* catchTag = loadTag(tagIndex);
    MDefinition* matchesCatchTag =
        compare(control.tryControl->pendingExceptionTag, catchTag, JSOp::Eq,
                MCompare::Compare_WasmAnyRef);
    curBlock_->end(
        MTest::New(alloc(), matchesCatchTag, catchBlock, fallthroughBlock));

    // The landing pad will continue in the fallthrough block
    control.block = fallthroughBlock;

    // Set up the catch block by extracting the values from the exception
    // object.
    curBlock_ = catchBlock;

    // Extract the exception values for the catch block
    DefVector values;
    if (!loadExceptionValues(control.tryControl->pendingException, tagIndex,
                             &values)) {
      return false;
    }
    iter().setResults(values.length(), values);
    return true;
  }
   4531 
   4532  [[nodiscard]] bool loadExceptionValues(MDefinition* exception,
   4533                                         uint32_t tagIndex, DefVector* values) {
   4534    SharedTagType tagType = codeMeta().tags[tagIndex].type;
   4535    const ValTypeVector& params = tagType->argTypes();
   4536    const TagOffsetVector& offsets = tagType->argOffsets();
   4537 
   4538    // Get the data pointer from the exception object
   4539    auto* data = MWasmLoadField::New(
   4540        alloc(), exception, nullptr, WasmExceptionObject::offsetOfData(),
   4541        mozilla::Nothing(), MIRType::Pointer, MWideningOp::None,
   4542        AliasSet::Load(AliasSet::Any));
   4543    if (!data) {
   4544      return false;
   4545    }
   4546    curBlock_->add(data);
   4547 
   4548    // Presize the values vector to the number of params
   4549    if (!values->reserve(params.length())) {
   4550      return false;
   4551    }
   4552 
   4553    // Load each value from the data pointer
   4554    for (size_t i = 0; i < params.length(); i++) {
   4555      if (!mirGen().ensureBallast()) {
   4556        return false;
   4557      }
   4558      auto* load =
   4559          MWasmLoadField::New(alloc(), data, exception, offsets[i],
   4560                              mozilla::Nothing(), params[i].toMIRType(),
   4561                              MWideningOp::None, AliasSet::Load(AliasSet::Any),
   4562                              mozilla::Nothing(), params[i].toMaybeRefType());
   4563      if (!load || !values->append(load)) {
   4564        return false;
   4565      }
   4566      curBlock_->add(load);
   4567    }
   4568    return true;
   4569  }
   4570 
  // Close a try/catch construct. `kind` identifies the arm we are ending on:
  // a catchless try (LabelKind::Try), a final tagged catch without catch_all
  // (LabelKind::Catch), or a catch_all (LabelKind::CatchAll). `defs` receives
  // the construct's join values.
  [[nodiscard]] bool finishTryCatch(LabelKind kind, Control& control,
                                    DefVector* defs) {
    switch (kind) {
      case LabelKind::Try: {
        // This is a catchless try, we must delegate all throwing instructions
        // to the nearest enclosing try block if one exists, or else to the
        // body block which will handle it in emitBodyRethrowPad. We
        // specify a relativeDepth of '1' to delegate outside of the still
        // active try block.
        uint32_t relativeDepth = 1;
        if (!delegatePadPatches(control.tryControl->landingPadPatches,
                                relativeDepth)) {
          return false;
        }
        break;
      }
      case LabelKind::Catch: {
        MOZ_ASSERT(!control.tryControl->inBody);
        // This is a try without a catch_all, we must have a rethrow at the end
        // of the landing pad (if any).
        MBasicBlock* padBlock = control.block;
        if (padBlock) {
          MBasicBlock* prevBlock = curBlock_;
          curBlock_ = padBlock;
          if (!throwFrom(control.tryControl->pendingException,
                         control.tryControl->pendingExceptionTag)) {
            return false;
          }
          curBlock_ = prevBlock;
        }
        break;
      }
      case LabelKind::CatchAll: {
        MOZ_ASSERT(!control.tryControl->inBody);
        // This is a try with a catch_all, and requires no special handling.
        break;
      }
      default:
        // No other label kinds can end a try/catch.
        MOZ_CRASH();
    }

    // Finish the block, joining the try and catch blocks
    return finishBlock(defs);
  }
   4615 
   4616  [[nodiscard]] bool finishTryTable(Control& control, DefVector* defs) {
   4617    // Mark this control as no longer in the body of the try
   4618    control.tryControl->inBody = false;
   4619    // Create a landing pad for all of the catches
   4620    if (!createTryTableLandingPad(control.tryControl.get())) {
   4621      return false;
   4622    }
   4623    // Finish the block, joining the try and catch blocks
   4624    return finishBlock(defs);
   4625  }
   4626 
  // Emit the implicit rethrow landing pad at function-body level, which
  // collects throws delegated past all explicit try blocks. When inlined,
  // the pad is handed to the caller instead of rethrowing here.
  [[nodiscard]] bool emitBodyRethrowPad(Control& control) {
    // If there are no throwing instructions pending, we don't need to do
    // anything
    if (bodyRethrowPadPatches_.empty()) {
      return true;
    }

    // Create a landing pad for any throwing instructions
    MBasicBlock* padBlock;
    if (!createTryLandingPad(bodyRethrowPadPatches_, &padBlock)) {
      return false;
    }

    // If we're inlined into another function, we save the landing pad to be
    // linked later directly to our caller's landing pad. See
    // `finishedInlinedCallDirect`.
    if (callerCompiler_ && callerCompiler_->inTryCode()) {
      pendingInlineCatchBlock_ = padBlock;
      return true;
    }

    // Otherwise we need to grab the pending exception and rethrow it.
    MDefinition* pendingException;
    MDefinition* pendingExceptionTag;
    if (!consumePendingException(&padBlock, &pendingException,
                                 &pendingExceptionTag)) {
      return false;
    }

    // Switch to the landing pad and rethrow the exception
    MBasicBlock* prevBlock = curBlock_;
    curBlock_ = padBlock;
    if (!throwFrom(pendingException, pendingExceptionTag)) {
      return false;
    }
    curBlock_ = prevBlock;

    // createTryLandingPad cleared the patches above; nothing may remain.
    MOZ_ASSERT(bodyRethrowPadPatches_.empty());
    return true;
  }
   4667 
   4668  [[nodiscard]] bool emitNewException(MDefinition* tag,
   4669                                      MDefinition** exception) {
   4670    return emitInstanceCall1(readBytecodeOffset(), SASigExceptionNew, tag,
   4671                             exception);
   4672  }
   4673 
  // Emit MIR for the `throw` instruction: allocate a new exception object for
  // tag `tagIndex`, copy `argValues` into its data area (with post-write
  // barriers for reference-typed arguments), and throw it via throwFrom.
  [[nodiscard]] bool emitThrow(uint32_t tagIndex, const DefVector& argValues) {
    if (inDeadCode()) {
      return true;
    }
    uint32_t bytecodeOffset = readBytecodeOffset();

    // Load the tag
    MDefinition* tag = loadTag(tagIndex);
    if (!tag) {
      return false;
    }

    // Allocate an exception object
    MDefinition* exception;
    if (!emitNewException(tag, &exception)) {
      return false;
    }

    // Load the data pointer from the object
    auto* data = MWasmLoadField::New(
        alloc(), exception, nullptr, WasmExceptionObject::offsetOfData(),
        mozilla::Nothing(), MIRType::Pointer, MWideningOp::None,
        AliasSet::Load(AliasSet::Any));
    if (!data) {
      return false;
    }
    curBlock_->add(data);

    // Store the params into the data pointer, at the per-argument offsets
    // recorded in the tag's type.
    SharedTagType tagType = codeMeta().tags[tagIndex].type;
    for (size_t i = 0; i < tagType->argOffsets().length(); i++) {
      // This loop can create many MIR nodes; keep the allocator topped up.
      if (!mirGen().ensureBallast()) {
        return false;
      }
      ValType type = tagType->argTypes()[i];
      uint32_t offset = tagType->argOffsets()[i];

      if (!type.isRefRepr()) {
        // Non-reference values need no GC barriers.
        auto* store = MWasmStoreField::New(
            alloc(), data, exception, offset, mozilla::Nothing(), argValues[i],
            MNarrowingOp::None, AliasSet::Store(AliasSet::Any));
        if (!store) {
          return false;
        }
        curBlock_->add(store);
        continue;
      }

      // Store the new value
      auto* store = MWasmStoreFieldRef::New(
          alloc(), instancePointer_, data, exception, offset,
          mozilla::Nothing(), argValues[i], AliasSet::Store(AliasSet::Any),
          Nothing(), WasmPreBarrierKind::None);
      if (!store) {
        return false;
      }
      curBlock_->add(store);

      // Call the post-write barrier
      if (!postBarrierWholeCell(bytecodeOffset, exception, argValues[i])) {
        return false;
      }
    }

    // Throw the exception
    return throwFrom(exception, tag);
  }
   4741 
  // Emit MIR for `throw_ref`: rethrow an existing exception reference.
  // Ends the current block, as code after a throw is unreachable.
  [[nodiscard]] bool emitThrowRef(MDefinition* exnRef) {
    if (inDeadCode()) {
      return true;
    }

    // The exception must be non-null
    exnRef = refAsNonNull(exnRef);
    if (!exnRef) {
      return false;
    }

    // Call Instance::throwException to perform tag unpacking and throw the
    // exception
    if (!emitInstanceCall1(readBytecodeOffset(), SASigThrowException, exnRef)) {
      return false;
    }
    // The call does not return normally; mark the fallthrough as unreachable
    // and terminate the current block.
    unreachableTrap();

    curBlock_ = nullptr;
    return true;
  }
   4763 
  // Throw exception `exn` with tag `tag`.  If an enclosing local try block
  // can catch it, record the pending exception state and jump (via a
  // pad-patch) to that try's landing pad; otherwise call out to the instance
  // to unwind.  Either way the current block is terminated.
  [[nodiscard]] bool throwFrom(MDefinition* exn, MDefinition* tag) {
    if (inDeadCode()) {
      return true;
    }

    // Check if there is a local catching try control, and if so, then add a
    // pad-patch to its tryPadPatches.
    ControlInstructionVector* tryLandingPadPatches;
    if (inTryBlock(&tryLandingPadPatches)) {
      // Set the pending exception state, the landing pad will read from this
      if (!setPendingExceptionState(exn, tag)) {
        return false;
      }

      // End with a pending jump to the landing pad
      if (!endWithPadPatch(tryLandingPadPatches)) {
        return false;
      }
      curBlock_ = nullptr;
      return true;
    }

    // If there is no surrounding catching block, call an instance method to
    // throw the exception.
    if (!emitInstanceCall1(readBytecodeOffset(), SASigThrowException, exn)) {
      return false;
    }
    unreachableTrap();

    curBlock_ = nullptr;
    return true;
  }
   4796 
   4797  [[nodiscard]] bool emitRethrow(uint32_t relativeDepth) {
   4798    if (inDeadCode()) {
   4799      return true;
   4800    }
   4801 
   4802    Control& control = iter().controlItem(relativeDepth);
   4803    MOZ_ASSERT(iter().controlKind(relativeDepth) == LabelKind::Catch ||
   4804               iter().controlKind(relativeDepth) == LabelKind::CatchAll);
   4805    return throwFrom(control.tryControl->pendingException,
   4806                     control.tryControl->pendingExceptionTag);
   4807  }
   4808 
   4809  /******************************** WasmGC: low level load/store helpers ***/
   4810 
   4811  // Given a (StorageType, FieldExtension) pair, produce the (MIRType,
   4812  // MWideningOp) pair that will give the correct operation for reading the
   4813  // value from memory.
   4814  static void fieldLoadInfoToMIR(StorageType type, FieldWideningOp wideningOp,
   4815                                 MIRType* mirType, MWideningOp* mirWideningOp) {
   4816    switch (type.kind()) {
   4817      case StorageType::I8: {
   4818        switch (wideningOp) {
   4819          case FieldWideningOp::Signed:
   4820            *mirType = MIRType::Int32;
   4821            *mirWideningOp = MWideningOp::FromS8;
   4822            return;
   4823          case FieldWideningOp::Unsigned:
   4824            *mirType = MIRType::Int32;
   4825            *mirWideningOp = MWideningOp::FromU8;
   4826            return;
   4827          default:
   4828            MOZ_CRASH();
   4829        }
   4830      }
   4831      case StorageType::I16: {
   4832        switch (wideningOp) {
   4833          case FieldWideningOp::Signed:
   4834            *mirType = MIRType::Int32;
   4835            *mirWideningOp = MWideningOp::FromS16;
   4836            return;
   4837          case FieldWideningOp::Unsigned:
   4838            *mirType = MIRType::Int32;
   4839            *mirWideningOp = MWideningOp::FromU16;
   4840            return;
   4841          default:
   4842            MOZ_CRASH();
   4843        }
   4844      }
   4845      default: {
   4846        switch (wideningOp) {
   4847          case FieldWideningOp::None:
   4848            *mirType = type.toMIRType();
   4849            *mirWideningOp = MWideningOp::None;
   4850            return;
   4851          default:
   4852            MOZ_CRASH();
   4853        }
   4854      }
   4855    }
   4856  }
   4857 
   4858  // Given a StorageType, return the Scale required when accessing array
   4859  // elements of this type.
   4860  static Scale scaleFromFieldType(StorageType type) {
   4861    if (type.kind() == StorageType::V128) {
   4862      // V128 is accessed differently, so this scale will not be used.
   4863      return Scale::Invalid;
   4864    }
   4865    return ShiftToScale(type.indexingShift());
   4866  }
   4867 
   4868  // Given a StorageType, produce the MNarrowingOp required for writing the
   4869  // value to memory.
   4870  static MNarrowingOp fieldStoreInfoToMIR(StorageType type) {
   4871    switch (type.kind()) {
   4872      case StorageType::I8:
   4873        return MNarrowingOp::To8;
   4874      case StorageType::I16:
   4875        return MNarrowingOp::To16;
   4876      default:
   4877        return MNarrowingOp::None;
   4878    }
   4879  }
   4880 
  // Generate a write of `value` at address `base + offset`, where `offset` is
  // known at JIT time.  If the written value is a reftype, the previous value
  // at `base + offset` will be retrieved and handed off to the post-write
  // barrier.  `keepAlive` will be referenced by the instruction so as to hold
  // it live (from the GC's point of view).
  [[nodiscard]] bool writeGcValueAtBasePlusOffset(
      uint32_t lineOrBytecode, StorageType type, MDefinition* keepAlive,
      AliasSet::Flag aliasBitset, MDefinition* value, MDefinition* base,
      uint32_t offset, uint32_t fieldIndex, bool needsTrapInfo,
      WasmPreBarrierKind preBarrierKind, WasmPostBarrierKind postBarrierKind) {
    MOZ_ASSERT(aliasBitset != 0);
    MOZ_ASSERT(keepAlive->type() == MIRType::WasmAnyRef);
    MOZ_ASSERT(type.widenToValType().toMIRType() == value->type());
    MNarrowingOp narrowingOp = fieldStoreInfoToMIR(type);

    if (!type.isRefRepr()) {
      // Non-reference stores need no GC barriers.  Trap info (for the
      // implicit null check) is attached only when requested by the caller.
      MaybeTrapSiteDesc maybeTrap;
      if (needsTrapInfo) {
        maybeTrap.emplace(trapSiteDesc());
      }

      auto* store = MWasmStoreField::New(
          alloc(), base, keepAlive, offset, mozilla::Some(fieldIndex), value,
          narrowingOp, AliasSet::Store(aliasBitset), maybeTrap);
      if (!store) {
        return false;
      }
      curBlock_->add(store);
      return true;
    }

    // Otherwise it's a ref store.  Load the previous value so we can show it
    // to the post-write barrier.
    //
    // Optimisation opportunity: for the case where this field write results
    // from struct.new, the old value is always zero.  So we should synthesise
    // a suitable zero constant rather than reading it from the object.  See
    // also bug 1799999.
    MOZ_ASSERT(narrowingOp == MNarrowingOp::None);
    MOZ_ASSERT(type.widenToValType() == type.valType());

    // Store the new value
    auto* store = MWasmStoreFieldRef::New(
        alloc(), instancePointer_, base, keepAlive, offset,
        mozilla::Some(fieldIndex), value, AliasSet::Store(aliasBitset),
        mozilla::Some(trapSiteDesc()), preBarrierKind);
    if (!store) {
      return false;
    }
    curBlock_->add(store);

    // Call the post-write barrier
    switch (postBarrierKind) {
      case WasmPostBarrierKind::WholeCell:
        return postBarrierWholeCell(lineOrBytecode, keepAlive, value);
      case WasmPostBarrierKind::Edge:
        MOZ_CRASH("WasmPostBarrierKind::Edge not supported");
      case WasmPostBarrierKind::None:
        return true;
      default:
        MOZ_CRASH("Bad postBarrierKind");
    }
  }
   4944 
   4945  // Generate a write of `value` at address `base + index * scale`, where
   4946  // `scale` is known at JIT-time.  If the written value is a reftype, the
   4947  // previous value at `base + index * scale` will be retrieved and handed off
   4948  // to the post-write barrier.  `keepAlive` will be referenced by the
   4949  // instruction so as to hold it live (from the GC's point of view).
   4950  [[nodiscard]] bool writeGcValueAtBasePlusScaledIndex(
   4951      uint32_t lineOrBytecode, StorageType type, MDefinition* keepAlive,
   4952      AliasSet::Flag aliasBitset, MDefinition* value, MDefinition* base,
   4953      uint32_t scale, MDefinition* index, WasmPreBarrierKind preBarrierKind,
   4954      WasmPostBarrierKind postBarrierKind) {
   4955    MOZ_ASSERT(aliasBitset != 0);
   4956    MOZ_ASSERT(keepAlive->type() == MIRType::WasmAnyRef);
   4957    MOZ_ASSERT(type.widenToValType().toMIRType() == value->type());
   4958    MOZ_ASSERT(scale == 1 || scale == 2 || scale == 4 || scale == 8 ||
   4959               scale == 16);
   4960 
   4961    MNarrowingOp narrowingOp = fieldStoreInfoToMIR(type);
   4962 
   4963    if (!type.isRefRepr()) {
   4964      MaybeTrapSiteDesc maybeTrap;
   4965      Scale scale = scaleFromFieldType(type);
   4966      auto* store = MWasmStoreElement::New(
   4967          alloc(), base, index, value, keepAlive, narrowingOp, scale,
   4968          AliasSet::Store(aliasBitset), maybeTrap);
   4969      if (!store) {
   4970        return false;
   4971      }
   4972      curBlock_->add(store);
   4973      return true;
   4974    }
   4975 
   4976    // Otherwise it's a ref store.
   4977    MOZ_ASSERT(narrowingOp == MNarrowingOp::None);
   4978    MOZ_ASSERT(type.widenToValType() == type.valType());
   4979 
   4980    // Store the new value
   4981    auto* store = MWasmStoreElementRef::New(
   4982        alloc(), instancePointer_, base, index, value, keepAlive,
   4983        AliasSet::Store(aliasBitset), mozilla::Some(trapSiteDesc()),
   4984        preBarrierKind);
   4985    if (!store) {
   4986      return false;
   4987    }
   4988    curBlock_->add(store);
   4989 
   4990    switch (postBarrierKind) {
   4991      case WasmPostBarrierKind::WholeCell:
   4992        return postBarrierWholeCell(lineOrBytecode, keepAlive, value);
   4993      case WasmPostBarrierKind::Edge:
   4994        return postBarrierEdgeAtIndex(lineOrBytecode, keepAlive, base, index,
   4995                                      sizeof(void*), value);
   4996      case WasmPostBarrierKind::None:
   4997        return true;
   4998      default:
   4999        MOZ_CRASH("Bad postBarrierKind");
   5000    }
   5001  }
   5002 
  // Generate a read from address `base + offset`, where `offset` is known at
  // JIT time.  The loaded value will be widened as described by `type` and
  // `fieldWideningOp`.  `keepAlive` will be referenced by the instruction so as
  // to hold it live (from the GC's point of view).
  [[nodiscard]] MDefinition* readGcValueAtBasePlusOffset(
      StorageType type, FieldWideningOp fieldWideningOp, MDefinition* keepAlive,
      AliasSet::Flag aliasBitset, MDefinition* base, uint32_t offset,
      uint32_t fieldIndex, bool needsTrapInfo) {
    MOZ_ASSERT(aliasBitset != 0);
    MOZ_ASSERT(keepAlive->type() == MIRType::WasmAnyRef);
    // Translate (type, widening op) into the MIR-level load parameters.
    MIRType mirType;
    MWideningOp mirWideningOp;
    fieldLoadInfoToMIR(type, fieldWideningOp, &mirType, &mirWideningOp);
    // Trap info (for the implicit null check) only when requested.
    MaybeTrapSiteDesc maybeTrap;
    if (needsTrapInfo) {
      maybeTrap.emplace(trapSiteDesc());
    }

    auto* load = MWasmLoadField::New(alloc(), base, keepAlive, offset,
                                     mozilla::Some(fieldIndex), mirType,
                                     mirWideningOp, AliasSet::Load(aliasBitset),
                                     maybeTrap, type.toMaybeRefType());
    if (!load) {
      return nullptr;
    }
    curBlock_->add(load);
    return load;
  }
   5031 
  // Generate a read from address `base + index * scale`, where `scale` is
  // known at JIT-time.  The loaded value will be widened as described by
  // `type` and `fieldWideningOp`.  `keepAlive` will be referenced by the
  // instruction so as to hold it live (from the GC's point of view).
  [[nodiscard]] MDefinition* readGcArrayValueAtIndex(
      StorageType type, FieldWideningOp fieldWideningOp, MDefinition* keepAlive,
      AliasSet::Flag aliasBitset, MDefinition* base, MDefinition* index) {
    MOZ_ASSERT(aliasBitset != 0);
    MOZ_ASSERT(keepAlive->type() == MIRType::WasmAnyRef);

    // Translate (type, widening op) into the MIR-level load parameters, and
    // derive the element scale from the storage type.
    MIRType mirType;
    MWideningOp mirWideningOp;
    fieldLoadInfoToMIR(type, fieldWideningOp, &mirType, &mirWideningOp);
    Scale scale = scaleFromFieldType(type);
    auto* load = MWasmLoadElement::New(
        alloc(), base, keepAlive, index, mirType, mirWideningOp, scale,
        AliasSet::Load(aliasBitset), mozilla::Some(trapSiteDesc()),
        type.toMaybeRefType());
    if (!load) {
      return nullptr;
    }
    curBlock_->add(load);
    return load;
  }
   5056 
   5057  /************************************************ WasmGC: type helpers ***/
   5058 
   5059  // Returns an MDefinition holding the supertype vector for `typeIndex`.
   5060  [[nodiscard]] MDefinition* loadSuperTypeVector(uint32_t typeIndex) {
   5061    uint32_t stvOffset = codeMeta().offsetOfSuperTypeVector(typeIndex);
   5062 
   5063    auto* load =
   5064        MWasmLoadInstanceDataField::New(alloc(), MIRType::Pointer, stvOffset,
   5065                                        /*isConst=*/true, instancePointer_);
   5066    if (!load) {
   5067      return nullptr;
   5068    }
   5069    curBlock_->add(load);
   5070    return load;
   5071  }
   5072 
   5073  uint32_t readAllocSiteIndex(uint32_t typeIndex) {
   5074    if (!codeTailMeta() || !codeTailMeta()->hasFuncDefAllocSites()) {
   5075      // For single tier of optimized compilation, there are no assigned alloc
   5076      // sites, using type index as alloc site.
   5077      return typeIndex;
   5078    }
   5079    AllocSitesRange rangeInModule =
   5080        codeTailMeta()->getFuncDefAllocSites(funcIndex());
   5081    uint32_t localIndex = numAllocSites_++;
   5082    MOZ_RELEASE_ASSERT(localIndex < rangeInModule.length);
   5083    return rangeInModule.begin + localIndex;
   5084  }
   5085 
   5086  [[nodiscard]] MDefinition* loadAllocSiteInstanceData(
   5087      uint32_t allocSiteIndex) {
   5088    auto* allocSites = MWasmLoadInstance::New(
   5089        alloc(), instancePointer_, Instance::offsetOfAllocSites(),
   5090        MIRType::Pointer, AliasSet::None());
   5091    if (!allocSites) {
   5092      return nullptr;
   5093    }
   5094    curBlock_->add(allocSites);
   5095 
   5096    auto* result = MWasmDerivedPointer::New(
   5097        alloc(), allocSites, allocSiteIndex * sizeof(gc::AllocSite));
   5098    if (!result) {
   5099      return nullptr;
   5100    }
   5101    curBlock_->add(result);
   5102    return result;
   5103  }
   5104 
   5105  /********************************************** WasmGC: struct helpers ***/
   5106 
   5107  [[nodiscard]] MDefinition* createStructObject(uint32_t typeIndex,
   5108                                                uint32_t allocSiteIndex,
   5109                                                bool zeroFields) {
   5110    // Allocate an uninitialized struct.
   5111    MDefinition* allocSite = loadAllocSiteInstanceData(allocSiteIndex);
   5112    if (!allocSite) {
   5113      return nullptr;
   5114    }
   5115 
   5116    const TypeDef* typeDef = &(*codeMeta().types)[typeIndex];
   5117    auto* structObject =
   5118        MWasmNewStructObject::New(alloc(), instancePointer_, allocSite, typeDef,
   5119                                  zeroFields, trapSiteDesc());
   5120    if (!structObject) {
   5121      return nullptr;
   5122    }
   5123    curBlock_->add(structObject);
   5124 
   5125    return structObject;
   5126  }
   5127 
  // Helper function for EmitStruct{New,Set}: given a MIR pointer to a
  // WasmStructObject, a MIR pointer to a value, and a field descriptor,
  // generate MIR to write the value to the relevant field in the object.
  // Fields may live inline in the object or in an out-of-line (OOL) data
  // area reached through a pointer stored in the object.
  [[nodiscard]] bool writeValueToStructField(
      uint32_t lineOrBytecode, const StructType& structType,
      uint32_t fieldIndex, MDefinition* structObject, MDefinition* value,
      WasmPreBarrierKind preBarrierKind) {
    StorageType fieldType = structType.fields_[fieldIndex].type;
    FieldAccessPath path = structType.fieldAccessPaths_[fieldIndex];
    uint32_t areaOffset = path.hasOOL() ? path.oolOffset() : path.ilOffset();

    // Make `base` point at the first byte of either the struct object as a
    // whole or of the out-of-line data area.
    MDefinition* base;
    bool needsTrapInfo;
    if (path.hasOOL()) {
      // The path has two components, of which the first (the IL component) is
      // the offset where the OOL pointer is stored.  Hence `path.ilOffset()`.
      auto* loadDataPointer = MWasmLoadField::New(
          alloc(), structObject, nullptr, path.ilOffset(), mozilla::Nothing(),
          MIRType::WasmStructData, MWideningOp::None,
          AliasSet::Load(AliasSet::WasmStructOutlineDataPointer),
          mozilla::Some(trapSiteDesc()));
      if (!loadDataPointer) {
        return false;
      }
      curBlock_->add(loadDataPointer);
      base = loadDataPointer;
      // The null check already happened when loading the OOL pointer above.
      needsTrapInfo = false;
    } else {
      base = structObject;
      needsTrapInfo = true;
    }
    // The transaction is to happen at `base + areaOffset`, so to speak.

    // The alias set denoting the field's location, although lacking a
    // Load-vs-Store indication at this point.
    AliasSet::Flag fieldAliasSet = path.hasOOL()
                                       ? AliasSet::WasmStructOutlineDataArea
                                       : AliasSet::WasmStructInlineDataArea;

    return writeGcValueAtBasePlusOffset(
        lineOrBytecode, fieldType, structObject, fieldAliasSet, value, base,
        areaOffset, fieldIndex, needsTrapInfo, preBarrierKind,
        WasmPostBarrierKind::WholeCell);
  }
   5174 
  // Helper function for EmitStructGet: given a MIR pointer to a
  // WasmStructObject, a field descriptor and a field widening operation,
  // generate MIR to read the value from the relevant field in the object.
  // Mirrors the addressing logic of writeValueToStructField: fields may live
  // inline or in the out-of-line (OOL) data area.
  [[nodiscard]] MDefinition* readValueFromStructField(
      const StructType& structType, uint32_t fieldIndex,
      FieldWideningOp wideningOp, MDefinition* structObject) {
    StorageType fieldType = structType.fields_[fieldIndex].type;
    FieldAccessPath path = structType.fieldAccessPaths_[fieldIndex];
    uint32_t areaOffset = path.hasOOL() ? path.oolOffset() : path.ilOffset();

    // Make `base` point at the first byte of either the struct object as a
    // whole or of the out-of-line data area.
    MDefinition* base;
    bool needsTrapInfo;
    if (path.hasOOL()) {
      // The path has two components, of which the first (the IL component) is
      // the offset where the OOL pointer is stored.  Hence `path.ilOffset()`.
      auto* loadDataPointer = MWasmLoadField::New(
          alloc(), structObject, nullptr, path.ilOffset(), mozilla::Nothing(),
          MIRType::WasmStructData, MWideningOp::None,
          AliasSet::Load(AliasSet::WasmStructOutlineDataPointer),
          mozilla::Some(trapSiteDesc()));
      if (!loadDataPointer) {
        return nullptr;
      }
      curBlock_->add(loadDataPointer);
      base = loadDataPointer;
      // The null check already happened when loading the OOL pointer above.
      needsTrapInfo = false;
    } else {
      base = structObject;
      needsTrapInfo = true;
    }
    // The transaction is to happen at `base + areaOffset`, so to speak.

    // The alias set denoting the field's location, although lacking a
    // Load-vs-Store indication at this point.
    AliasSet::Flag fieldAliasSet = path.hasOOL()
                                       ? AliasSet::WasmStructOutlineDataArea
                                       : AliasSet::WasmStructInlineDataArea;

    return readGcValueAtBasePlusOffset(fieldType, wideningOp, structObject,
                                       fieldAliasSet, base, areaOffset,
                                       fieldIndex, needsTrapInfo);
  }
   5219 
   5220  /********************************* WasmGC: address-arithmetic helpers ***/
   5221 
   5222  inline bool targetIs64Bit() const {
   5223 #ifdef JS_64BIT
   5224    return true;
   5225 #else
   5226    return false;
   5227 #endif
   5228  }
   5229 
   5230  // Generate MIR to unsigned widen `val` out to the target word size.  If
   5231  // `val` is already at the target word size, this is a no-op.  The only
   5232  // other allowed case is where `val` is Int32 and we're compiling for a
   5233  // 64-bit target, in which case a widen is generated.
   5234  [[nodiscard]] MDefinition* unsignedWidenToTargetWord(MDefinition* val) {
   5235    if (targetIs64Bit()) {
   5236      if (val->type() == MIRType::Int32) {
   5237        auto* ext = MExtendInt32ToInt64::New(alloc(), val, /*isUnsigned=*/true);
   5238        if (!ext) {
   5239          return nullptr;
   5240        }
   5241        curBlock_->add(ext);
   5242        return ext;
   5243      }
   5244      MOZ_ASSERT(val->type() == MIRType::Int64);
   5245      return val;
   5246    }
   5247    MOZ_ASSERT(val->type() == MIRType::Int32);
   5248    return val;
   5249  }
   5250 
   5251  /********************************************** WasmGC: array helpers ***/
   5252 
   5253  // Given `arrayObject`, the address of a WasmArrayObject, generate MIR to
   5254  // return the contents of the WasmArrayObject::numElements_ field.
   5255  // Adds trap site info for the null check.
   5256  [[nodiscard]] MDefinition* getWasmArrayObjectNumElements(
   5257      MDefinition* arrayObject) {
   5258    MOZ_ASSERT(arrayObject->type() == MIRType::WasmAnyRef);
   5259 
   5260    auto* numElements = MWasmLoadField::New(
   5261        alloc(), arrayObject, nullptr, WasmArrayObject::offsetOfNumElements(),
   5262        mozilla::Nothing(), MIRType::Int32, MWideningOp::None,
   5263        AliasSet::Load(AliasSet::WasmArrayNumElements),
   5264        mozilla::Some(trapSiteDesc()));
   5265    if (!numElements) {
   5266      return nullptr;
   5267    }
   5268    curBlock_->add(numElements);
   5269 
   5270    return numElements;
   5271  }
   5272 
   5273  // Given `arrayObject`, the address of a WasmArrayObject, generate MIR to
   5274  // return the contents of the WasmArrayObject::data_ field.
   5275  [[nodiscard]] MDefinition* getWasmArrayObjectData(MDefinition* arrayObject) {
   5276    MOZ_ASSERT(arrayObject->type() == MIRType::WasmAnyRef);
   5277 
   5278    auto* data = MWasmLoadField::New(
   5279        alloc(), arrayObject, nullptr, WasmArrayObject::offsetOfData(),
   5280        mozilla::Nothing(), MIRType::WasmArrayData, MWideningOp::None,
   5281        AliasSet::Load(AliasSet::WasmArrayDataPointer),
   5282        mozilla::Some(trapSiteDesc()));
   5283    if (!data) {
   5284      return nullptr;
   5285    }
   5286    curBlock_->add(data);
   5287 
   5288    return data;
   5289  }
   5290 
   5291  // Given a JIT-time-known type index `typeIndex` and a run-time known number
   5292  // of elements `numElements`, create MIR to allocate a new wasm array,
   5293  // possibly initialized with `typeIndex`s default value.
   5294  [[nodiscard]] MDefinition* createArrayObject(uint32_t typeIndex,
   5295                                               uint32_t allocSiteIndex,
   5296                                               MDefinition* numElements,
   5297                                               bool zeroFields) {
   5298    MDefinition* allocSite = loadAllocSiteInstanceData(allocSiteIndex);
   5299    if (!allocSite) {
   5300      return nullptr;
   5301    }
   5302 
   5303    const TypeDef* typeDef = &(*codeMeta().types)[typeIndex];
   5304    auto* arrayObject = MWasmNewArrayObject::New(
   5305        alloc(), instancePointer_, numElements, allocSite, typeDef, zeroFields,
   5306        trapSiteDesc());
   5307    if (!arrayObject) {
   5308      return nullptr;
   5309    }
   5310    curBlock_->add(arrayObject);
   5311 
   5312    return arrayObject;
   5313  }
   5314 
   5315  // This emits MIR to perform several actions common to array loads and
   5316  // stores.  Given `arrayObject`, that points to a WasmArrayObject, and an
   5317  // index value `index`, it:
   5318  //
   5319  // * Generates a trap if the array pointer is null
   5320  // * Gets the size of the array
   5321  // * Emits a bounds check of `index` against the array size
   5322  // * Retrieves the OOL object pointer from the array
   5323  // * Includes check for null via signal handler.
   5324  //
   5325  // The returned value is for the OOL object pointer.
   5326  [[nodiscard]] MDefinition* setupForArrayAccess(MDefinition* arrayObject,
   5327                                                 MDefinition* index) {
   5328    MOZ_ASSERT(arrayObject->type() == MIRType::WasmAnyRef);
   5329    MOZ_ASSERT(index->type() == MIRType::Int32);
   5330 
   5331    // Check for null is done in getWasmArrayObjectNumElements.
   5332 
   5333    // Get the size value for the array.
   5334    MDefinition* numElements = getWasmArrayObjectNumElements(arrayObject);
   5335    if (!numElements) {
   5336      return nullptr;
   5337    }
   5338 
   5339    // Create a bounds check.
   5340    auto* boundsCheck =
   5341        MWasmBoundsCheck::New(alloc(), index, numElements, trapSiteDesc(),
   5342                              MWasmBoundsCheck::Target::Other);
   5343    if (!boundsCheck) {
   5344      return nullptr;
   5345    }
   5346    curBlock_->add(boundsCheck);
   5347 
   5348    // Get the address of the first byte of the (OOL) data area.
   5349    return getWasmArrayObjectData(arrayObject);
   5350  }
   5351 
  // Stores `val` into `numElements` consecutive elements of `arrayObject`,
  // starting at `index`, by emitting a manually-constructed MIR loop.  No
  // bounds check is performed here; callers must already have established
  // that the range [index, index + numElements) lies within the array.
  // Returns false on OOM.
  [[nodiscard]] bool fillArray(uint32_t lineOrBytecode,
                               const ArrayType& arrayType,
                               MDefinition* arrayObject, MDefinition* index,
                               MDefinition* numElements, MDefinition* val,
                               WasmPreBarrierKind preBarrierKind,
                               WasmPostBarrierKind postBarrierKind) {
    mozilla::DebugOnly<MIRType> valMIRType = val->type();
    StorageType elemType = arrayType.elementType();
    MOZ_ASSERT(elemType.widenToValType().toMIRType() == valMIRType);

    uint32_t elemSize = elemType.size();
    MOZ_ASSERT(elemSize >= 1 && elemSize <= 16);

    // Make `arrayBase` point at the first byte of the (OOL) data area.
    MDefinition* arrayBase = getWasmArrayObjectData(arrayObject);
    if (!arrayBase) {
      return false;
    }

    // We have:
    //   arrayBase   : TargetWord
    //   index       : Int32
    //   numElements : Int32
    //   val         : <any StorageType>
    //   $elemSize = arrayType.elementType_.size(); 1, 2, 4, 8 or 16
    //
    // Generate MIR:
    //   <in current block>
    //     limit : Int32 = index + numElements
    //     if (limit == index) goto after; // skip loop if trip count == 0
    //   loop:
    //     indexPhi = phi(index, indexNext)
    //     arrayBase[index * $elemSize] = val
    //     indexNext = indexPhi + 1
    //     if (indexNext <u limit) goto loop;
    //   after:
    //
    // We construct the loop "manually" rather than using
    // FunctionCompiler::{startLoop,closeLoop} as the latter have awareness of
    // the wasm view of loops, whereas the loop we're building here is not a
    // wasm-level loop.
    // ==== Create the "loop" and "after" blocks ====
    MBasicBlock* loopBlock;
    if (!newBlock(curBlock_, &loopBlock, MBasicBlock::LOOP_HEADER)) {
      return false;
    }
    MBasicBlock* afterBlock;
    if (!newBlock(loopBlock, &afterBlock)) {
      return false;
    }

    // ==== Fill in the remainder of the block preceding the loop ====
    MAdd* limit = MAdd::NewWasm(alloc(), index, numElements, MIRType::Int32);
    if (!limit) {
      return false;
    }
    curBlock_->add(limit);

    // Note: the comparison (and eventually the entire initialisation loop) will
    // be folded out in the case where the number of elements is zero.
    // See MCompare::tryFoldEqualOperands.
    MDefinition* limitEqualsBase =
        compare(limit, index, JSOp::StrictEq, MCompare::Compare_UInt32);
    if (!limitEqualsBase) {
      return false;
    }
    MTest* skipIfLimitEqualsBase =
        MTest::New(alloc(), limitEqualsBase, afterBlock, loopBlock);
    if (!skipIfLimitEqualsBase) {
      return false;
    }
    curBlock_->end(skipIfLimitEqualsBase);
    // The skip-the-loop edge is the first predecessor of `afterBlock`.
    if (!afterBlock->addPredecessor(alloc(), curBlock_)) {
      return false;
    }

    // ==== Fill in the loop block as best we can ====
    curBlock_ = loopBlock;
    MPhi* indexPhi = MPhi::New(alloc(), MIRType::Int32);
    if (!indexPhi) {
      return false;
    }
    // Two inputs: the loop-entry value now, the back-edge value below.
    if (!indexPhi->reserveLength(2)) {
      return false;
    }
    indexPhi->addInput(index);
    curBlock_->addPhi(indexPhi);
    // This loop nests one level deeper than the enclosing wasm-level loops.
    curBlock_->setLoopDepth(rootCompiler_.loopDepth() + 1);

    if (!writeGcValueAtBasePlusScaledIndex(
            lineOrBytecode, elemType, arrayObject, AliasSet::WasmArrayDataArea,
            val, arrayBase, elemSize, indexPhi, preBarrierKind,
            postBarrierKind)) {
      return false;
    }

    auto* indexNext =
        MAdd::NewWasm(alloc(), indexPhi, constantI32(1), MIRType::Int32);
    if (!indexNext) {
      return false;
    }
    curBlock_->add(indexNext);
    // Close the phi with the back-edge value.
    indexPhi->addInput(indexNext);

    MDefinition* indexNextLtuLimit =
        compare(indexNext, limit, JSOp::Lt, MCompare::Compare_UInt32);
    if (!indexNextLtuLimit) {
      return false;
    }
    auto* continueIfIndexNextLtuLimit =
        MTest::New(alloc(), indexNextLtuLimit, loopBlock, afterBlock);
    if (!continueIfIndexNextLtuLimit) {
      return false;
    }
    curBlock_->end(continueIfIndexNextLtuLimit);
    // Register the loop's back edge on itself.
    if (!loopBlock->addPredecessor(alloc(), loopBlock)) {
      return false;
    }
    // ==== Loop block completed ====

    curBlock_ = afterBlock;
    return true;
  }
   5475 
  // Copy `numElements` elements from `srcArrayObject` at `srcArrayIndex` to
  // `dstArrayObject` at `dstArrayIndex`, bounds-checking both ranges (which
  // traps on out-of-range or null).  The copying itself is delegated to a
  // builtin call, and is skipped entirely when `numElements` is zero.
  // Returns false on OOM.
  [[nodiscard]] bool createArrayCopy(uint32_t lineOrBytecode,
                                     MDefinition* dstArrayObject,
                                     MDefinition* dstArrayIndex,
                                     MDefinition* srcArrayObject,
                                     MDefinition* srcArrayIndex,
                                     MDefinition* numElements, int32_t elemSize,
                                     bool elemsAreRefTyped) {
    // Check for null is done in getWasmArrayObjectNumElements.

    // Get the arrays' actual sizes.
    MDefinition* dstNumElements = getWasmArrayObjectNumElements(dstArrayObject);
    if (!dstNumElements) {
      return false;
    }
    MDefinition* srcNumElements = getWasmArrayObjectNumElements(srcArrayObject);
    if (!srcNumElements) {
      return false;
    }

    // Create the bounds checks.
    MInstruction* dstBoundsCheck = MWasmBoundsCheckRange32::New(
        alloc(), dstArrayIndex, numElements, dstNumElements, trapSiteDesc());
    if (!dstBoundsCheck) {
      return false;
    }
    curBlock_->add(dstBoundsCheck);

    MInstruction* srcBoundsCheck = MWasmBoundsCheckRange32::New(
        alloc(), srcArrayIndex, numElements, srcNumElements, trapSiteDesc());
    if (!srcBoundsCheck) {
      return false;
    }
    curBlock_->add(srcBoundsCheck);

    // Check if numElements != 0 -- optimization to not invoke builtins.
    MBasicBlock* copyBlock = nullptr;
    if (!newBlock(curBlock_, &copyBlock)) {
      return false;
    }
    MBasicBlock* joinBlock = nullptr;
    if (!newBlock(curBlock_, &joinBlock)) {
      return false;
    }

    // NOTE(review): unlike the allocations above, this New result is not
    // null-checked before use — presumably relying on the allocator's
    // ballast; confirm this is intentional.
    MInstruction* condition =
        MCompare::NewWasm(alloc(), numElements, constantI32(0), JSOp::StrictEq,
                          MCompare::Compare_UInt32);
    curBlock_->add(condition);

    // numElements == 0 jumps straight to the join block.
    MTest* test = MTest::New(alloc(), condition, joinBlock, copyBlock);
    if (!test) {
      return false;
    }
    curBlock_->end(test);
    curBlock_ = copyBlock;

    // Load the data pointers only on the path that actually copies.
    MInstruction* dstData = MWasmLoadField::New(
        alloc(), dstArrayObject, nullptr, WasmArrayObject::offsetOfData(),
        mozilla::Nothing(), MIRType::WasmArrayData, MWideningOp::None,
        AliasSet::Load(AliasSet::WasmArrayDataPointer));
    if (!dstData) {
      return false;
    }
    curBlock_->add(dstData);

    MInstruction* srcData = MWasmLoadField::New(
        alloc(), srcArrayObject, nullptr, WasmArrayObject::offsetOfData(),
        mozilla::Nothing(), MIRType::WasmArrayData, MWideningOp::None,
        AliasSet::Load(AliasSet::WasmArrayDataPointer));
    if (!srcData) {
      return false;
    }
    curBlock_->add(srcData);

    // Reference-typed elements need barriers, so they go through a
    // different builtin than plain memory moves.
    if (elemsAreRefTyped) {
      MOZ_RELEASE_ASSERT(elemSize == sizeof(void*));

      if (!builtinCall6(SASigArrayRefsMove, lineOrBytecode, dstArrayObject,
                        dstData, dstArrayIndex, srcData, srcArrayIndex,
                        numElements, nullptr)) {
        return false;
      }
    } else {
      MDefinition* elemSizeDef = constantI32(elemSize);
      if (!elemSizeDef) {
        return false;
      }

      if (!builtinCall6(SASigArrayMemMove, lineOrBytecode, dstData,
                        dstArrayIndex, srcData, srcArrayIndex, elemSizeDef,
                        numElements, nullptr)) {
        return false;
      }
    }

    MGoto* fallthrough = MGoto::New(alloc(), joinBlock);
    if (!fallthrough) {
      return false;
    }
    curBlock_->end(fallthrough);
    if (!joinBlock->addPredecessor(alloc(), curBlock_)) {
      return false;
    }
    curBlock_ = joinBlock;
    return true;
  }
   5582 
   5583  [[nodiscard]] bool createArrayFill(uint32_t lineOrBytecode,
   5584                                     uint32_t typeIndex,
   5585                                     MDefinition* arrayObject,
   5586                                     MDefinition* index, MDefinition* val,
   5587                                     MDefinition* numElements) {
   5588    MOZ_ASSERT(arrayObject->type() == MIRType::WasmAnyRef);
   5589    MOZ_ASSERT(index->type() == MIRType::Int32);
   5590    MOZ_ASSERT(numElements->type() == MIRType::Int32);
   5591 
   5592    const ArrayType& arrayType = (*codeMeta().types)[typeIndex].arrayType();
   5593 
   5594    // Check for null is done in getWasmArrayObjectNumElements.
   5595 
   5596    // Get the array's actual size.
   5597    MDefinition* actualNumElements = getWasmArrayObjectNumElements(arrayObject);
   5598    if (!actualNumElements) {
   5599      return false;
   5600    }
   5601 
   5602    // Create a bounds check.
   5603    auto* boundsCheck = MWasmBoundsCheckRange32::New(
   5604        alloc(), index, numElements, actualNumElements, trapSiteDesc());
   5605    if (!boundsCheck) {
   5606      return false;
   5607    }
   5608    curBlock_->add(boundsCheck);
   5609 
   5610    return fillArray(lineOrBytecode, arrayType, arrayObject, index, numElements,
   5611                     val, WasmPreBarrierKind::Normal,
   5612                     WasmPostBarrierKind::Edge);
   5613  }
   5614 
   5615  /*********************************************** WasmGC: other helpers ***/
   5616 
   5617  // Generate MIR that causes a trap of kind `trapKind` if `arg` is zero.
   5618  // Currently `arg` may only be a MIRType::Int32, but that requirement could
   5619  // be relaxed if needed in future.
   5620  [[nodiscard]] bool trapIfZero(wasm::Trap trapKind, MDefinition* arg) {
   5621    MOZ_ASSERT(arg->type() == MIRType::Int32);
   5622 
   5623    MBasicBlock* trapBlock = nullptr;
   5624    if (!newBlock(curBlock_, &trapBlock)) {
   5625      return false;
   5626    }
   5627 
   5628    auto* trap = MWasmTrap::New(alloc(), trapKind, trapSiteDesc());
   5629    if (!trap) {
   5630      return false;
   5631    }
   5632    trapBlock->end(trap);
   5633 
   5634    MBasicBlock* joinBlock = nullptr;
   5635    if (!newBlock(curBlock_, &joinBlock)) {
   5636      return false;
   5637    }
   5638 
   5639    auto* test = MTest::New(alloc(), arg, joinBlock, trapBlock);
   5640    if (!test) {
   5641      return false;
   5642    }
   5643    curBlock_->end(test);
   5644    curBlock_ = joinBlock;
   5645    return true;
   5646  }
   5647 
   5648  // Generate MIR that attempts to cast `ref` to `castToTypeDef`.  If the
   5649  // cast fails, we trap.  If it succeeds, then `ref` can be assumed to
   5650  // have a type that is a subtype of (or the same as) `castToTypeDef` after
   5651  // this point.
   5652  [[nodiscard]] MDefinition* refCast(MDefinition* ref, RefType destType) {
   5653    MInstruction* cast = nullptr;
   5654    if (destType.isTypeRef()) {
   5655      uint32_t typeIndex = codeMeta().types->indexOf(*destType.typeDef());
   5656      MDefinition* superSTV = loadSuperTypeVector(typeIndex);
   5657      if (!superSTV) {
   5658        return nullptr;
   5659      }
   5660      cast = MWasmRefCastConcrete::New(alloc(), ref, superSTV, destType,
   5661                                       trapSiteDesc());
   5662    } else {
   5663      cast = MWasmRefCastAbstract::New(alloc(), ref, destType, trapSiteDesc());
   5664    }
   5665 
   5666    if (!cast) {
   5667      return nullptr;
   5668    }
   5669    curBlock_->add(cast);
   5670    return cast;
   5671  }
   5672 
   5673  // Generate MIR that computes a boolean value indicating whether or not it
   5674  // is possible to cast `ref` to `destType`.
   5675  [[nodiscard]] MDefinition* refTest(MDefinition* ref, RefType destType) {
   5676    MInstruction* isSubTypeOf = nullptr;
   5677    if (destType.isTypeRef()) {
   5678      uint32_t typeIndex = codeMeta().types->indexOf(*destType.typeDef());
   5679      MDefinition* superSTV = loadSuperTypeVector(typeIndex);
   5680      if (!superSTV) {
   5681        return nullptr;
   5682      }
   5683      isSubTypeOf = MWasmRefTestConcrete::New(alloc(), ref, superSTV, destType);
   5684    } else {
   5685      isSubTypeOf = MWasmRefTestAbstract::New(alloc(), ref, destType);
   5686    }
   5687    MOZ_ASSERT(isSubTypeOf);
   5688 
   5689    curBlock_->add(isSubTypeOf);
   5690    return isSubTypeOf;
   5691  }
   5692 
  // Generates MIR for br_on_cast and br_on_cast_fail.  `onSuccess` selects
  // which of the two: true means branch when the cast succeeds
  // (br_on_cast), false means branch when it fails (br_on_cast_fail).  The
  // branch target is left as a control-flow patch to be bound later.
  [[nodiscard]] bool brOnCastCommon(bool onSuccess, uint32_t labelRelativeDepth,
                                    RefType sourceType, RefType destType,
                                    const ResultType& labelType,
                                    const DefVector& values) {
    if (inDeadCode()) {
      return true;
    }

    MBasicBlock* fallthroughBlock = nullptr;
    if (!newBlock(curBlock_, &fallthroughBlock)) {
      return false;
    }

    // `values` are the values in the top block-value on the stack.  Since the
    // argument to `br_on_cast{_fail}` is at the top of the stack, it is the
    // last element in `values`.
    //
    // For both br_on_cast and br_on_cast_fail, the OpIter validation routines
    // ensure that `values` is non-empty (by rejecting the case
    // `labelType->length() < 1`) and that the last value in `values` is
    // reftyped.
    MOZ_RELEASE_ASSERT(values.length() > 0);
    MDefinition* ref = values.back();
    MOZ_ASSERT(ref->type() == MIRType::WasmAnyRef);

    MDefinition* success = refTest(ref, destType);
    if (!success) {
      return false;
    }

    // The branch-taken successor is left null here; addControlFlowPatch
    // records it so bindBranches can fill it in when the target is known.
    MTest* test;
    if (onSuccess) {
      test = MTest::New(alloc(), success, nullptr, fallthroughBlock);
      if (!test || !addControlFlowPatch(test, labelRelativeDepth,
                                        MTest::TrueBranchIndex)) {
        return false;
      }
    } else {
      test = MTest::New(alloc(), success, fallthroughBlock, nullptr);
      if (!test || !addControlFlowPatch(test, labelRelativeDepth,
                                        MTest::FalseBranchIndex)) {
        return false;
      }
    }

    // Forward the block values to the branch target.
    if (!pushDefs(values)) {
      return false;
    }

    curBlock_->end(test);
    curBlock_ = fallthroughBlock;
    return true;
  }
   5747 
   5748  [[nodiscard]] bool brOnNonStruct(const DefVector& values) {
   5749    if (inDeadCode()) {
   5750      return true;
   5751    }
   5752 
   5753    MBasicBlock* fallthroughBlock = nullptr;
   5754    if (!newBlock(curBlock_, &fallthroughBlock)) {
   5755      return false;
   5756    }
   5757 
   5758    MOZ_ASSERT(values.length() > 0);
   5759    MOZ_ASSERT(values.back()->type() == MIRType::WasmAnyRef);
   5760 
   5761    MGoto* jump = MGoto::New(alloc(), fallthroughBlock);
   5762    if (!jump) {
   5763      return false;
   5764    }
   5765    if (!pushDefs(values)) {
   5766      return false;
   5767    }
   5768 
   5769    curBlock_->end(jump);
   5770    curBlock_ = fallthroughBlock;
   5771    return true;
   5772  }
   5773 
   5774  [[nodiscard]] MDefinition* convertAnyExtern(MDefinition* ref,
   5775                                              wasm::RefType::Kind kind) {
   5776    auto* converted = MWasmRefConvertAnyExtern::New(alloc(), ref, kind);
   5777    if (!converted) {
   5778      return nullptr;
   5779    }
   5780    curBlock_->add(converted);
   5781    return converted;
   5782  }
   5783 
   5784  /************************************************************ DECODING ***/
   5785 
   5786  // AsmJS adds a line number to `callSiteLineNums` for certain operations that
   5787  // are represented by a JS call, such as math builtins. We use these line
   5788  // numbers when calling builtins. This method will read from
   5789  // `callSiteLineNums` when we are using AsmJS, or else return the current
   5790  // bytecode offset.
   5791  //
   5792  // This method MUST be called from opcodes that AsmJS will emit a call site
   5793  // line number for, or else the arrays will get out of sync. Other opcodes
   5794  // must use `readBytecodeOffset` below.
   5795  uint32_t readCallSiteLineOrBytecode() {
   5796    if (!func_.callSiteLineNums.empty()) {
   5797      return func_.callSiteLineNums[lastReadCallSite_++];
   5798    }
   5799    return iter_.lastOpcodeOffset();
   5800  }
   5801 
  // Return the bytecode offset of the most recently read opcode.
  uint32_t readBytecodeOffset() { return iter_.lastOpcodeOffset(); }
   5804 
   5805  CallRefHint readCallRefHint() {
   5806    // We don't track anything if we're not using lazy tiering
   5807    if (compilerEnv().mode() != CompileMode::LazyTiering) {
   5808      return CallRefHint();
   5809    }
   5810 
   5811    CallRefMetricsRange rangeInModule =
   5812        codeTailMeta()->getFuncDefCallRefs(funcIndex());
   5813    uint32_t localIndex = numCallRefs_++;
   5814    MOZ_RELEASE_ASSERT(localIndex < rangeInModule.length);
   5815    uint32_t moduleIndex = rangeInModule.begin + localIndex;
   5816    return codeTailMeta()->getCallRefHint(moduleIndex);
   5817  }
   5818 
#if DEBUG
  // True once the decoder has consumed the entire function body.
  bool done() const { return iter_.done(); }
#endif
   5822 
   5823  /*************************************************************************/
   5824 private:
   5825  [[nodiscard]] bool newBlock(MBasicBlock* pred, MBasicBlock** block,
   5826                              MBasicBlock::Kind kind = MBasicBlock::NORMAL) {
   5827    *block = MBasicBlock::New(mirGraph(), info(), pred, kind);
   5828    if (!*block) {
   5829      return false;
   5830    }
   5831    mirGraph().addBlock(*block);
   5832    (*block)->setLoopDepth(rootCompiler_.loopDepth());
   5833    return true;
   5834  }
   5835 
  // Create a new block and terminate `pred` with an unconditional jump to
  // it.  Returns false on OOM.
  [[nodiscard]] bool goToNewBlock(MBasicBlock* pred, MBasicBlock** block) {
    if (!newBlock(pred, block)) {
      return false;
    }
    pred->end(MGoto::New(alloc(), *block));
    return true;
  }
   5843 
  // Terminate `prev` with an unconditional jump to the already-existing
  // block `next`, registering `prev` as a predecessor of `next`.  Returns
  // false on OOM.
  [[nodiscard]] bool goToExistingBlock(MBasicBlock* prev, MBasicBlock* next) {
    MOZ_ASSERT(prev);
    MOZ_ASSERT(next);
    prev->end(MGoto::New(alloc(), next));
    return next->addPredecessor(alloc(), prev);
  }
   5850 
  // Resolve all pending control-flow patches recorded for the branch target
  // at absolute depth `absolute`: create a join block, point every patched
  // branch at it, make it the current block, and pop the block's result
  // values into `defs`.  If there are no patches, the target is reachable
  // only by falling through.  Returns false on OOM.
  [[nodiscard]] bool bindBranches(uint32_t absolute, DefVector* defs) {
    if (absolute >= pendingBlocks_.length() ||
        pendingBlocks_[absolute].patches.empty()) {
      // No branches to this target; just pop the fallthrough values.
      return inDeadCode() || popPushedDefs(defs);
    }

    ControlFlowPatchVector& patches = pendingBlocks_[absolute].patches;
    MControlInstruction* ins = patches[0].ins;
    MBasicBlock* pred = ins->block();

    // The first patch's block seeds the join block's predecessor list.
    MBasicBlock* join = nullptr;
    if (!newBlock(pred, &join)) {
      return false;
    }

    // Use branch hinting information if any.
    if (pendingBlocks_[absolute].hint != BranchHint::Invalid) {
      BranchHint hint = pendingBlocks_[absolute].hint;
      if (hint == BranchHint::Likely) {
        join->setFrequency(Frequency::Likely);
      } else if (hint == BranchHint::Unlikely) {
        join->setFrequency(Frequency::Unlikely);
      }
    }

    // Marking tracks which predecessors have already been added, so a block
    // contributing multiple patches is only registered once.
    pred->mark();
    ins->replaceSuccessor(patches[0].index, join);

    for (size_t i = 1; i < patches.length(); i++) {
      ins = patches[i].ins;

      pred = ins->block();
      if (!pred->isMarked()) {
        if (!join->addPredecessor(alloc(), pred)) {
          return false;
        }
        pred->mark();
      }

      ins->replaceSuccessor(patches[i].index, join);
    }

    // Clear the marks again before returning.
    MOZ_ASSERT_IF(curBlock_, !curBlock_->isMarked());
    for (uint32_t i = 0; i < join->numPredecessors(); i++) {
      join->getPredecessor(i)->unmark();
    }

    // If the current block is live, it falls through into the join block.
    if (curBlock_ && !goToExistingBlock(curBlock_, join)) {
      return false;
    }

    curBlock_ = join;

    if (!popPushedDefs(defs)) {
      return false;
    }

    patches.clear();
    return true;
  }
   5911 
  // Per-opcode emitters, called from emitBodyExprs.  Each reads its opcode's
  // immediates/operands via iter() and emits the corresponding MIR.

  // Constants.
  bool emitI32Const();
  bool emitI64Const();
  bool emitF32Const();
  bool emitF64Const();
  // Structured control flow and branches.
  bool emitBlock();
  bool emitLoop();
  bool emitIf();
  bool emitElse();
  bool emitEnd();
  bool emitBr();
  bool emitBrIf();
  bool emitBrTable();
  bool emitReturn();
  bool emitUnreachable();
  // Exception handling.
  bool emitTry();
  bool emitCatch();
  bool emitCatchAll();
  bool emitTryTable();
  bool emitDelegate();
  bool emitThrow();
  bool emitThrowRef();
  bool emitRethrow();
  // Calls (direct, indirect, tail, inlined).
  bool emitInlineCall(const FuncType& funcType, uint32_t funcIndex,
                      InliningHeuristics::CallKind callKind,
                      const DefVector& args, DefVector* results);
  bool emitCall(bool asmJSFuncDef);
  bool emitCallIndirect(bool oldStyle);
  bool emitStackSwitch();
  bool emitReturnCall();
  bool emitReturnCallIndirect();
  bool emitReturnCallRef();
  // Locals and globals.
  bool emitGetLocal();
  bool emitSetLocal();
  bool emitTeeLocal();
  bool emitGetGlobal();
  bool emitSetGlobal();
  bool emitTeeGlobal();
  // Numeric conversions and arithmetic.
  template <typename MIRClass>
  bool emitUnary(ValType operandType);
  template <typename MIRClass>
  bool emitConversion(ValType operandType, ValType resultType);
  template <typename MIRClass>
  bool emitUnaryWithType(ValType operandType, MIRType mirType);
  template <typename MIRClass>
  bool emitConversionWithType(ValType operandType, ValType resultType,
                              MIRType mirType);
  bool emitTruncate(ValType operandType, ValType resultType, bool isUnsigned,
                    bool isSaturating);
  bool emitSignExtend(uint32_t srcSize, uint32_t targetSize);
  bool emitExtendI32(bool isUnsigned);
  bool emitConvertI64ToFloatingPoint(ValType resultType, MIRType mirType,
                                     bool isUnsigned);
  bool emitReinterpret(ValType resultType, ValType operandType,
                       MIRType mirType);
  bool emitAdd(ValType type, MIRType mirType);
  bool emitSub(ValType type, MIRType mirType);
  bool emitRotate(ValType type, bool isLeftRotation);
  bool emitBitNot(ValType operandType, MIRType mirType);
  bool emitBitwiseAndOrXor(ValType operandType, MIRType mirType,
                           MWasmBinaryBitwise::SubOpcode subOpc);
  template <typename MIRClass>
  bool emitShift(ValType operandType, MIRType mirType);
  bool emitUrsh(ValType operandType, MIRType mirType);
  bool emitMul(ValType operandType, MIRType mirType);
  bool emitDiv(ValType operandType, MIRType mirType, bool isUnsigned);
  bool emitRem(ValType operandType, MIRType mirType, bool isUnsigned);
  bool emitMinMax(ValType operandType, MIRType mirType, bool isMax);
  bool emitCopySign(ValType operandType);
  bool emitComparison(ValType operandType, JSOp compareOp,
                      MCompare::CompareType compareType);
  bool emitSelect(bool typed);
  // Memory accesses and math builtins.
  bool emitLoad(ValType type, Scalar::Type viewType);
  bool emitStore(ValType resultType, Scalar::Type viewType);
  bool emitTeeStore(ValType resultType, Scalar::Type viewType);
  bool emitTeeStoreWithCoercion(ValType resultType, Scalar::Type viewType);
  bool tryInlineUnaryBuiltin(SymbolicAddress callee, MDefinition* input);
  bool emitUnaryMathBuiltinCall(const SymbolicAddressSignature& callee);
  bool emitBinaryMathBuiltinCall(const SymbolicAddressSignature& callee);
  bool emitMemoryGrow();
  bool emitMemorySize();
  // Atomics (threads proposal).
  bool emitAtomicCmpXchg(ValType type, Scalar::Type viewType);
  bool emitAtomicLoad(ValType type, Scalar::Type viewType);
  bool emitAtomicRMW(ValType type, Scalar::Type viewType, jit::AtomicOp op);
  bool emitAtomicStore(ValType type, Scalar::Type viewType);
  bool emitWait(ValType type, uint32_t byteSize);
  bool emitFence();
  bool emitNotify();
  bool emitAtomicXchg(ValType type, Scalar::Type viewType);
  // Bulk memory and table operations.
  bool emitMemCopyCall(uint32_t dstMemIndex, uint32_t srcMemIndex,
                       MDefinition* dst, MDefinition* src, MDefinition* len);
  bool emitMemCopyInline(uint32_t memoryIndex, MDefinition* dst,
                         MDefinition* src, uint32_t length);
  bool emitMemCopy();
  bool emitTableCopy();
  bool emitDataOrElemDrop(bool isData);
  bool emitMemFillCall(uint32_t memoryIndex, MDefinition* start,
                       MDefinition* val, MDefinition* len);
  bool emitMemFillInline(uint32_t memoryIndex, MDefinition* start,
                         MDefinition* val, uint32_t length);
  bool emitMemFill();
  bool emitMemInit();
  bool emitTableInit();
  bool emitTableFill();
  bool emitMemDiscard();
  bool emitTableGet();
  bool emitTableGrow();
  bool emitTableSet();
  bool emitTableSize();
  // References.
  bool emitRefFunc();
  bool emitRefNull();
  bool emitRefIsNull();
  // SIMD (128-bit vectors).
  bool emitConstSimd128();
  bool emitBinarySimd128(bool commutative, SimdOp op);
  bool emitTernarySimd128(wasm::SimdOp op);
  bool emitShiftSimd128(SimdOp op);
  bool emitSplatSimd128(ValType inType, SimdOp op);
  bool emitUnarySimd128(SimdOp op);
  bool emitReduceSimd128(SimdOp op);
  bool emitExtractLaneSimd128(ValType outType, uint32_t laneLimit, SimdOp op);
  bool emitReplaceLaneSimd128(ValType laneType, uint32_t laneLimit, SimdOp op);
  bool emitShuffleSimd128();
  bool emitLoadSplatSimd128(Scalar::Type viewType, wasm::SimdOp splatOp);
  bool emitLoadExtendSimd128(wasm::SimdOp op);
  bool emitLoadZeroSimd128(Scalar::Type viewType, size_t numBytes);
  bool emitLoadLaneSimd128(uint32_t laneSize);
  bool emitStoreLaneSimd128(uint32_t laneSize);
  // GC proposal: structs, arrays, i31, casts.
  bool emitRefAsNonNull();
  bool emitBrOnNull();
  bool emitBrOnNonNull();
  bool emitSpeculativeInlineCallRef(uint32_t bytecodeOffset,
                                    const FuncType& funcType,
                                    CallRefHint expectedFuncIndices,
                                    MDefinition* actualCalleeFunc,
                                    const DefVector& args, DefVector* results);
  bool emitCallRef();
  bool emitStructNew();
  bool emitStructNewDefault();
  bool emitStructSet();
  bool emitStructGet(FieldWideningOp wideningOp);
  bool emitArrayNew();
  bool emitArrayNewDefault();
  bool emitArrayNewFixed();
  bool emitArrayNewData();
  bool emitArrayNewElem();
  bool emitArrayInitData();
  bool emitArrayInitElem();
  bool emitArraySet();
  bool emitArrayGet(FieldWideningOp wideningOp);
  bool emitArrayLen();
  bool emitArrayCopy();
  bool emitArrayFill();
  bool emitRefI31();
  bool emitI31Get(FieldWideningOp wideningOp);
  bool emitRefTest(bool nullable);
  bool emitRefCast(bool nullable);
  bool emitBrOnCast(bool onSuccess);
  bool emitAnyConvertExtern();
  bool emitExternConvertAny();
  bool emitCallBuiltinModuleFunc();

 public:
  // Top-level driver: decodes and emits the whole function body.
  bool emitBodyExprs();
};
   6075 
   6076 template <>
   6077 MDefinition* FunctionCompiler::unary<MToFloat32>(MDefinition* op) {
   6078  if (inDeadCode()) {
   6079    return nullptr;
   6080  }
   6081  auto* ins = MToFloat32::New(alloc(), op, mustPreserveNaN(op->type()));
   6082  curBlock_->add(ins);
   6083  return ins;
   6084 }
   6085 
   6086 template <>
   6087 MDefinition* FunctionCompiler::unary<MWasmBuiltinTruncateToInt32>(
   6088    MDefinition* op) {
   6089  if (inDeadCode()) {
   6090    return nullptr;
   6091  }
   6092  auto* ins = MWasmBuiltinTruncateToInt32::New(
   6093      alloc(), op, instancePointer_, trapSiteDescWithCallSiteLineNumber());
   6094  curBlock_->add(ins);
   6095  return ins;
   6096 }
   6097 
   6098 template <>
   6099 MDefinition* FunctionCompiler::unary<MNot>(MDefinition* op) {
   6100  if (inDeadCode()) {
   6101    return nullptr;
   6102  }
   6103  auto* ins = MNot::NewInt32(alloc(), op);
   6104  curBlock_->add(ins);
   6105  return ins;
   6106 }
   6107 
   6108 template <>
   6109 MDefinition* FunctionCompiler::unary<MAbs>(MDefinition* op, MIRType type) {
   6110  if (inDeadCode()) {
   6111    return nullptr;
   6112  }
   6113  auto* ins = MAbs::NewWasm(alloc(), op, type);
   6114  curBlock_->add(ins);
   6115  return ins;
   6116 }
   6117 
   6118 bool FunctionCompiler::emitI32Const() {
   6119  int32_t i32;
   6120  if (!iter().readI32Const(&i32)) {
   6121    return false;
   6122  }
   6123 
   6124  iter().setResult(constantI32(i32));
   6125  return true;
   6126 }
   6127 
   6128 bool FunctionCompiler::emitI64Const() {
   6129  int64_t i64;
   6130  if (!iter().readI64Const(&i64)) {
   6131    return false;
   6132  }
   6133 
   6134  iter().setResult(constantI64(i64));
   6135  return true;
   6136 }
   6137 
   6138 bool FunctionCompiler::emitF32Const() {
   6139  float f32;
   6140  if (!iter().readF32Const(&f32)) {
   6141    return false;
   6142  }
   6143 
   6144  iter().setResult(constantF32(f32));
   6145  return true;
   6146 }
   6147 
   6148 bool FunctionCompiler::emitF64Const() {
   6149  double f64;
   6150  if (!iter().readF64Const(&f64)) {
   6151    return false;
   6152  }
   6153 
   6154  iter().setResult(constantF64(f64));
   6155  return true;
   6156 }
   6157 
   6158 bool FunctionCompiler::emitBlock() {
   6159  BlockType type;
   6160  return iter().readBlock(&type) && startBlock();
   6161 }
   6162 
   6163 bool FunctionCompiler::emitLoop() {
   6164  BlockType type;
   6165  if (!iter().readLoop(&type)) {
   6166    return false;
   6167  }
   6168 
   6169  MBasicBlock* loopHeader;
   6170  if (!startLoop(&loopHeader, type.params().length())) {
   6171    return false;
   6172  }
   6173 
   6174  addInterruptCheck();
   6175 
   6176  iter().controlItem().block = loopHeader;
   6177  return true;
   6178 }
   6179 
bool FunctionCompiler::emitIf() {
  // Query the branch hint before readIf() advances the bytecode iterator;
  // the hint lookup is keyed by the current relative offset.
  BranchHint branchHint =
      iter().getBranchHint(funcIndex(), relativeBytecodeOffset());

  BlockType type;
  MDefinition* condition = nullptr;
  if (!iter().readIf(&type, &condition)) {
    return false;
  }

  // Emit the conditional branch and begin compiling the then-arm. The else
  // block is stashed on the control item for emitElse/emitEnd to use.
  MBasicBlock* elseBlock;
  if (!branchAndStartThen(condition, &elseBlock)) {
    return false;
  }

  // Store the branch hint in the basic block.
  if (!inDeadCode() && branchHint != BranchHint::Invalid) {
    if (branchHint == BranchHint::Likely) {
      getCurBlock()->setFrequency(Frequency::Likely);
    } else if (branchHint == BranchHint::Unlikely) {
      getCurBlock()->setFrequency(Frequency::Unlikely);
    }
  }

  iter().controlItem().block = elseBlock;
  return true;
}
   6207 
   6208 bool FunctionCompiler::emitElse() {
   6209  ResultType paramType;
   6210  ResultType resultType;
   6211  DefVector thenValues;
   6212  if (!iter().readElse(&paramType, &resultType, &thenValues)) {
   6213    return false;
   6214  }
   6215 
   6216  if (!pushDefs(thenValues)) {
   6217    return false;
   6218  }
   6219 
   6220  Control& control = iter().controlItem();
   6221  return switchToElse(control.block, &control.block);
   6222 }
   6223 
bool FunctionCompiler::emitEnd() {
  LabelKind kind;
  ResultType type;
  DefVector preJoinDefs;
  DefVector resultsForEmptyElse;
  if (!iter().readEnd(&kind, &type, &preJoinDefs, &resultsForEmptyElse)) {
    return false;
  }

  Control& control = iter().controlItem();
  MBasicBlock* block = control.block;

  // Queue the fall-through values so they take part in the join below.
  if (!pushDefs(preJoinDefs)) {
    return false;
  }

  // Every label case is responsible to pop the control item at the appropriate
  // time for the label case
  DefVector postJoinDefs;
  switch (kind) {
    case LabelKind::Body: {
      // End of the function body: join, emit the implicit return, and
      // confirm the control stack is empty before ending the function.
      MOZ_ASSERT(!control.tryControl);
      if (!emitBodyRethrowPad(control)) {
        return false;
      }
      if (!finishBlock(&postJoinDefs)) {
        return false;
      }
      if (!returnValues(std::move(postJoinDefs))) {
        return false;
      }
      iter().popEnd();
      MOZ_ASSERT(iter().controlStackEmpty());
      return iter().endFunction(iter().end());
    }
    case LabelKind::Block:
      MOZ_ASSERT(!control.tryControl);
      if (!finishBlock(&postJoinDefs)) {
        return false;
      }
      iter().popEnd();
      break;
    case LabelKind::Loop:
      // Loop labels are closed against the header block saved by emitLoop.
      MOZ_ASSERT(!control.tryControl);
      if (!closeLoop(block, &postJoinDefs)) {
        return false;
      }
      iter().popEnd();
      break;
    case LabelKind::Then: {
      MOZ_ASSERT(!control.tryControl);
      // If we didn't see an Else, create a trivial else block so that we create
      // a diamond anyway, to preserve Ion invariants.
      if (!switchToElse(block, &block)) {
        return false;
      }

      if (!pushDefs(resultsForEmptyElse)) {
        return false;
      }

      if (!joinIfElse(block, &postJoinDefs)) {
        return false;
      }
      iter().popEnd();
      break;
    }
    case LabelKind::Else:
      MOZ_ASSERT(!control.tryControl);
      if (!joinIfElse(block, &postJoinDefs)) {
        return false;
      }
      iter().popEnd();
      break;
    case LabelKind::Try:
    case LabelKind::Catch:
    case LabelKind::CatchAll:
      // All legacy-EH label kinds share the try/catch join; the try control
      // allocated at `try` is released here.
      MOZ_ASSERT(control.tryControl);
      if (!finishTryCatch(kind, control, &postJoinDefs)) {
        return false;
      }
      rootCompiler().freeTryControl(std::move(control.tryControl));
      iter().popEnd();
      break;
    case LabelKind::TryTable:
      MOZ_ASSERT(control.tryControl);
      if (!finishTryTable(control, &postJoinDefs)) {
        return false;
      }
      rootCompiler().freeTryControl(std::move(control.tryControl));
      iter().popEnd();
      break;
  }

  // In live code the join must produce exactly the label's declared results.
  MOZ_ASSERT_IF(!inDeadCode(), postJoinDefs.length() == type.length());
  iter().setResults(postJoinDefs.length(), postJoinDefs);

  return true;
}
   6323 
   6324 bool FunctionCompiler::emitBr() {
   6325  uint32_t relativeDepth;
   6326  ResultType type;
   6327  DefVector values;
   6328  if (!iter().readBr(&relativeDepth, &type, &values)) {
   6329    return false;
   6330  }
   6331 
   6332  return br(relativeDepth, values);
   6333 }
   6334 
   6335 bool FunctionCompiler::emitBrIf() {
   6336  uint32_t relativeDepth;
   6337  ResultType type;
   6338  DefVector values;
   6339  MDefinition* condition;
   6340 
   6341  BranchHint branchHint =
   6342      iter().getBranchHint(funcIndex(), relativeBytecodeOffset());
   6343 
   6344  if (!iter().readBrIf(&relativeDepth, &type, &values, &condition)) {
   6345    return false;
   6346  }
   6347 
   6348  return brIf(relativeDepth, values, condition, branchHint);
   6349 }
   6350 
   6351 bool FunctionCompiler::emitBrTable() {
   6352  Uint32Vector depths;
   6353  uint32_t defaultDepth;
   6354  ResultType branchValueType;
   6355  DefVector branchValues;
   6356  MDefinition* index;
   6357  if (!iter().readBrTable(&depths, &defaultDepth, &branchValueType,
   6358                          &branchValues, &index)) {
   6359    return false;
   6360  }
   6361 
   6362  // If all the targets are the same, or there are no targets, we can just
   6363  // use a goto. This is not just an optimization: MaybeFoldConditionBlock
   6364  // assumes that tables have more than one successor.
   6365  bool allSameDepth = true;
   6366  for (uint32_t depth : depths) {
   6367    if (depth != defaultDepth) {
   6368      allSameDepth = false;
   6369      break;
   6370    }
   6371  }
   6372 
   6373  if (allSameDepth) {
   6374    return br(defaultDepth, branchValues);
   6375  }
   6376 
   6377  return brTable(index, defaultDepth, depths, branchValues);
   6378 }
   6379 
   6380 bool FunctionCompiler::emitReturn() {
   6381  DefVector values;
   6382  if (!iter().readReturn(&values)) {
   6383    return false;
   6384  }
   6385 
   6386  return returnValues(std::move(values));
   6387 }
   6388 
   6389 bool FunctionCompiler::emitUnreachable() {
   6390  if (!iter().readUnreachable()) {
   6391    return false;
   6392  }
   6393 
   6394  unreachableTrap();
   6395  return true;
   6396 }
   6397 
   6398 bool FunctionCompiler::emitTry() {
   6399  BlockType type;
   6400  if (!iter().readTry(&type)) {
   6401    return false;
   6402  }
   6403 
   6404  return startTry();
   6405 }
   6406 
   6407 bool FunctionCompiler::emitCatch() {
   6408  LabelKind kind;
   6409  uint32_t tagIndex;
   6410  ResultType paramType, resultType;
   6411  DefVector tryValues;
   6412  if (!iter().readCatch(&kind, &tagIndex, &paramType, &resultType,
   6413                        &tryValues)) {
   6414    return false;
   6415  }
   6416 
   6417  // Pushing the results of the previous block, to properly join control flow
   6418  // after the try and after each handler, as well as potential control flow
   6419  // patches from other instrunctions. This is similar to what is done for
   6420  // if-then-else control flow and for most other control control flow joins.
   6421  if (!pushDefs(tryValues)) {
   6422    return false;
   6423  }
   6424 
   6425  return switchToCatch(iter().controlItem(), kind, tagIndex);
   6426 }
   6427 
   6428 bool FunctionCompiler::emitCatchAll() {
   6429  LabelKind kind;
   6430  ResultType paramType, resultType;
   6431  DefVector tryValues;
   6432  if (!iter().readCatchAll(&kind, &paramType, &resultType, &tryValues)) {
   6433    return false;
   6434  }
   6435 
   6436  // Pushing the results of the previous block, to properly join control flow
   6437  // after the try and after each handler, as well as potential control flow
   6438  // patches from other instrunctions.
   6439  if (!pushDefs(tryValues)) {
   6440    return false;
   6441  }
   6442 
   6443  return switchToCatch(iter().controlItem(), kind, CatchAllIndex);
   6444 }
   6445 
   6446 bool FunctionCompiler::emitTryTable() {
   6447  BlockType type;
   6448  TryTableCatchVector catches;
   6449  if (!iter().readTryTable(&type, &catches)) {
   6450    return false;
   6451  }
   6452 
   6453  return startTryTable(std::move(catches));
   6454 }
   6455 
bool FunctionCompiler::emitDelegate() {
  uint32_t relativeDepth;
  ResultType resultType;
  DefVector tryValues;
  if (!iter().readDelegate(&relativeDepth, &resultType, &tryValues)) {
    return false;
  }

  Control& control = iter().controlItem();
  MBasicBlock* block = control.block;
  MOZ_ASSERT(control.tryControl);

  // Unless the entire try-delegate is dead code, delegate any pad-patches from
  // this try to the next try-block above relativeDepth.
  if (block) {
    ControlInstructionVector& padPatches =
        control.tryControl->landingPadPatches;
    if (!delegatePadPatches(padPatches, relativeDepth)) {
      return false;
    }
  }
  // The try control is done once its patches have been re-homed.
  rootCompiler().freeTryControl(std::move(control.tryControl));
  iter().popDelegate();

  // Push the results of the previous block, and join control flow with
  // potential control flow patches from other instructions in the try code.
  // This is similar to what is done for EmitEnd.
  if (!pushDefs(tryValues)) {
    return false;
  }
  DefVector postJoinDefs;
  if (!finishBlock(&postJoinDefs)) {
    return false;
  }
  // In live code the join must produce exactly the declared result types.
  MOZ_ASSERT_IF(!inDeadCode(), postJoinDefs.length() == resultType.length());
  iter().setResults(postJoinDefs.length(), postJoinDefs);

  return true;
}
   6495 
   6496 bool FunctionCompiler::emitThrow() {
   6497  uint32_t tagIndex;
   6498  DefVector argValues;
   6499  if (!iter().readThrow(&tagIndex, &argValues)) {
   6500    return false;
   6501  }
   6502 
   6503  return emitThrow(tagIndex, argValues);
   6504 }
   6505 
   6506 bool FunctionCompiler::emitThrowRef() {
   6507  MDefinition* exnRef;
   6508  if (!iter().readThrowRef(&exnRef)) {
   6509    return false;
   6510  }
   6511 
   6512  return emitThrowRef(exnRef);
   6513 }
   6514 
   6515 bool FunctionCompiler::emitRethrow() {
   6516  uint32_t relativeDepth;
   6517  if (!iter().readRethrow(&relativeDepth)) {
   6518    return false;
   6519  }
   6520 
   6521  return emitRethrow(relativeDepth);
   6522 }
   6523 
bool FunctionCompiler::emitInlineCall(const FuncType& funcType,
                                      uint32_t funcIndex,
                                      InliningHeuristics::CallKind callKind,
                                      const DefVector& args,
                                      DefVector* results) {
  UniqueChars error;
  // Locate the callee's bytecode and wrap it in a FuncCompileInput/Decoder so
  // a nested FunctionCompiler can re-decode the body here.
  const BytecodeRange& funcRange = codeTailMeta()->funcDefRange(funcIndex);
  BytecodeSpan funcBytecode = codeTailMeta()->funcDefBody(funcIndex);
  FuncCompileInput func(funcIndex, funcRange.start, funcBytecode.data(),
                        funcBytecode.data() + funcBytecode.size(),
                        Uint32Vector());
  Decoder d(func.begin, func.end, func.lineOrBytecode, &error);

  // Decode the callee's locals (parameters included) for the nested compiler.
  ValTypeVector locals;
  if (!DecodeLocalEntriesWithParams(d, codeMeta(), funcIndex, &locals)) {
    return false;
  }

  // Obtain a CompileInfo for this inline expansion from the root compiler;
  // a null return aborts the inlining attempt.
  CompileInfo* compileInfo = rootCompiler().startInlineCall(
      this->funcIndex(), bytecodeOffset(), funcIndex, locals.length(),
      funcRange.size(), callKind);
  if (!compileInfo) {
    return false;
  }

  // Compile the callee body inline, seeding its entry state from `args`.
  // The !error asserts check that failures below are OOM-style, not decode
  // errors.
  FunctionCompiler calleeCompiler(this, d, func, locals, *compileInfo);
  if (!calleeCompiler.initInline(args)) {
    MOZ_ASSERT(!error);
    return false;
  }

  if (!calleeCompiler.startBlock()) {
    MOZ_ASSERT(!error);
    return false;
  }

  if (!calleeCompiler.emitBodyExprs()) {
    MOZ_ASSERT(!error);
    return false;
  }

  calleeCompiler.finish();
  rootCompiler_.finishInlineCall();

  // Splice the callee's graph into the caller and collect its return values.
  return finishInlinedCallDirect(calleeCompiler, results);
}
   6570 
bool FunctionCompiler::emitCall(bool asmJSFuncDef) {
  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();

  uint32_t funcIndex;
  DefVector args;
  if (asmJSFuncDef) {
    // asm.js direct calls use the old reader, which is given numFuncImports
    // (presumably to rebase indices past the imports — confirm in OpIter).
    if (!iter().readOldCallDirect(codeMeta().numFuncImports, &funcIndex,
                                  &args)) {
      return false;
    }
  } else {
    if (!iter().readCall(&funcIndex, &args)) {
      return false;
    }
  }

  // In dead code the operands were consumed above; nothing to emit.
  if (inDeadCode()) {
    return true;
  }

  const FuncType& funcType = codeMeta().getFuncType(funcIndex);

  DefVector results;
  if (codeMeta().funcIsImport(funcIndex)) {
    // Recognized builtin-module imports get dedicated codegen instead of the
    // generic import call path.
    BuiltinModuleFuncId knownFuncImport = codeMeta().knownFuncImport(funcIndex);
    if (knownFuncImport != BuiltinModuleFuncId::None) {
      const BuiltinModuleFunc& builtinModuleFunc =
          BuiltinModuleFuncs::getFromId(knownFuncImport);
      return callBuiltinModuleFunc(builtinModuleFunc, args);
    }

    // Ordinary imports are called through their instance-data slot.
    uint32_t instanceDataOffset =
        codeMeta().offsetOfFuncImportInstanceData(funcIndex);
    if (!callImport(instanceDataOffset, lineOrBytecode, funcType, args,
                    &results)) {
      return false;
    }
  } else {
    const auto callKind = InliningHeuristics::CallKind::Direct;
    // Make up a single-entry CallRefHint and enquire about its inlineability.
    CallRefHint hints;
    hints.append(funcIndex);
    hints = auditInlineableCallees(callKind, hints);
    if (!hints.empty()) {
      // Inlining of `funcIndex` was approved.
      if (!emitInlineCall(funcType, funcIndex, callKind, args, &results)) {
        return false;
      }
    } else {
      if (!callDirect(funcType, funcIndex, lineOrBytecode, args, &results)) {
        return false;
      }
    }
  }

  iter().setResults(results.length(), results);
  return true;
}
   6629 
   6630 bool FunctionCompiler::emitCallIndirect(bool oldStyle) {
   6631  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
   6632 
   6633  uint32_t funcTypeIndex;
   6634  uint32_t tableIndex;
   6635  MDefinition* callee;
   6636  DefVector args;
   6637  if (oldStyle) {
   6638    tableIndex = 0;
   6639    if (!iter().readOldCallIndirect(&funcTypeIndex, &callee, &args)) {
   6640      return false;
   6641    }
   6642  } else {
   6643    if (!iter().readCallIndirect(&funcTypeIndex, &tableIndex, &callee, &args)) {
   6644      return false;
   6645    }
   6646  }
   6647 
   6648  if (inDeadCode()) {
   6649    return true;
   6650  }
   6651 
   6652  DefVector results;
   6653  if (!callIndirect(funcTypeIndex, tableIndex, callee, lineOrBytecode, args,
   6654                    &results)) {
   6655    return false;
   6656  }
   6657 
   6658  iter().setResults(results.length(), results);
   6659  return true;
   6660 }
   6661 
#ifdef ENABLE_WASM_JSPI
bool FunctionCompiler::emitStackSwitch() {
  // JS Promise Integration: emit a switch between execution stacks.
  StackSwitchKind kind;
  MDefinition* suspender;
  MDefinition* fn;
  MDefinition* data;
  if (!iter().readStackSwitch(&kind, &suspender, &fn, &data)) {
    return false;
  }
  MDefinition* result = stackSwitch(suspender, fn, data, kind);
  if (!result) {
    return false;
  }

  // Only the switch-to-main form pushes a result value.
  if (kind == StackSwitchKind::SwitchToMain) {
    iter().setResult(result);
  }
  return true;
}
#endif
   6682 
   6683 bool FunctionCompiler::emitReturnCall() {
   6684  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
   6685 
   6686  uint32_t funcIndex;
   6687  DefVector args;
   6688  if (!iter().readReturnCall(&funcIndex, &args)) {
   6689    return false;
   6690  }
   6691 
   6692  if (inDeadCode()) {
   6693    return true;
   6694  }
   6695 
   6696  const FuncType& funcType = codeMeta().getFuncType(funcIndex);
   6697 
   6698  DefVector results;
   6699  if (codeMeta().funcIsImport(funcIndex)) {
   6700    uint32_t globalDataOffset =
   6701        codeMeta().offsetOfFuncImportInstanceData(funcIndex);
   6702    if (!returnCallImport(globalDataOffset, lineOrBytecode, funcType, args,
   6703                          &results)) {
   6704      return false;
   6705    }
   6706  } else {
   6707    if (!returnCallDirect(funcType, funcIndex, lineOrBytecode, args,
   6708                          &results)) {
   6709      return false;
   6710    }
   6711  }
   6712  return true;
   6713 }
   6714 
   6715 bool FunctionCompiler::emitReturnCallIndirect() {
   6716  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
   6717 
   6718  uint32_t funcTypeIndex;
   6719  uint32_t tableIndex;
   6720  MDefinition* callee;
   6721  DefVector args;
   6722  if (!iter().readReturnCallIndirect(&funcTypeIndex, &tableIndex, &callee,
   6723                                     &args)) {
   6724    return false;
   6725  }
   6726 
   6727  if (inDeadCode()) {
   6728    return true;
   6729  }
   6730 
   6731  DefVector results;
   6732  return returnCallIndirect(funcTypeIndex, tableIndex, callee, lineOrBytecode,
   6733                            args, &results);
   6734 }
   6735 
   6736 bool FunctionCompiler::emitReturnCallRef() {
   6737  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
   6738 
   6739  uint32_t funcTypeIndex;
   6740  MDefinition* callee;
   6741  DefVector args;
   6742 
   6743  if (!iter().readReturnCallRef(&funcTypeIndex, &callee, &args)) {
   6744    return false;
   6745  }
   6746 
   6747  if (inDeadCode()) {
   6748    return true;
   6749  }
   6750 
   6751  const FuncType& funcType = codeMeta().types->type(funcTypeIndex).funcType();
   6752  DefVector results;
   6753  return returnCallRef(funcType, callee, lineOrBytecode, args, &results);
   6754 }
   6755 
   6756 bool FunctionCompiler::emitGetLocal() {
   6757  uint32_t id;
   6758  if (!iter().readGetLocal(&id)) {
   6759    return false;
   6760  }
   6761 
   6762  iter().setResult(getLocalDef(id));
   6763  return true;
   6764 }
   6765 
   6766 bool FunctionCompiler::emitSetLocal() {
   6767  uint32_t id;
   6768  MDefinition* value;
   6769  if (!iter().readSetLocal(&id, &value)) {
   6770    return false;
   6771  }
   6772 
   6773  assign(id, value);
   6774  return true;
   6775 }
   6776 
   6777 bool FunctionCompiler::emitTeeLocal() {
   6778  uint32_t id;
   6779  MDefinition* value;
   6780  if (!iter().readTeeLocal(&id, &value)) {
   6781    return false;
   6782  }
   6783 
   6784  assign(id, value);
   6785  return true;
   6786 }
   6787 
bool FunctionCompiler::emitGetGlobal() {
  uint32_t id;
  if (!iter().readGetGlobal(&id)) {
    return false;
  }

  // Non-constant globals are loaded from their storage at runtime.
  const GlobalDesc& global = codeMeta().globals[id];
  if (!global.isConstant()) {
    iter().setResult(loadGlobalVar(global));
    return true;
  }

  // Constant globals fold directly to an MIR constant of the matching type.
  LitVal value = global.constantValue();

  MDefinition* result;
  switch (value.type().kind()) {
    case ValType::I32:
      result = constantI32(int32_t(value.i32()));
      break;
    case ValType::I64:
      result = constantI64(int64_t(value.i64()));
      break;
    case ValType::F32:
      result = constantF32(value.f32());
      break;
    case ValType::F64:
      result = constantF64(value.f64());
      break;
    case ValType::V128:
#ifdef ENABLE_WASM_SIMD
      result = constantV128(value.v128());
      break;
#else
      return iter().fail("Ion has no SIMD support yet");
#endif
    case ValType::Ref:
      // The only constant reference value handled here is null, per the
      // assert below.
      MOZ_ASSERT(value.ref().isNull());
      result = constantNullRef(MaybeRefType(value.type().refType()));
      break;
    default:
      MOZ_CRASH("unexpected type in EmitGetGlobal");
  }

  iter().setResult(result);
  return true;
}
   6834 
   6835 bool FunctionCompiler::emitSetGlobal() {
   6836  uint32_t bytecodeOffset = readBytecodeOffset();
   6837 
   6838  uint32_t id;
   6839  MDefinition* value;
   6840  if (!iter().readSetGlobal(&id, &value)) {
   6841    return false;
   6842  }
   6843 
   6844  const GlobalDesc& global = codeMeta().globals[id];
   6845  MOZ_ASSERT(global.isMutable());
   6846  return storeGlobalVar(bytecodeOffset, global, value);
   6847 }
   6848 
   6849 bool FunctionCompiler::emitTeeGlobal() {
   6850  uint32_t bytecodeOffset = readBytecodeOffset();
   6851 
   6852  uint32_t id;
   6853  MDefinition* value;
   6854  if (!iter().readTeeGlobal(&id, &value)) {
   6855    return false;
   6856  }
   6857 
   6858  const GlobalDesc& global = codeMeta().globals[id];
   6859  MOZ_ASSERT(global.isMutable());
   6860 
   6861  return storeGlobalVar(bytecodeOffset, global, value);
   6862 }
   6863 
   6864 template <typename MIRClass>
   6865 bool FunctionCompiler::emitUnary(ValType operandType) {
   6866  MDefinition* input;
   6867  if (!iter().readUnary(operandType, &input)) {
   6868    return false;
   6869  }
   6870 
   6871  iter().setResult(unary<MIRClass>(input));
   6872  return true;
   6873 }
   6874 
   6875 template <typename MIRClass>
   6876 bool FunctionCompiler::emitConversion(ValType operandType, ValType resultType) {
   6877  MDefinition* input;
   6878  if (!iter().readConversion(operandType, resultType, &input)) {
   6879    return false;
   6880  }
   6881 
   6882  iter().setResult(unary<MIRClass>(input));
   6883  return true;
   6884 }
   6885 
   6886 template <typename MIRClass>
   6887 bool FunctionCompiler::emitUnaryWithType(ValType operandType, MIRType mirType) {
   6888  MDefinition* input;
   6889  if (!iter().readUnary(operandType, &input)) {
   6890    return false;
   6891  }
   6892 
   6893  iter().setResult(unary<MIRClass>(input, mirType));
   6894  return true;
   6895 }
   6896 
   6897 template <typename MIRClass>
   6898 bool FunctionCompiler::emitConversionWithType(ValType operandType,
   6899                                              ValType resultType,
   6900                                              MIRType mirType) {
   6901  MDefinition* input;
   6902  if (!iter().readConversion(operandType, resultType, &input)) {
   6903    return false;
   6904  }
   6905 
   6906  iter().setResult(unary<MIRClass>(input, mirType));
   6907  return true;
   6908 }
   6909 
bool FunctionCompiler::emitTruncate(ValType operandType, ValType resultType,
                                    bool isUnsigned, bool isSaturating) {
  MDefinition* input = nullptr;
  if (!iter().readConversion(operandType, resultType, &input)) {
    return false;
  }

  // Encode signedness/saturation into the flag word the truncation MIR nodes
  // consume.
  TruncFlags flags = 0;
  if (isUnsigned) {
    flags |= TRUNC_UNSIGNED;
  }
  if (isSaturating) {
    flags |= TRUNC_SATURATING;
  }
  if (resultType == ValType::I32) {
    // asm.js truncation predates wasm and picks the node by input type;
    // plain wasm uses MWasmTruncateToInt32 with the flags above.
    if (codeMeta().isAsmJS()) {
      if (inDeadCode()) {
        // The read callsite line, produced by prepareCall, has to be
        // consumed -- the MWasmBuiltinTruncateToInt32 and MTruncateToInt32
        // will not create MIR node.
        (void)readCallSiteLineOrBytecode();
        iter().setResult(nullptr);
      } else if (input && (input->type() == MIRType::Double ||
                           input->type() == MIRType::Float32)) {
        iter().setResult(unary<MWasmBuiltinTruncateToInt32>(input));
      } else {
        iter().setResult(unary<MTruncateToInt32>(input));
      }
    } else {
      iter().setResult(truncate<MWasmTruncateToInt32>(input, flags));
    }
  } else {
    MOZ_ASSERT(resultType == ValType::I64);
    MOZ_ASSERT(!codeMeta().isAsmJS());
#if defined(JS_CODEGEN_ARM)
    // On ARM the 64-bit truncation is routed through the instance
    // (presumably lacking suitable ISA support — see truncateWithInstance).
    iter().setResult(truncateWithInstance(input, flags));
#else
    iter().setResult(truncate<MWasmTruncateToInt64>(input, flags));
#endif
  }
  return true;
}
   6952 
   6953 bool FunctionCompiler::emitSignExtend(uint32_t srcSize, uint32_t targetSize) {
   6954  MDefinition* input;
   6955  ValType type = targetSize == 4 ? ValType::I32 : ValType::I64;
   6956  if (!iter().readConversion(type, type, &input)) {
   6957    return false;
   6958  }
   6959 
   6960  iter().setResult(signExtend(input, srcSize, targetSize));
   6961  return true;
   6962 }
   6963 
   6964 bool FunctionCompiler::emitExtendI32(bool isUnsigned) {
   6965  MDefinition* input;
   6966  if (!iter().readConversion(ValType::I32, ValType::I64, &input)) {
   6967    return false;
   6968  }
   6969 
   6970  iter().setResult(extendI32(input, isUnsigned));
   6971  return true;
   6972 }
   6973 
   6974 bool FunctionCompiler::emitConvertI64ToFloatingPoint(ValType resultType,
   6975                                                     MIRType mirType,
   6976                                                     bool isUnsigned) {
   6977  MDefinition* input;
   6978  if (!iter().readConversion(ValType::I64, resultType, &input)) {
   6979    return false;
   6980  }
   6981 
   6982  iter().setResult(convertI64ToFloatingPoint(input, mirType, isUnsigned));
   6983  return true;
   6984 }
   6985 
   6986 bool FunctionCompiler::emitReinterpret(ValType resultType, ValType operandType,
   6987                                       MIRType mirType) {
   6988  MDefinition* input;
   6989  if (!iter().readConversion(operandType, resultType, &input)) {
   6990    return false;
   6991  }
   6992 
   6993  iter().setResult(unary<MReinterpretCast>(input, mirType));
   6994  return true;
   6995 }
   6996 
   6997 bool FunctionCompiler::emitAdd(ValType type, MIRType mirType) {
   6998  MDefinition* lhs;
   6999  MDefinition* rhs;
   7000  if (!iter().readBinary(type, &lhs, &rhs)) {
   7001    return false;
   7002  }
   7003 
   7004  iter().setResult(add(lhs, rhs, mirType));
   7005  return true;
   7006 }
   7007 
   7008 bool FunctionCompiler::emitSub(ValType type, MIRType mirType) {
   7009  MDefinition* lhs;
   7010  MDefinition* rhs;
   7011  if (!iter().readBinary(type, &lhs, &rhs)) {
   7012    return false;
   7013  }
   7014 
   7015  iter().setResult(sub(lhs, rhs, mirType));
   7016  return true;
   7017 }
   7018 
   7019 bool FunctionCompiler::emitRotate(ValType type, bool isLeftRotation) {
   7020  MDefinition* lhs;
   7021  MDefinition* rhs;
   7022  if (!iter().readBinary(type, &lhs, &rhs)) {
   7023    return false;
   7024  }
   7025 
   7026  MDefinition* result = rotate(lhs, rhs, type.toMIRType(), isLeftRotation);
   7027  iter().setResult(result);
   7028  return true;
   7029 }
   7030 
   7031 bool FunctionCompiler::emitBitNot(ValType operandType, MIRType mirType) {
   7032  MDefinition* input;
   7033  if (!iter().readUnary(operandType, &input)) {
   7034    return false;
   7035  }
   7036 
   7037  iter().setResult(bitnot(input, mirType));
   7038  return true;
   7039 }
   7040 
   7041 bool FunctionCompiler::emitBitwiseAndOrXor(
   7042    ValType operandType, MIRType mirType,
   7043    MWasmBinaryBitwise::SubOpcode subOpc) {
   7044  MDefinition* lhs;
   7045  MDefinition* rhs;
   7046  if (!iter().readBinary(operandType, &lhs, &rhs)) {
   7047    return false;
   7048  }
   7049 
   7050  iter().setResult(binary<MWasmBinaryBitwise>(lhs, rhs, mirType, subOpc));
   7051  return true;
   7052 }
   7053 
   7054 template <typename MIRClass>
   7055 bool FunctionCompiler::emitShift(ValType operandType, MIRType mirType) {
   7056  MDefinition* lhs;
   7057  MDefinition* rhs;
   7058  if (!iter().readBinary(operandType, &lhs, &rhs)) {
   7059    return false;
   7060  }
   7061 
   7062  iter().setResult(binary<MIRClass>(lhs, rhs, mirType));
   7063  return true;
   7064 }
   7065 
   7066 bool FunctionCompiler::emitUrsh(ValType operandType, MIRType mirType) {
   7067  MDefinition* lhs;
   7068  MDefinition* rhs;
   7069  if (!iter().readBinary(operandType, &lhs, &rhs)) {
   7070    return false;
   7071  }
   7072 
   7073  iter().setResult(ursh(lhs, rhs, mirType));
   7074  return true;
   7075 }
   7076 
   7077 bool FunctionCompiler::emitMul(ValType operandType, MIRType mirType) {
   7078  MDefinition* lhs;
   7079  MDefinition* rhs;
   7080  if (!iter().readBinary(operandType, &lhs, &rhs)) {
   7081    return false;
   7082  }
   7083 
   7084  iter().setResult(
   7085      mul(lhs, rhs, mirType,
   7086          mirType == MIRType::Int32 ? MMul::Integer : MMul::Normal));
   7087  return true;
   7088 }
   7089 
   7090 bool FunctionCompiler::emitDiv(ValType operandType, MIRType mirType,
   7091                               bool isUnsigned) {
   7092  MDefinition* lhs;
   7093  MDefinition* rhs;
   7094  if (!iter().readBinary(operandType, &lhs, &rhs)) {
   7095    return false;
   7096  }
   7097 
   7098  iter().setResult(div(lhs, rhs, mirType, isUnsigned));
   7099  return true;
   7100 }
   7101 
   7102 bool FunctionCompiler::emitRem(ValType operandType, MIRType mirType,
   7103                               bool isUnsigned) {
   7104  MDefinition* lhs;
   7105  MDefinition* rhs;
   7106  if (!iter().readBinary(operandType, &lhs, &rhs)) {
   7107    return false;
   7108  }
   7109 
   7110  iter().setResult(mod(lhs, rhs, mirType, isUnsigned));
   7111  return true;
   7112 }
   7113 
   7114 bool FunctionCompiler::emitMinMax(ValType operandType, MIRType mirType,
   7115                                  bool isMax) {
   7116  MDefinition* lhs;
   7117  MDefinition* rhs;
   7118  if (!iter().readBinary(operandType, &lhs, &rhs)) {
   7119    return false;
   7120  }
   7121 
   7122  iter().setResult(minMax(lhs, rhs, mirType, isMax));
   7123  return true;
   7124 }
   7125 
   7126 bool FunctionCompiler::emitCopySign(ValType operandType) {
   7127  MDefinition* lhs;
   7128  MDefinition* rhs;
   7129  if (!iter().readBinary(operandType, &lhs, &rhs)) {
   7130    return false;
   7131  }
   7132 
   7133  iter().setResult(binary<MCopySign>(lhs, rhs, operandType.toMIRType()));
   7134  return true;
   7135 }
   7136 
   7137 bool FunctionCompiler::emitComparison(ValType operandType, JSOp compareOp,
   7138                                      MCompare::CompareType compareType) {
   7139  MDefinition* lhs;
   7140  MDefinition* rhs;
   7141  if (!iter().readComparison(operandType, &lhs, &rhs)) {
   7142    return false;
   7143  }
   7144 
   7145  iter().setResult(compare(lhs, rhs, compareOp, compareType));
   7146  return true;
   7147 }
   7148 
   7149 bool FunctionCompiler::emitSelect(bool typed) {
   7150  StackType type;
   7151  MDefinition* trueValue;
   7152  MDefinition* falseValue;
   7153  MDefinition* condition;
   7154  if (!iter().readSelect(typed, &type, &trueValue, &falseValue, &condition)) {
   7155    return false;
   7156  }
   7157 
   7158  iter().setResult(select(trueValue, falseValue, condition));
   7159  return true;
   7160 }
   7161 
   7162 bool FunctionCompiler::emitLoad(ValType type, Scalar::Type viewType) {
   7163  LinearMemoryAddress<MDefinition*> addr;
   7164  if (!iter().readLoad(type, Scalar::byteSize(viewType), &addr)) {
   7165    return false;
   7166  }
   7167 
   7168  MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
   7169                          trapSiteDesc(), hugeMemoryEnabled(addr.memoryIndex));
   7170  auto* ins = load(addr.base, &access, type);
   7171  if (!inDeadCode() && !ins) {
   7172    return false;
   7173  }
   7174 
   7175  iter().setResult(ins);
   7176  return true;
   7177 }
   7178 
   7179 bool FunctionCompiler::emitStore(ValType resultType, Scalar::Type viewType) {
   7180  LinearMemoryAddress<MDefinition*> addr;
   7181  MDefinition* value;
   7182  if (!iter().readStore(resultType, Scalar::byteSize(viewType), &addr,
   7183                        &value)) {
   7184    return false;
   7185  }
   7186 
   7187  MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
   7188                          trapSiteDesc(), hugeMemoryEnabled(addr.memoryIndex));
   7189 
   7190  store(addr.base, &access, value);
   7191  return true;
   7192 }
   7193 
// Emits an asm.js "tee" store: the value is written to memory and (per tee
// semantics, presumably arranged by readTeeStore) also remains available as
// the expression result. Only reachable from asm.js, which always uses a
// single 32-bit memory.
bool FunctionCompiler::emitTeeStore(ValType resultType, Scalar::Type viewType) {
  LinearMemoryAddress<MDefinition*> addr;
  MDefinition* value;
  if (!iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr,
                           &value)) {
    return false;
  }

  MOZ_ASSERT(isMem32(addr.memoryIndex));  // asm.js opcode
  MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
                          trapSiteDesc(), hugeMemoryEnabled(addr.memoryIndex));

  store(addr.base, &access, value);
  return true;
}
   7209 
// Emits an asm.js tee-store that coerces the value between f32 and f64
// before storing: an F32 value stored through a Float64 view is widened with
// MToDouble, and an F64 value stored through a Float32 view is narrowed with
// MToFloat32. Any other type/view combination is a compiler bug.
bool FunctionCompiler::emitTeeStoreWithCoercion(ValType resultType,
                                                Scalar::Type viewType) {
  LinearMemoryAddress<MDefinition*> addr;
  MDefinition* value;
  if (!iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr,
                           &value)) {
    return false;
  }

  if (resultType == ValType::F32 && viewType == Scalar::Float64) {
    value = unary<MToDouble>(value);
  } else if (resultType == ValType::F64 && viewType == Scalar::Float32) {
    value = unary<MToFloat32>(value);
  } else {
    MOZ_CRASH("unexpected coerced store");
  }

  MOZ_ASSERT(isMem32(addr.memoryIndex));  // asm.js opcode
  MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
                          trapSiteDesc(), hugeMemoryEnabled(addr.memoryIndex));

  store(addr.base, &access, value);
  return true;
}
   7234 
// Tries to lower a unary math builtin call to an inline MNearbyInt
// instruction instead of an out-of-line call. Returns true when the result
// was emitted inline (and already set on the iterator). Returns false —
// meaning the caller must emit a real call — when `input` is null
// (presumably dead code), when `callee` is not a rounding function, or when
// the assembler lacks support for the required rounding mode.
bool FunctionCompiler::tryInlineUnaryBuiltin(SymbolicAddress callee,
                                             MDefinition* input) {
  if (!input) {
    return false;
  }

  MOZ_ASSERT(IsFloatingPointType(input->type()));

  RoundingMode mode;
  if (!IsRoundingFunction(callee, &mode)) {
    return false;
  }

  if (!MNearbyInt::HasAssemblerSupport(mode)) {
    return false;
  }

  iter().setResult(nearbyInt(input, mode));
  return true;
}
   7255 
// Emits a call to a one-argument math builtin. First tries to inline the
// operation (see tryInlineUnaryBuiltin, which handles rounding functions);
// otherwise emits an out-of-line call to the builtin and uses its return
// value as the result.
bool FunctionCompiler::emitUnaryMathBuiltinCall(
    const SymbolicAddressSignature& callee) {
  MOZ_ASSERT(callee.numArgs == 1);

  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();

  MDefinition* input;
  if (!iter().readUnary(ValType::fromMIRType(callee.argTypes[0]), &input)) {
    return false;
  }

  // Fast path: the inline lowering sets the result itself on success.
  if (tryInlineUnaryBuiltin(callee.identity, input)) {
    return true;
  }

  MDefinition* def;
  if (!builtinCall1(callee, lineOrBytecode, input, &def)) {
    return false;
  }

  iter().setResult(def);
  return true;
}
   7279 
// Emits an out-of-line call to a two-argument math builtin. Both arguments
// must have the same type (asserted below); the call's return value becomes
// the operand result.
bool FunctionCompiler::emitBinaryMathBuiltinCall(
    const SymbolicAddressSignature& callee) {
  MOZ_ASSERT(callee.numArgs == 2);
  MOZ_ASSERT(callee.argTypes[0] == callee.argTypes[1]);

  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();

  MDefinition* lhs;
  MDefinition* rhs;
  // This call to readBinary assumes both operands have the same type.
  if (!iter().readBinary(ValType::fromMIRType(callee.argTypes[0]), &lhs,
                         &rhs)) {
    return false;
  }

  MDefinition* def;
  if (!builtinCall2(callee, lineOrBytecode, lhs, rhs, &def)) {
    return false;
  }

  iter().setResult(def);
  return true;
}
   7303 
// memory.grow: calls into the instance to grow memory `memoryIndex` by
// `delta`, pushing the instance call's return value as the result. The
// builtin is selected by the memory's address type (32- vs 64-bit).
bool FunctionCompiler::emitMemoryGrow() {
  uint32_t bytecodeOffset = readBytecodeOffset();

  MDefinition* delta;
  uint32_t memoryIndex;
  if (!iter().readMemoryGrow(&memoryIndex, &delta)) {
    return false;
  }

  // Nothing to emit in dead code; validation has already consumed operands.
  if (inDeadCode()) {
    return true;
  }

  MDefinition* memoryIndexValue = constantI32(int32_t(memoryIndex));
  if (!memoryIndexValue) {
    return false;
  }

  const SymbolicAddressSignature& callee =
      isMem32(memoryIndex) ? SASigMemoryGrowM32 : SASigMemoryGrowM64;

  MDefinition* ret;
  if (!emitInstanceCall2(bytecodeOffset, callee, delta, memoryIndexValue,
                         &ret)) {
    return false;
  }

  iter().setResult(ret);
  return true;
}
   7334 
// memory.size: calls into the instance to query the current size of memory
// `memoryIndex`, pushing the instance call's return value as the result. The
// builtin is selected by the memory's address type (32- vs 64-bit).
bool FunctionCompiler::emitMemorySize() {
  uint32_t bytecodeOffset = readBytecodeOffset();

  uint32_t memoryIndex;
  if (!iter().readMemorySize(&memoryIndex)) {
    return false;
  }

  // Nothing to emit in dead code.
  if (inDeadCode()) {
    return true;
  }

  MDefinition* memoryIndexValue = constantI32(int32_t(memoryIndex));
  if (!memoryIndexValue) {
    return false;
  }

  const SymbolicAddressSignature& callee =
      isMem32(memoryIndex) ? SASigMemorySizeM32 : SASigMemorySizeM64;

  MDefinition* ret;
  if (!emitInstanceCall1(bytecodeOffset, callee, memoryIndexValue, &ret)) {
    return false;
  }

  iter().setResult(ret);
  return true;
}
   7363 
// Emits an atomic compare-exchange on memory with full synchronization
// (Synchronization::Full). A null instruction is tolerated in dead code and
// only treated as failure when the code is live.
bool FunctionCompiler::emitAtomicCmpXchg(ValType type, Scalar::Type viewType) {
  LinearMemoryAddress<MDefinition*> addr;
  MDefinition* oldValue;
  MDefinition* newValue;
  if (!iter().readAtomicCmpXchg(&addr, type, byteSize(viewType), &oldValue,
                                &newValue)) {
    return false;
  }

  MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
                          trapSiteDesc(), hugeMemoryEnabled(addr.memoryIndex),
                          Synchronization::Full());
  auto* ins =
      atomicCompareExchangeHeap(addr.base, &access, type, oldValue, newValue);
  if (!inDeadCode() && !ins) {
    return false;
  }

  iter().setResult(ins);
  return true;
}
   7385 
   7386 bool FunctionCompiler::emitAtomicLoad(ValType type, Scalar::Type viewType) {
   7387  LinearMemoryAddress<MDefinition*> addr;
   7388  if (!iter().readAtomicLoad(&addr, type, byteSize(viewType))) {
   7389    return false;
   7390  }
   7391 
   7392  MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
   7393                          trapSiteDesc(), hugeMemoryEnabled(addr.memoryIndex),
   7394                          Synchronization::Load());
   7395  auto* ins = load(addr.base, &access, type);
   7396  if (!inDeadCode() && !ins) {
   7397    return false;
   7398  }
   7399 
   7400  iter().setResult(ins);
   7401  return true;
   7402 }
   7403 
// Emits an atomic read-modify-write (add/sub/and/or/xor, selected by `op`)
// with full synchronization (Synchronization::Full). A null instruction is
// tolerated in dead code and only treated as failure when the code is live.
bool FunctionCompiler::emitAtomicRMW(ValType type, Scalar::Type viewType,
                                     jit::AtomicOp op) {
  LinearMemoryAddress<MDefinition*> addr;
  MDefinition* value;
  if (!iter().readAtomicRMW(&addr, type, byteSize(viewType), &value)) {
    return false;
  }

  MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
                          trapSiteDesc(), hugeMemoryEnabled(addr.memoryIndex),
                          Synchronization::Full());
  auto* ins = atomicBinopHeap(op, addr.base, &access, type, value);
  if (!inDeadCode() && !ins) {
    return false;
  }

  iter().setResult(ins);
  return true;
}
   7423 
   7424 bool FunctionCompiler::emitAtomicStore(ValType type, Scalar::Type viewType) {
   7425  LinearMemoryAddress<MDefinition*> addr;
   7426  MDefinition* value;
   7427  if (!iter().readAtomicStore(&addr, type, byteSize(viewType), &value)) {
   7428    return false;
   7429  }
   7430 
   7431  MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
   7432                          trapSiteDesc(), hugeMemoryEnabled(addr.memoryIndex),
   7433                          Synchronization::Store());
   7434  store(addr.base, &access, value);
   7435  return true;
   7436 }
   7437 
// memory.atomic.wait32/wait64: computes the effective address of the wait
// location and calls into the instance to perform the wait. `byteSize` must
// match `type`'s size (asserted). The builtin is selected by the memory's
// address type (32/64-bit) and the operand type; the instance call's return
// value becomes the result.
bool FunctionCompiler::emitWait(ValType type, uint32_t byteSize) {
  MOZ_ASSERT(type == ValType::I32 || type == ValType::I64);
  MOZ_ASSERT(type.size() == byteSize);

  uint32_t bytecodeOffset = readBytecodeOffset();

  LinearMemoryAddress<MDefinition*> addr;
  MDefinition* expected;
  MDefinition* timeout;
  if (!iter().readWait(&addr, type, byteSize, &expected, &timeout)) {
    return false;
  }

  // Nothing to emit in dead code.
  if (inDeadCode()) {
    return true;
  }

  // The access descriptor carries the alignment/offset information used by
  // the effective-address computation below.
  MemoryAccessDesc access(addr.memoryIndex,
                          type == ValType::I32 ? Scalar::Int32 : Scalar::Int64,
                          addr.align, addr.offset, trapSiteDesc(),
                          hugeMemoryEnabled(addr.memoryIndex));
  MDefinition* ptr = computeEffectiveAddress(addr.base, &access);
  if (!ptr) {
    return false;
  }

  MDefinition* memoryIndex = constantI32(int32_t(addr.memoryIndex));
  if (!memoryIndex) {
    return false;
  }

  const SymbolicAddressSignature& callee =
      isMem32(addr.memoryIndex)
          ? (type == ValType::I32 ? SASigWaitI32M32 : SASigWaitI64M32)
          : (type == ValType::I32 ? SASigWaitI32M64 : SASigWaitI64M64);

  MDefinition* ret;
  if (!emitInstanceCall4(bytecodeOffset, callee, ptr, expected, timeout,
                         memoryIndex, &ret)) {
    return false;
  }

  iter().setResult(ret);
  return true;
}
   7483 
   7484 bool FunctionCompiler::emitFence() {
   7485  if (!iter().readFence()) {
   7486    return false;
   7487  }
   7488 
   7489  fence();
   7490  return true;
   7491 }
   7492 
// memory.atomic.notify: computes the effective address of the notify
// location and calls into the instance to wake up to `count` waiters. The
// builtin is selected by the memory's address type; the instance call's
// return value becomes the result.
bool FunctionCompiler::emitNotify() {
  uint32_t bytecodeOffset = readBytecodeOffset();

  LinearMemoryAddress<MDefinition*> addr;
  MDefinition* count;
  if (!iter().readNotify(&addr, &count)) {
    return false;
  }

  // Nothing to emit in dead code.
  if (inDeadCode()) {
    return true;
  }

  // The access descriptor carries the alignment/offset information used by
  // the effective-address computation below.
  MemoryAccessDesc access(addr.memoryIndex, Scalar::Int32, addr.align,
                          addr.offset, trapSiteDesc(),
                          hugeMemoryEnabled(addr.memoryIndex));
  MDefinition* ptr = computeEffectiveAddress(addr.base, &access);
  if (!ptr) {
    return false;
  }

  MDefinition* memoryIndex = constantI32(int32_t(addr.memoryIndex));
  if (!memoryIndex) {
    return false;
  }

  const SymbolicAddressSignature& callee =
      isMem32(addr.memoryIndex) ? SASigWakeM32 : SASigWakeM64;

  MDefinition* ret;
  if (!emitInstanceCall3(bytecodeOffset, callee, ptr, count, memoryIndex,
                         &ret)) {
    return false;
  }

  iter().setResult(ret);
  return true;
}
   7531 
   7532 bool FunctionCompiler::emitAtomicXchg(ValType type, Scalar::Type viewType) {
   7533  LinearMemoryAddress<MDefinition*> addr;
   7534  MDefinition* value;
   7535  if (!iter().readAtomicRMW(&addr, type, byteSize(viewType), &value)) {
   7536    return false;
   7537  }
   7538 
   7539  MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
   7540                          trapSiteDesc(), hugeMemoryEnabled(addr.memoryIndex),
   7541                          Synchronization::Full());
   7542  MDefinition* ins = atomicExchangeHeap(addr.base, &access, type, value);
   7543  if (!inDeadCode() && !ins) {
   7544    return false;
   7545  }
   7546 
   7547  iter().setResult(ins);
   7548  return true;
   7549 }
   7550 
// Emits an instance call implementing memory.copy. A copy within a single
// memory uses a specialized builtin (shared/unshared x mem32/mem64) that
// takes the memory base directly. A cross-memory copy goes through the
// generic SASigMemCopyAny builtin, with 32-bit addresses zero-extended first
// (and the length widened if either memory is 32-bit).
bool FunctionCompiler::emitMemCopyCall(uint32_t dstMemIndex,
                                       uint32_t srcMemIndex, MDefinition* dst,
                                       MDefinition* src, MDefinition* len) {
  uint32_t bytecodeOffset = readBytecodeOffset();

  if (dstMemIndex == srcMemIndex) {
    const SymbolicAddressSignature& callee =
        (codeMeta().usesSharedMemory(dstMemIndex)
             ? (isMem32(dstMemIndex) ? SASigMemCopySharedM32
                                     : SASigMemCopySharedM64)
             : (isMem32(dstMemIndex) ? SASigMemCopyM32 : SASigMemCopyM64));
    MDefinition* base = memoryBase(dstMemIndex);
    if (!base) {
      return false;
    }
    return emitInstanceCall4(bytecodeOffset, callee, dst, src, len, base);
  }

  AddressType dstIndexType = codeMeta().memories[dstMemIndex].addressType();
  AddressType srcIndexType = codeMeta().memories[srcMemIndex].addressType();

  // Widen 32-bit addresses (and the length, if either side is 32-bit) so all
  // operands passed to the generic builtin are 64-bit.
  if (dstIndexType == AddressType::I32) {
    dst = extendI32(dst, /*isUnsigned=*/true);
    if (!dst) {
      return false;
    }
  }
  if (srcIndexType == AddressType::I32) {
    src = extendI32(src, /*isUnsigned=*/true);
    if (!src) {
      return false;
    }
  }
  if (dstIndexType == AddressType::I32 || srcIndexType == AddressType::I32) {
    len = extendI32(len, /*isUnsigned=*/true);
    if (!len) {
      return false;
    }
  }

  MDefinition* dstMemIndexValue = constantI32(int32_t(dstMemIndex));
  if (!dstMemIndexValue) {
    return false;
  }

  MDefinition* srcMemIndexValue = constantI32(int32_t(srcMemIndex));
  if (!srcMemIndexValue) {
    return false;
  }

  return emitInstanceCall5(bytecodeOffset, SASigMemCopyAny, dst, src, len,
                           dstMemIndexValue, srcMemIndexValue);
}
   7604 
// Emits a fully unrolled memory.copy for a small compile-time-constant
// length. All source bytes are loaded first (low to high, widest transfers
// first), then all stores are performed (high to low, popping the loaded
// values in reverse), so an out-of-bounds source traps before any byte is
// written and an out-of-bounds destination traps on the first (highest)
// store before any lower byte is written.
bool FunctionCompiler::emitMemCopyInline(uint32_t memoryIndex, MDefinition* dst,
                                         MDefinition* src, uint32_t length) {
  MOZ_ASSERT(length != 0 && length <= MaxInlineMemoryCopyLength);

  // Compute the number of copies of each width we will need to do
  size_t remainder = length;
#ifdef ENABLE_WASM_SIMD
  size_t numCopies16 = 0;
  if (MacroAssembler::SupportsFastUnalignedFPAccesses()) {
    numCopies16 = remainder / sizeof(V128);
    remainder %= sizeof(V128);
  }
#endif
#ifdef JS_64BIT
  size_t numCopies8 = remainder / sizeof(uint64_t);
  remainder %= sizeof(uint64_t);
#endif
  size_t numCopies4 = remainder / sizeof(uint32_t);
  remainder %= sizeof(uint32_t);
  size_t numCopies2 = remainder / sizeof(uint16_t);
  remainder %= sizeof(uint16_t);
  size_t numCopies1 = remainder;

  // Load all source bytes from low to high using the widest transfer width we
  // can for the system. We will trap without writing anything if any source
  // byte is out-of-bounds.
  size_t offset = 0;
  DefVector loadedValues;

#ifdef ENABLE_WASM_SIMD
  for (uint32_t i = 0; i < numCopies16; i++) {
    MemoryAccessDesc access(memoryIndex, Scalar::Simd128, 1, offset,
                            trapSiteDesc(), hugeMemoryEnabled(memoryIndex));
    auto* loadValue = load(src, &access, ValType::V128);
    if (!loadValue || !loadedValues.append(loadValue)) {
      return false;
    }

    offset += sizeof(V128);
  }
#endif

#ifdef JS_64BIT
  for (uint32_t i = 0; i < numCopies8; i++) {
    MemoryAccessDesc access(memoryIndex, Scalar::Int64, 1, offset,
                            trapSiteDesc(), hugeMemoryEnabled(memoryIndex));
    auto* loadValue = load(src, &access, ValType::I64);
    if (!loadValue || !loadedValues.append(loadValue)) {
      return false;
    }

    offset += sizeof(uint64_t);
  }
#endif

  for (uint32_t i = 0; i < numCopies4; i++) {
    MemoryAccessDesc access(memoryIndex, Scalar::Uint32, 1, offset,
                            trapSiteDesc(), hugeMemoryEnabled(memoryIndex));
    auto* loadValue = load(src, &access, ValType::I32);
    if (!loadValue || !loadedValues.append(loadValue)) {
      return false;
    }

    offset += sizeof(uint32_t);
  }

  if (numCopies2) {
    MemoryAccessDesc access(memoryIndex, Scalar::Uint16, 1, offset,
                            trapSiteDesc(), hugeMemoryEnabled(memoryIndex));
    auto* loadValue = load(src, &access, ValType::I32);
    if (!loadValue || !loadedValues.append(loadValue)) {
      return false;
    }

    offset += sizeof(uint16_t);
  }

  if (numCopies1) {
    MemoryAccessDesc access(memoryIndex, Scalar::Uint8, 1, offset,
                            trapSiteDesc(), hugeMemoryEnabled(memoryIndex));
    auto* loadValue = load(src, &access, ValType::I32);
    if (!loadValue || !loadedValues.append(loadValue)) {
      return false;
    }
  }

  // Store all source bytes to the destination from high to low. We will trap
  // without writing anything on the first store if any dest byte is
  // out-of-bounds.
  offset = length;

  if (numCopies1) {
    offset -= sizeof(uint8_t);

    MemoryAccessDesc access(memoryIndex, Scalar::Uint8, 1, offset,
                            trapSiteDesc(), hugeMemoryEnabled(memoryIndex));
    auto* value = loadedValues.popCopy();
    store(dst, &access, value);
  }

  if (numCopies2) {
    offset -= sizeof(uint16_t);

    MemoryAccessDesc access(memoryIndex, Scalar::Uint16, 1, offset,
                            trapSiteDesc(), hugeMemoryEnabled(memoryIndex));
    auto* value = loadedValues.popCopy();
    store(dst, &access, value);
  }

  for (uint32_t i = 0; i < numCopies4; i++) {
    offset -= sizeof(uint32_t);

    MemoryAccessDesc access(memoryIndex, Scalar::Uint32, 1, offset,
                            trapSiteDesc(), hugeMemoryEnabled(memoryIndex));
    auto* value = loadedValues.popCopy();
    store(dst, &access, value);
  }

#ifdef JS_64BIT
  for (uint32_t i = 0; i < numCopies8; i++) {
    offset -= sizeof(uint64_t);

    MemoryAccessDesc access(memoryIndex, Scalar::Int64, 1, offset,
                            trapSiteDesc(), hugeMemoryEnabled(memoryIndex));
    auto* value = loadedValues.popCopy();
    store(dst, &access, value);
  }
#endif

#ifdef ENABLE_WASM_SIMD
  for (uint32_t i = 0; i < numCopies16; i++) {
    offset -= sizeof(V128);

    MemoryAccessDesc access(memoryIndex, Scalar::Simd128, 1, offset,
                            trapSiteDesc(), hugeMemoryEnabled(memoryIndex));
    auto* value = loadedValues.popCopy();
    store(dst, &access, value);
  }
#endif

  return true;
}
   7747 
   7748 bool FunctionCompiler::emitMemCopy() {
   7749  MDefinition *dst, *src, *len;
   7750  uint32_t dstMemIndex;
   7751  uint32_t srcMemIndex;
   7752  if (!iter().readMemOrTableCopy(true, &dstMemIndex, &dst, &srcMemIndex, &src,
   7753                                 &len)) {
   7754    return false;
   7755  }
   7756 
   7757  if (inDeadCode()) {
   7758    return true;
   7759  }
   7760 
   7761  if (dstMemIndex == srcMemIndex && len->isConstant()) {
   7762    uint64_t length = isMem32(dstMemIndex) ? len->toConstant()->toInt32()
   7763                                           : len->toConstant()->toInt64();
   7764    static_assert(MaxInlineMemoryCopyLength <= UINT32_MAX);
   7765    if (length != 0 && length <= MaxInlineMemoryCopyLength) {
   7766      return emitMemCopyInline(dstMemIndex, dst, src, uint32_t(length));
   7767    }
   7768  }
   7769 
   7770  return emitMemCopyCall(dstMemIndex, srcMemIndex, dst, src, len);
   7771 }
   7772 
// table.copy: copies entries between tables via an instance call. Table
// addresses may be 64-bit (table64); clampTableAddressToI32 narrows them to
// i32 before the call. The length is treated as 64-bit only when both
// tables are 64-bit.
bool FunctionCompiler::emitTableCopy() {
  MDefinition *dst, *src, *len;
  uint32_t dstTableIndex;
  uint32_t srcTableIndex;
  if (!iter().readMemOrTableCopy(false, &dstTableIndex, &dst, &srcTableIndex,
                                 &src, &len)) {
    return false;
  }

  // Nothing to emit in dead code.
  if (inDeadCode()) {
    return true;
  }

  uint32_t bytecodeOffset = readBytecodeOffset();
  const TableDesc& dstTable = codeMeta().tables[dstTableIndex];
  const TableDesc& srcTable = codeMeta().tables[srcTableIndex];

  AddressType dstAddressType = dstTable.addressType();
  AddressType srcAddressType = srcTable.addressType();
  AddressType lenAddressType =
      dstAddressType == AddressType::I64 && srcAddressType == AddressType::I64
          ? AddressType::I64
          : AddressType::I32;

  MDefinition* dst32 = clampTableAddressToI32(dstAddressType, dst);
  if (!dst32) {
    return false;
  }

  MDefinition* src32 = clampTableAddressToI32(srcAddressType, src);
  if (!src32) {
    return false;
  }

  MDefinition* len32 = clampTableAddressToI32(lenAddressType, len);
  if (!len32) {
    return false;
  }

  // NOTE(review): unlike the helpers above, these constants are not
  // null-checked before use — matches the surrounding code's convention.
  MDefinition* dti = constantI32(int32_t(dstTableIndex));
  MDefinition* sti = constantI32(int32_t(srcTableIndex));

  return emitInstanceCall5(bytecodeOffset, SASigTableCopy, dst32, src32, len32,
                           dti, sti);
}
   7818 
// data.drop / elem.drop: discards a passive data or element segment by
// calling into the instance. `isData` selects between the two builtins.
bool FunctionCompiler::emitDataOrElemDrop(bool isData) {
  uint32_t segIndexVal = 0;
  if (!iter().readDataOrElemDrop(isData, &segIndexVal)) {
    return false;
  }

  // Nothing to emit in dead code.
  if (inDeadCode()) {
    return true;
  }

  uint32_t bytecodeOffset = readBytecodeOffset();

  MDefinition* segIndex = constantI32(int32_t(segIndexVal));

  const SymbolicAddressSignature& callee =
      isData ? SASigDataDrop : SASigElemDrop;
  return emitInstanceCall1(bytecodeOffset, callee, segIndex);
}
   7837 
// Emits an instance call implementing memory.fill, choosing the builtin by
// the target memory's sharedness and address type (32- vs 64-bit). The
// memory base is passed to the builtin along with start/value/length.
bool FunctionCompiler::emitMemFillCall(uint32_t memoryIndex, MDefinition* start,
                                       MDefinition* val, MDefinition* len) {
  MDefinition* base = memoryBase(memoryIndex);

  uint32_t bytecodeOffset = readBytecodeOffset();
  const SymbolicAddressSignature& callee =
      (codeMeta().usesSharedMemory(memoryIndex)
           ? (isMem32(memoryIndex) ? SASigMemFillSharedM32
                                   : SASigMemFillSharedM64)
           : (isMem32(memoryIndex) ? SASigMemFillM32 : SASigMemFillM64));
  return emitInstanceCall4(bytecodeOffset, callee, start, val, len, base);
}
   7850 
// Emits a fully unrolled memory.fill for a small compile-time-constant
// length and constant fill value (the caller guarantees `val` is a
// constant). Wider stores use the fill byte replicated across the store
// width (SplatByteToUInt). Stores run from high to low addresses so an
// out-of-bounds destination traps on the first store, before any byte is
// written.
bool FunctionCompiler::emitMemFillInline(uint32_t memoryIndex,
                                         MDefinition* start, MDefinition* val,
                                         uint32_t length) {
  MOZ_ASSERT(length != 0 && length <= MaxInlineMemoryFillLength);
  uint32_t value = val->toConstant()->toInt32();

  // Compute the number of copies of each width we will need to do
  size_t remainder = length;
#ifdef ENABLE_WASM_SIMD
  size_t numCopies16 = 0;
  if (MacroAssembler::SupportsFastUnalignedFPAccesses()) {
    numCopies16 = remainder / sizeof(V128);
    remainder %= sizeof(V128);
  }
#endif
#ifdef JS_64BIT
  size_t numCopies8 = remainder / sizeof(uint64_t);
  remainder %= sizeof(uint64_t);
#endif
  size_t numCopies4 = remainder / sizeof(uint32_t);
  remainder %= sizeof(uint32_t);
  size_t numCopies2 = remainder / sizeof(uint16_t);
  remainder %= sizeof(uint16_t);
  size_t numCopies1 = remainder;

  // Generate splatted definitions for wider fills as needed
  #ifdef ENABLE_WASM_SIMD
  MDefinition* val16 = numCopies16 ? constantV128(V128(value)) : nullptr;
#endif
#ifdef JS_64BIT
  MDefinition* val8 =
      numCopies8 ? constantI64(int64_t(SplatByteToUInt<uint64_t>(value, 8)))
                 : nullptr;
#endif
  MDefinition* val4 =
      numCopies4 ? constantI32(int32_t(SplatByteToUInt<uint32_t>(value, 4)))
                 : nullptr;
  MDefinition* val2 =
      numCopies2 ? constantI32(int32_t(SplatByteToUInt<uint32_t>(value, 2)))
                 : nullptr;

  // Store the fill value to the destination from high to low. We will trap
  // without writing anything on the first store if any dest byte is
  // out-of-bounds.
  size_t offset = length;

  if (numCopies1) {
    offset -= sizeof(uint8_t);

    MemoryAccessDesc access(memoryIndex, Scalar::Uint8, 1, offset,
                            trapSiteDesc(), hugeMemoryEnabled(memoryIndex));
    store(start, &access, val);
  }

  if (numCopies2) {
    offset -= sizeof(uint16_t);

    MemoryAccessDesc access(memoryIndex, Scalar::Uint16, 1, offset,
                            trapSiteDesc(), hugeMemoryEnabled(memoryIndex));
    store(start, &access, val2);
  }

  for (uint32_t i = 0; i < numCopies4; i++) {
    offset -= sizeof(uint32_t);

    MemoryAccessDesc access(memoryIndex, Scalar::Uint32, 1, offset,
                            trapSiteDesc(), hugeMemoryEnabled(memoryIndex));
    store(start, &access, val4);
  }

#ifdef JS_64BIT
  for (uint32_t i = 0; i < numCopies8; i++) {
    offset -= sizeof(uint64_t);

    MemoryAccessDesc access(memoryIndex, Scalar::Int64, 1, offset,
                            trapSiteDesc(), hugeMemoryEnabled(memoryIndex));
    store(start, &access, val8);
  }
#endif

#ifdef ENABLE_WASM_SIMD
  for (uint32_t i = 0; i < numCopies16; i++) {
    offset -= sizeof(V128);

    MemoryAccessDesc access(memoryIndex, Scalar::Simd128, 1, offset,
                            trapSiteDesc(), hugeMemoryEnabled(memoryIndex));
    store(start, &access, val16);
  }
#endif

  return true;
}
   7943 
   7944 bool FunctionCompiler::emitMemFill() {
   7945  uint32_t memoryIndex;
   7946  MDefinition *start, *val, *len;
   7947  if (!iter().readMemFill(&memoryIndex, &start, &val, &len)) {
   7948    return false;
   7949  }
   7950 
   7951  if (inDeadCode()) {
   7952    return true;
   7953  }
   7954 
   7955  if (len->isConstant() && val->isConstant()) {
   7956    uint64_t length = isMem32(memoryIndex) ? len->toConstant()->toInt32()
   7957                                           : len->toConstant()->toInt64();
   7958    static_assert(MaxInlineMemoryFillLength <= UINT32_MAX);
   7959    if (length != 0 && length <= MaxInlineMemoryFillLength) {
   7960      return emitMemFillInline(memoryIndex, start, val, uint32_t(length));
   7961    }
   7962  }
   7963 
   7964  return emitMemFillCall(memoryIndex, start, val, len);
   7965 }
   7966 
// memory.init: copies bytes from passive data segment `segIndexVal` into
// memory `dstMemIndex` via an instance call. The builtin is selected by the
// memory's address type (32- vs 64-bit).
bool FunctionCompiler::emitMemInit() {
  uint32_t segIndexVal = 0, dstMemIndex = 0;
  MDefinition *dstOff, *srcOff, *len;
  if (!iter().readMemOrTableInit(true, &segIndexVal, &dstMemIndex, &dstOff,
                                 &srcOff, &len)) {
    return false;
  }

  // Nothing to emit in dead code.
  if (inDeadCode()) {
    return true;
  }

  uint32_t bytecodeOffset = readBytecodeOffset();
  const SymbolicAddressSignature& callee =
      (isMem32(dstMemIndex) ? SASigMemInitM32 : SASigMemInitM64);

  MDefinition* segIndex = constantI32(int32_t(segIndexVal));
  if (!segIndex) {
    return false;
  }

  MDefinition* dti = constantI32(int32_t(dstMemIndex));
  if (!dti) {
    return false;
  }

  return emitInstanceCall5(bytecodeOffset, callee, dstOff, srcOff, len,
                           segIndex, dti);
}
   7996 
// table.init: copies entries from passive element segment `segIndexVal` into
// table `dstTableIndex` via an instance call. A 64-bit table destination
// offset is clamped to i32 (clampTableAddressToI32) before the call.
bool FunctionCompiler::emitTableInit() {
  uint32_t segIndexVal = 0, dstTableIndex = 0;
  MDefinition *dstOff, *srcOff, *len;
  if (!iter().readMemOrTableInit(false, &segIndexVal, &dstTableIndex, &dstOff,
                                 &srcOff, &len)) {
    return false;
  }

  // Nothing to emit in dead code.
  if (inDeadCode()) {
    return true;
  }

  uint32_t bytecodeOffset = readBytecodeOffset();
  const TableDesc& table = codeMeta().tables[dstTableIndex];

  MDefinition* dstOff32 = clampTableAddressToI32(table.addressType(), dstOff);
  if (!dstOff32) {
    return false;
  }

  MDefinition* segIndex = constantI32(int32_t(segIndexVal));
  if (!segIndex) {
    return false;
  }

  MDefinition* dti = constantI32(int32_t(dstTableIndex));
  if (!dti) {
    return false;
  }

  return emitInstanceCall5(bytecodeOffset, SASigTableInit, dstOff32, srcOff,
                           len, segIndex, dti);
}
   8030 
   8031 bool FunctionCompiler::emitTableFill() {
   8032  uint32_t tableIndex;
   8033  MDefinition *start, *val, *len;
   8034  if (!iter().readTableFill(&tableIndex, &start, &val, &len)) {
   8035    return false;
   8036  }
   8037 
   8038  if (inDeadCode()) {
   8039    return true;
   8040  }
   8041 
   8042  uint32_t bytecodeOffset = readBytecodeOffset();
   8043  const TableDesc& table = codeMeta().tables[tableIndex];
   8044 
   8045  MDefinition* start32 = clampTableAddressToI32(table.addressType(), start);
   8046  if (!start32) {
   8047    return false;
   8048  }
   8049 
   8050  MDefinition* len32 = clampTableAddressToI32(table.addressType(), len);
   8051  if (!len32) {
   8052    return false;
   8053  }
   8054 
   8055  MDefinition* tableIndexArg = constantI32(int32_t(tableIndex));
   8056  if (!tableIndexArg) {
   8057    return false;
   8058  }
   8059 
   8060  return emitInstanceCall4(bytecodeOffset, SASigTableFill, start32, val, len32,
   8061                           tableIndexArg);
   8062 }
   8063 
   8064 #if ENABLE_WASM_MEMORY_CONTROL
   8065 bool FunctionCompiler::emitMemDiscard() {
   8066  uint32_t memoryIndex;
   8067  MDefinition *start, *len;
   8068  if (!iter().readMemDiscard(&memoryIndex, &start, &len)) {
   8069    return false;
   8070  }
   8071 
   8072  if (inDeadCode()) {
   8073    return true;
   8074  }
   8075 
   8076  uint32_t bytecodeOffset = readBytecodeOffset();
   8077 
   8078  MDefinition* base = memoryBase(memoryIndex);
   8079  bool mem32 = isMem32(memoryIndex);
   8080 
   8081  const SymbolicAddressSignature& callee =
   8082      (codeMeta().usesSharedMemory(memoryIndex)
   8083           ? (mem32 ? SASigMemDiscardSharedM32 : SASigMemDiscardSharedM64)
   8084           : (mem32 ? SASigMemDiscardM32 : SASigMemDiscardM64));
   8085  return emitInstanceCall3(bytecodeOffset, callee, start, len, base);
   8086 }
   8087 #endif
   8088 
bool FunctionCompiler::emitTableGet() {
  // Decode table.get: the table immediate and the element address operand.
  uint32_t tableIndex;
  MDefinition* address;
  if (!iter().readTableGet(&tableIndex, &address)) {
    return false;
  }

  if (inDeadCode()) {
    return true;
  }

  const TableDesc& table = codeMeta().tables[tableIndex];

  // Tables with an anyref representation can be read with inline MIR and
  // need no instance call.
  if (table.elemType.tableRepr() == TableRepr::Ref) {
    MDefinition* ret = tableGetAnyRef(tableIndex, address);
    if (!ret) {
      return false;
    }
    iter().setResult(ret);
    return true;
  }

  uint32_t bytecodeOffset = readBytecodeOffset();

  // The instance call takes a 32-bit address; clamp 64-bit table addresses
  // down to i32 first.
  MDefinition* address32 = clampTableAddressToI32(table.addressType(), address);
  if (!address32) {
    return false;
  }

  MDefinition* tableIndexArg = constantI32(int32_t(tableIndex));
  if (!tableIndexArg) {
    return false;
  }

  // The return value here is either null, denoting an error, or a short-lived
  // pointer to a location containing a possibly-null ref.
  MDefinition* ret;
  if (!emitInstanceCall2(bytecodeOffset, SASigTableGet, address32,
                         tableIndexArg, &ret)) {
    return false;
  }

  iter().setResult(ret);
  return true;
}
   8134 
bool FunctionCompiler::emitTableGrow() {
  // Decode table.grow: the initial fill value and the number of elements to
  // grow by.
  uint32_t tableIndex;
  MDefinition* initValue;
  MDefinition* delta;
  if (!iter().readTableGrow(&tableIndex, &initValue, &delta)) {
    return false;
  }

  if (inDeadCode()) {
    return true;
  }

  uint32_t bytecodeOffset = readBytecodeOffset();
  const TableDesc& table = codeMeta().tables[tableIndex];

  // The instance call takes a 32-bit delta; clamp 64-bit table addresses
  // down to i32 first.
  MDefinition* delta32 = clampTableAddressToI32(table.addressType(), delta);
  if (!delta32) {
    return false;
  }

  MDefinition* tableIndexArg = constantI32(int32_t(tableIndex));
  if (!tableIndexArg) {
    return false;
  }

  // `ret` is the i32 previous table length (or the failure sentinel).
  MDefinition* ret;
  if (!emitInstanceCall3(bytecodeOffset, SASigTableGrow, initValue, delta32,
                         tableIndexArg, &ret)) {
    return false;
  }

  // For 64-bit-addressed tables, the i32 result must be widened to the i64
  // result type of table.grow.
  if (table.addressType() == AddressType::I64) {
    ret = extendI32(ret, false);
    if (!ret) {
      return false;
    }
  }

  iter().setResult(ret);
  return true;
}
   8176 
   8177 bool FunctionCompiler::emitTableSet() {
   8178  uint32_t tableIndex;
   8179  MDefinition* address;
   8180  MDefinition* value;
   8181  if (!iter().readTableSet(&tableIndex, &address, &value)) {
   8182    return false;
   8183  }
   8184 
   8185  if (inDeadCode()) {
   8186    return true;
   8187  }
   8188 
   8189  uint32_t bytecodeOffset = readBytecodeOffset();
   8190 
   8191  const TableDesc& table = codeMeta().tables[tableIndex];
   8192 
   8193  if (table.elemType.tableRepr() == TableRepr::Ref) {
   8194    return tableSetAnyRef(tableIndex, address, value, bytecodeOffset);
   8195  }
   8196 
   8197  MDefinition* address32 = clampTableAddressToI32(table.addressType(), address);
   8198  if (!address32) {
   8199    return false;
   8200  }
   8201  MDefinition* tableIndexArg = constantI32(int32_t(tableIndex));
   8202  if (!tableIndexArg) {
   8203    return false;
   8204  }
   8205 
   8206  return emitInstanceCall3(bytecodeOffset, SASigTableSet, address32, value,
   8207                           tableIndexArg);
   8208 }
   8209 
   8210 bool FunctionCompiler::emitTableSize() {
   8211  uint32_t tableIndex;
   8212  if (!iter().readTableSize(&tableIndex)) {
   8213    return false;
   8214  }
   8215 
   8216  if (inDeadCode()) {
   8217    return true;
   8218  }
   8219 
   8220  MDefinition* length = loadTableLength(tableIndex);
   8221  if (!length) {
   8222    return false;
   8223  }
   8224 
   8225  iter().setResult(length);
   8226  return true;
   8227 }
   8228 
   8229 bool FunctionCompiler::emitRefFunc() {
   8230  uint32_t funcIndex;
   8231  if (!iter().readRefFunc(&funcIndex)) {
   8232    return false;
   8233  }
   8234 
   8235  if (inDeadCode()) {
   8236    return true;
   8237  }
   8238 
   8239  uint32_t bytecodeOffset = readBytecodeOffset();
   8240 
   8241  MDefinition* funcIndexArg = constantI32(int32_t(funcIndex));
   8242  if (!funcIndexArg) {
   8243    return false;
   8244  }
   8245 
   8246  // The return value here is either null, denoting an error, or a short-lived
   8247  // pointer to a location containing a possibly-null ref.
   8248  MDefinition* ret;
   8249  if (!emitInstanceCall1(bytecodeOffset, SASigRefFunc, funcIndexArg, &ret)) {
   8250    return false;
   8251  }
   8252 
   8253  iter().setResult(ret);
   8254  return true;
   8255 }
   8256 
   8257 bool FunctionCompiler::emitRefNull() {
   8258  RefType type;
   8259  if (!iter().readRefNull(&type)) {
   8260    return false;
   8261  }
   8262 
   8263  if (inDeadCode()) {
   8264    return true;
   8265  }
   8266 
   8267  MDefinition* nullVal = constantNullRef(MaybeRefType(type));
   8268  if (!nullVal) {
   8269    return false;
   8270  }
   8271  iter().setResult(nullVal);
   8272  return true;
   8273 }
   8274 
   8275 bool FunctionCompiler::emitRefIsNull() {
   8276  MDefinition* input;
   8277  if (!iter().readRefIsNull(&input)) {
   8278    return false;
   8279  }
   8280 
   8281  if (inDeadCode()) {
   8282    return true;
   8283  }
   8284 
   8285  MDefinition* nullVal = constantNullRef(MaybeRefType());
   8286  if (!nullVal) {
   8287    return false;
   8288  }
   8289  iter().setResult(
   8290      compare(input, nullVal, JSOp::Eq, MCompare::Compare_WasmAnyRef));
   8291  return true;
   8292 }
   8293 
   8294 #ifdef ENABLE_WASM_SIMD
   8295 bool FunctionCompiler::emitConstSimd128() {
   8296  V128 v128;
   8297  if (!iter().readV128Const(&v128)) {
   8298    return false;
   8299  }
   8300 
   8301  iter().setResult(constantV128(v128));
   8302  return true;
   8303 }
   8304 
   8305 bool FunctionCompiler::emitBinarySimd128(bool commutative, SimdOp op) {
   8306  MDefinition* lhs;
   8307  MDefinition* rhs;
   8308  if (!iter().readBinary(ValType::V128, &lhs, &rhs)) {
   8309    return false;
   8310  }
   8311 
   8312  iter().setResult(binarySimd128(lhs, rhs, commutative, op));
   8313  return true;
   8314 }
   8315 
   8316 bool FunctionCompiler::emitTernarySimd128(wasm::SimdOp op) {
   8317  MDefinition* v0;
   8318  MDefinition* v1;
   8319  MDefinition* v2;
   8320  if (!iter().readTernary(ValType::V128, &v0, &v1, &v2)) {
   8321    return false;
   8322  }
   8323 
   8324  iter().setResult(ternarySimd128(v0, v1, v2, op));
   8325  return true;
   8326 }
   8327 
   8328 bool FunctionCompiler::emitShiftSimd128(SimdOp op) {
   8329  MDefinition* lhs;
   8330  MDefinition* rhs;
   8331  if (!iter().readVectorShift(&lhs, &rhs)) {
   8332    return false;
   8333  }
   8334 
   8335  iter().setResult(shiftSimd128(lhs, rhs, op));
   8336  return true;
   8337 }
   8338 
   8339 bool FunctionCompiler::emitSplatSimd128(ValType inType, SimdOp op) {
   8340  MDefinition* src;
   8341  if (!iter().readConversion(inType, ValType::V128, &src)) {
   8342    return false;
   8343  }
   8344 
   8345  iter().setResult(scalarToSimd128(src, op));
   8346  return true;
   8347 }
   8348 
   8349 bool FunctionCompiler::emitUnarySimd128(SimdOp op) {
   8350  MDefinition* src;
   8351  if (!iter().readUnary(ValType::V128, &src)) {
   8352    return false;
   8353  }
   8354 
   8355  iter().setResult(unarySimd128(src, op));
   8356  return true;
   8357 }
   8358 
   8359 bool FunctionCompiler::emitReduceSimd128(SimdOp op) {
   8360  MDefinition* src;
   8361  if (!iter().readConversion(ValType::V128, ValType::I32, &src)) {
   8362    return false;
   8363  }
   8364 
   8365  iter().setResult(reduceSimd128(src, op, ValType::I32));
   8366  return true;
   8367 }
   8368 
   8369 bool FunctionCompiler::emitExtractLaneSimd128(ValType outType,
   8370                                              uint32_t laneLimit, SimdOp op) {
   8371  uint32_t laneIndex;
   8372  MDefinition* src;
   8373  if (!iter().readExtractLane(outType, laneLimit, &laneIndex, &src)) {
   8374    return false;
   8375  }
   8376 
   8377  iter().setResult(reduceSimd128(src, op, outType, laneIndex));
   8378  return true;
   8379 }
   8380 
   8381 bool FunctionCompiler::emitReplaceLaneSimd128(ValType laneType,
   8382                                              uint32_t laneLimit, SimdOp op) {
   8383  uint32_t laneIndex;
   8384  MDefinition* lhs;
   8385  MDefinition* rhs;
   8386  if (!iter().readReplaceLane(laneType, laneLimit, &laneIndex, &lhs, &rhs)) {
   8387    return false;
   8388  }
   8389 
   8390  iter().setResult(replaceLaneSimd128(lhs, rhs, laneIndex, op));
   8391  return true;
   8392 }
   8393 
   8394 bool FunctionCompiler::emitShuffleSimd128() {
   8395  MDefinition* v1;
   8396  MDefinition* v2;
   8397  V128 control;
   8398  if (!iter().readVectorShuffle(&v1, &v2, &control)) {
   8399    return false;
   8400  }
   8401 
   8402  iter().setResult(shuffleSimd128(v1, v2, control));
   8403  return true;
   8404 }
   8405 
   8406 bool FunctionCompiler::emitLoadSplatSimd128(Scalar::Type viewType,
   8407                                            wasm::SimdOp splatOp) {
   8408  LinearMemoryAddress<MDefinition*> addr;
   8409  if (!iter().readLoadSplat(Scalar::byteSize(viewType), &addr)) {
   8410    return false;
   8411  }
   8412 
   8413  auto* ins = loadSplatSimd128(viewType, addr, splatOp);
   8414  if (!inDeadCode() && !ins) {
   8415    return false;
   8416  }
   8417  iter().setResult(ins);
   8418  return true;
   8419 }
   8420 
   8421 bool FunctionCompiler::emitLoadExtendSimd128(wasm::SimdOp op) {
   8422  LinearMemoryAddress<MDefinition*> addr;
   8423  if (!iter().readLoadExtend(&addr)) {
   8424    return false;
   8425  }
   8426 
   8427  auto* ins = loadExtendSimd128(addr, op);
   8428  if (!inDeadCode() && !ins) {
   8429    return false;
   8430  }
   8431  iter().setResult(ins);
   8432  return true;
   8433 }
   8434 
   8435 bool FunctionCompiler::emitLoadZeroSimd128(Scalar::Type viewType,
   8436                                           size_t numBytes) {
   8437  LinearMemoryAddress<MDefinition*> addr;
   8438  if (!iter().readLoadSplat(numBytes, &addr)) {
   8439    return false;
   8440  }
   8441 
   8442  auto* ins = loadZeroSimd128(viewType, numBytes, addr);
   8443  if (!inDeadCode() && !ins) {
   8444    return false;
   8445  }
   8446  iter().setResult(ins);
   8447  return true;
   8448 }
   8449 
   8450 bool FunctionCompiler::emitLoadLaneSimd128(uint32_t laneSize) {
   8451  uint32_t laneIndex;
   8452  MDefinition* src;
   8453  LinearMemoryAddress<MDefinition*> addr;
   8454  if (!iter().readLoadLane(laneSize, &addr, &laneIndex, &src)) {
   8455    return false;
   8456  }
   8457 
   8458  auto* ins = loadLaneSimd128(laneSize, addr, laneIndex, src);
   8459  if (!inDeadCode() && !ins) {
   8460    return false;
   8461  }
   8462  iter().setResult(ins);
   8463  return true;
   8464 }
   8465 
   8466 bool FunctionCompiler::emitStoreLaneSimd128(uint32_t laneSize) {
   8467  uint32_t laneIndex;
   8468  MDefinition* src;
   8469  LinearMemoryAddress<MDefinition*> addr;
   8470  if (!iter().readStoreLane(laneSize, &addr, &laneIndex, &src)) {
   8471    return false;
   8472  }
   8473 
   8474  storeLaneSimd128(laneSize, addr, laneIndex, src);
   8475  return true;
   8476 }
   8477 
   8478 #endif  // ENABLE_WASM_SIMD
   8479 
   8480 bool FunctionCompiler::emitRefAsNonNull() {
   8481  MDefinition* ref;
   8482  if (!iter().readRefAsNonNull(&ref)) {
   8483    return false;
   8484  }
   8485 
   8486  if (inDeadCode()) {
   8487    return true;
   8488  }
   8489 
   8490  MDefinition* ins = refAsNonNull(ref);
   8491  if (!ins) {
   8492    return false;
   8493  }
   8494 
   8495  iter().setResult(ins);
   8496  return true;
   8497 }
   8498 
   8499 bool FunctionCompiler::emitBrOnNull() {
   8500  uint32_t relativeDepth;
   8501  ResultType type;
   8502  DefVector values;
   8503  MDefinition* condition;
   8504  if (!iter().readBrOnNull(&relativeDepth, &type, &values, &condition)) {
   8505    return false;
   8506  }
   8507 
   8508  return brOnNull(relativeDepth, values, type, condition);
   8509 }
   8510 
   8511 bool FunctionCompiler::emitBrOnNonNull() {
   8512  uint32_t relativeDepth;
   8513  ResultType type;
   8514  DefVector values;
   8515  MDefinition* condition;
   8516  if (!iter().readBrOnNonNull(&relativeDepth, &type, &values, &condition)) {
   8517    return false;
   8518  }
   8519 
   8520  return brOnNonNull(relativeDepth, values, type, condition);
   8521 }
   8522 
// Speculatively inline call_refs that are likely to target the expected
// function indices in this module. A fallback is always generated for the
// case where the actual callee is not any of the speculated expected
// callees. This leads to a control flow chain that is roughly:
   8527 //
   8528 // if (ref.func $expectedFuncIndex_1) == actualCalleeFunc:
   8529 //   (call_inline $expectedFuncIndex1)
   8530 // else if (ref.func $expectedFuncIndex_2) == actualCalleeFunc:
   8531 //   (call_inline $expectedFuncIndex2)
   8532 // ...
   8533 // else:
   8534 //   (call_ref actualCalleeFunc)
   8535 //
bool FunctionCompiler::emitSpeculativeInlineCallRef(
    uint32_t bytecodeOffset, const FuncType& funcType,
    CallRefHint expectedFuncIndices, MDefinition* actualCalleeFunc,
    const DefVector& args, DefVector* results) {
  // There must be at least one speculative target.
  MOZ_ASSERT(!expectedFuncIndices.empty());

  // Perform an up front null check on the callee function reference.
  actualCalleeFunc = refAsNonNull(actualCalleeFunc);
  if (!actualCalleeFunc) {
    return false;
  }

  // Reserve one 'else' block per possible speculated target, plus one for
  // the fallback call_ref.
  constexpr size_t numElseBlocks = CallRefHint::NUM_ENTRIES + 1;
  Vector<MBasicBlock*, numElseBlocks, SystemAllocPolicy> elseBlocks;
  if (!elseBlocks.reserve(numElseBlocks)) {
    return false;
  }

  // Emit one guarded, inlined call per speculated target.
  for (uint32_t i = 0; i < expectedFuncIndices.length(); i++) {
    uint32_t funcIndex = expectedFuncIndices.get(i);

    // Load the cached value of `ref.func $expectedFuncIndex` for comparing
    // against `actualCalleeFunc`. This cached value may be null if the
    // `ref.func` for the expected function has not been executed in this
    // runtime session.
    //
    // This is okay because we have done a null check on the `actualCalleeFunc`
    // already and so comparing it against a null expected callee func will
    // return false and fall back to the general case. This can only happen if
    // we've deserialized a cached module in a different session, and then run
    // the code without ever acquiring a reference to the expected function. In
    // that case, the expected callee could never be the target of this
    // call_ref, so performing the fallback path is the right thing to do
    // anyways.
    MDefinition* expectedCalleeFunc = loadCachedRefFunc(funcIndex);
    if (!expectedCalleeFunc) {
      return false;
    }

    // Check if the callee funcref we have is equal to the expected callee
    // funcref we're inlining.
    MDefinition* isExpectedCallee =
        compare(actualCalleeFunc, expectedCalleeFunc, JSOp::Eq,
                MCompare::Compare_WasmAnyRef);
    if (!isExpectedCallee) {
      return false;
    }

    // Start a 'then' block, which will have the inlined code
    MBasicBlock* elseBlock;
    if (!branchAndStartThen(isExpectedCallee, &elseBlock)) {
      return false;
    }

    // Inline the expected callee as we do with direct calls
    DefVector inlineResults;
    if (!emitInlineCall(funcType, funcIndex,
                        InliningHeuristics::CallKind::CallRef, args,
                        &inlineResults)) {
      return false;
    }

    // Push the results for joining with the 'else' block
    if (!pushDefs(inlineResults)) {
      return false;
    }

    // Switch to the 'else' block which will have, either the check for the
    // next target, or the fallback `call_ref` if we're out of targets.
    if (!switchToElse(elseBlock, &elseBlock)) {
      return false;
    }

    elseBlocks.infallibleAppend(elseBlock);
  }

  // Fallback path: an out-of-line call_ref for any callee we did not guess.
  DefVector callResults;
  if (!callRef(funcType, actualCalleeFunc, bytecodeOffset, args,
               &callResults)) {
    return false;
  }

  // Push the results for joining with the 'then' block
  if (!pushDefs(callResults)) {
    return false;
  }

  // Join the various branches together, innermost guard first. Each inner
  // join's results are pushed for the next (outer) join; the outermost join
  // (elseBlocks[0]) writes the caller-visible results.
  for (uint32_t i = elseBlocks.length() - 1; i != 0; i--) {
    DefVector results;
    if (!joinIfElse(elseBlocks[i], &results) || !pushDefs(results)) {
      return false;
    }
  }
  return joinIfElse(elseBlocks[0], results);
}
   8633 
bool FunctionCompiler::emitCallRef() {
  // Decode call_ref: the function-type immediate, the callee funcref, and
  // the call arguments.
  uint32_t bytecodeOffset = readBytecodeOffset();
  uint32_t funcTypeIndex;
  MDefinition* callee;
  DefVector args;

  if (!iter().readCallRef(&funcTypeIndex, &callee, &args)) {
    return false;
  }

  // We must unconditionally read a call_ref hint so that we stay in sync with
  // how baseline generates them.
  CallRefHint hint = readCallRefHint();

  if (inDeadCode()) {
    return true;
  }

  const FuncType& funcType = codeMeta().types->type(funcTypeIndex).funcType();

  // Ask the inlining heuristics which entries in `hint` we are allowed to
  // inline.
  CallRefHint approved =
      auditInlineableCallees(InliningHeuristics::CallKind::CallRef, hint);
  if (!approved.empty()) {
    // Emit guarded inline calls for the approved targets, with a call_ref
    // fallback for any other callee.
    DefVector results;
    if (!emitSpeculativeInlineCallRef(bytecodeOffset, funcType, approved,
                                      callee, args, &results)) {
      return false;
    }
    iter().setResults(results.length(), results);
    return true;
  }

  // No approved targets: emit a plain out-of-line call_ref.
  DefVector results;
  if (!callRef(funcType, callee, bytecodeOffset, args, &results)) {
    return false;
  }

  iter().setResults(results.length(), results);
  return true;
}
   8676 
bool FunctionCompiler::emitStructNew() {
  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();

  // Decode struct.new: the type immediate and one operand per field.
  uint32_t typeIndex;
  DefVector args;
  if (!iter().readStructNew(&typeIndex, &args)) {
    return false;
  }

  // Read the alloc-site index before the dead-code check, as the other
  // allocating opcodes do.
  uint32_t allocSiteIndex = readAllocSiteIndex(typeIndex);

  if (inDeadCode()) {
    return true;
  }

  const TypeDef& typeDef = (*codeMeta().types)[typeIndex];
  const StructType& structType = typeDef.structType();
  MOZ_ASSERT(args.length() == structType.fields_.length());

  // Allocate the struct with uninitialized fields (zeroFields=false); every
  // field is written below.
  MDefinition* structObject =
      createStructObject(typeIndex, allocSiteIndex, false);
  if (!structObject) {
    return false;
  }

  // And fill in the fields.
  for (uint32_t fieldIndex = 0; fieldIndex < structType.fields_.length();
       fieldIndex++) {
    if (!mirGen().ensureBallast()) {
      return false;
    }
    // No pre-barrier is needed: the object is brand new, so the fields hold
    // no prior values to shoot down.
    if (!writeValueToStructField(lineOrBytecode, structType, fieldIndex,
                                 structObject, args[fieldIndex],
                                 WasmPreBarrierKind::None)) {
      return false;
    }
  }

  iter().setResult(structObject);
  return true;
}
   8718 
bool FunctionCompiler::emitStructNewDefault() {
  // Decode struct.new_default: just the type immediate.
  uint32_t typeIndex;
  if (!iter().readStructNewDefault(&typeIndex)) {
    return false;
  }

  // Read the alloc-site index before the dead-code check, as the other
  // allocating opcodes do.
  uint32_t allocSiteIndex = readAllocSiteIndex(typeIndex);

  if (inDeadCode()) {
    return true;
  }

  // Allocate the struct with zeroed (default-valued) fields.
  MDefinition* structObject =
      createStructObject(typeIndex, allocSiteIndex, true);
  if (!structObject) {
    return false;
  }

  iter().setResult(structObject);
  return true;
}
   8740 
bool FunctionCompiler::emitStructSet() {
  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();

  // Decode struct.set: type and field immediates, plus the object and the
  // value to store.
  uint32_t typeIndex;
  uint32_t fieldIndex;
  MDefinition* structObject;
  MDefinition* value;
  if (!iter().readStructSet(&typeIndex, &fieldIndex, &structObject, &value)) {
    return false;
  }

  if (inDeadCode()) {
    return true;
  }

  // Check for null is done at writeValueToStructField.

  // And fill in the field.  A normal pre-barrier is required since the field
  // may hold a previous value (unlike struct.new, where fields are fresh).
  const StructType& structType = (*codeMeta().types)[typeIndex].structType();
  return writeValueToStructField(lineOrBytecode, structType, fieldIndex,
                                 structObject, value,
                                 WasmPreBarrierKind::Normal);
}
   8764 
bool FunctionCompiler::emitStructGet(FieldWideningOp wideningOp) {
  // Decode struct.get / struct.get_s / struct.get_u; `wideningOp` selects
  // how packed fields are widened to the result type.
  uint32_t typeIndex;
  uint32_t fieldIndex;
  MDefinition* structObject;
  if (!iter().readStructGet(&typeIndex, &fieldIndex, wideningOp,
                            &structObject)) {
    return false;
  }

  if (inDeadCode()) {
    return true;
  }

  // Check for null is done at readValueFromStructField.

  // And fetch the data.
  const StructType& structType = (*codeMeta().types)[typeIndex].structType();
  MDefinition* load = readValueFromStructField(structType, fieldIndex,
                                               wideningOp, structObject);
  if (!load) {
    return false;
  }

  iter().setResult(load);
  return true;
}
   8791 
bool FunctionCompiler::emitArrayNew() {
  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();

  // Decode array.new: type immediate, element count, and fill value.
  uint32_t typeIndex;
  MDefinition* numElements;
  MDefinition* fillValue;
  if (!iter().readArrayNew(&typeIndex, &numElements, &fillValue)) {
    return false;
  }

  // Read the alloc-site index before the dead-code check, as the other
  // allocating opcodes do.
  uint32_t allocSiteIndex = readAllocSiteIndex(typeIndex);

  if (inDeadCode()) {
    return true;
  }

  // Create the array object, uninitialized.
  MDefinition* arrayObject =
      createArrayObject(typeIndex, allocSiteIndex, numElements,
                        /*zeroFields=*/false);
  if (!arrayObject) {
    return false;
  }

  // Fill every element with `fillValue`.  No pre-barrier (the elements are
  // fresh) and no per-element post-barrier (see below).
  const ArrayType& arrayType = (*codeMeta().types)[typeIndex].arrayType();
  if (!fillArray(lineOrBytecode, arrayType, arrayObject, constantI32(0),
                 numElements, fillValue, WasmPreBarrierKind::None,
                 WasmPostBarrierKind::None)) {
    return false;
  }

  if (arrayType.elementType().isRefRepr()) {
    // Emit one whole-cell post barrier for the whole array, since there is just
    // one object and one value.
    if (!postBarrierWholeCell(lineOrBytecode, arrayObject, fillValue)) {
      return false;
    }
  }

  iter().setResult(arrayObject);
  return true;
}
   8834 
bool FunctionCompiler::emitArrayNewDefault() {
  // This is almost identical to EmitArrayNew, except we skip the
  // initialisation loop.
  uint32_t typeIndex;
  MDefinition* numElements;
  if (!iter().readArrayNewDefault(&typeIndex, &numElements)) {
    return false;
  }

  // Read the alloc-site index before the dead-code check, as the other
  // allocating opcodes do.
  uint32_t allocSiteIndex = readAllocSiteIndex(typeIndex);

  if (inDeadCode()) {
    return true;
  }

  // Create the array object, default-initialized.
  MDefinition* arrayObject =
      createArrayObject(typeIndex, allocSiteIndex, numElements,
                        /*zeroFields=*/true);
  if (!arrayObject) {
    return false;
  }

  iter().setResult(arrayObject);
  return true;
}
   8861 
bool FunctionCompiler::emitArrayNewFixed() {
  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();

  // Decode array.new_fixed: type immediate, static element count, and one
  // operand per element.
  uint32_t typeIndex, numElements;
  DefVector values;

  if (!iter().readArrayNewFixed(&typeIndex, &numElements, &values)) {
    return false;
  }
  MOZ_ASSERT(values.length() == numElements);

  // Read the alloc-site index before the dead-code check, as the other
  // allocating opcodes do.
  uint32_t allocSiteIndex = readAllocSiteIndex(typeIndex);

  if (inDeadCode()) {
    return true;
  }

  MDefinition* numElementsDef = constantI32(int32_t(numElements));
  if (!numElementsDef) {
    return false;
  }

  // Create the array object, uninitialized.
  const ArrayType& arrayType = (*codeMeta().types)[typeIndex].arrayType();
  StorageType elemType = arrayType.elementType();
  uint32_t elemSize = elemType.size();
  MDefinition* arrayObject =
      createArrayObject(typeIndex, allocSiteIndex, numElementsDef,
                        /*zeroFields=*/false);
  if (!arrayObject) {
    return false;
  }

  // Make `base` point at the first byte of the (OOL) data area.
  MDefinition* base = getWasmArrayObjectData(arrayObject);
  if (!base) {
    return false;
  }

  // Write each element in turn.

  // How do we know that the offset expression `i * elemSize` below remains
  // within 2^31 (signed-i32) range?  In the worst case we will have 16-byte
  // values, and there can be at most MaxFunctionBytes expressions, if it were
  // theoretically possible to generate one expression per instruction byte.
  // Hence the max offset we can be expected to generate is
  // `16 * MaxFunctionBytes`.
  static_assert(16 /* sizeof v128 */ * MaxFunctionBytes <=
                MaxArrayPayloadBytes);
  MOZ_RELEASE_ASSERT(numElements <= MaxFunctionBytes);

  for (uint32_t i = 0; i < numElements; i++) {
    if (!mirGen().ensureBallast()) {
      return false;
    }
    // `i * elemSize` is made safe by the assertions above.
    // Note the reversed indexing: array slot `i` receives
    // `values[numElements - 1 - i]`.
    if (!writeGcValueAtBasePlusOffset(
            lineOrBytecode, elemType, arrayObject, AliasSet::WasmArrayDataArea,
            values[numElements - 1 - i], base, i * elemSize, i, false,
            WasmPreBarrierKind::None, WasmPostBarrierKind::WholeCell)) {
      return false;
    }
  }

  iter().setResult(arrayObject);
  return true;
}
   8929 
bool FunctionCompiler::emitArrayNewData() {
  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();

  // Decode array.new_data: type and data-segment immediates, plus the byte
  // offset into the segment and the element count.
  uint32_t typeIndex, segIndex;
  MDefinition* segByteOffset;
  MDefinition* numElements;
  if (!iter().readArrayNewData(&typeIndex, &segIndex, &segByteOffset,
                               &numElements)) {
    return false;
  }

  // Read the alloc-site index before the dead-code check, as the other
  // allocating opcodes do.
  uint32_t allocSiteIndex = readAllocSiteIndex(typeIndex);

  if (inDeadCode()) {
    return true;
  }

  MDefinition* typeIndexValue = constantI32(int32_t(typeIndex));
  if (!typeIndexValue) {
    return false;
  }

  MDefinition* allocSite = loadAllocSiteInstanceData(allocSiteIndex);
  if (!allocSite) {
    return false;
  }

  // Other values we need to pass to the instance call:
  MDefinition* segIndexM = constantI32(int32_t(segIndex));
  if (!segIndexM) {
    return false;
  }

  // Create call:
  // arrayObject = Instance::arrayNewData(segByteOffset:u32, numElements:u32,
  //                                      typeDefData:word, segIndex:u32)
  // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated by
  // this call will trap.
  MDefinition* arrayObject;
  if (!emitInstanceCall5(lineOrBytecode, SASigArrayNewData, segByteOffset,
                         numElements, typeIndexValue, allocSite, segIndexM,
                         &arrayObject)) {
    return false;
  }

  iter().setResult(arrayObject);
  return true;
}
   8978 
   8979 bool FunctionCompiler::emitArrayNewElem() {
   8980  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
   8981 
   8982  uint32_t typeIndex, segIndex;
   8983  MDefinition* segElemIndex;
   8984  MDefinition* numElements;
   8985  if (!iter().readArrayNewElem(&typeIndex, &segIndex, &segElemIndex,
   8986                               &numElements)) {
   8987    return false;
   8988  }
   8989 
   8990  uint32_t allocSiteIndex = readAllocSiteIndex(typeIndex);
   8991 
   8992  if (inDeadCode()) {
   8993    return true;
   8994  }
   8995 
   8996  MDefinition* typeIndexValue = constantI32(int32_t(typeIndex));
   8997  if (!typeIndexValue) {
   8998    return false;
   8999  }
   9000 
   9001  MDefinition* allocSite = loadAllocSiteInstanceData(allocSiteIndex);
   9002  if (!allocSite) {
   9003    return false;
   9004  }
   9005 
   9006  // Other values we need to pass to the instance call:
   9007  MDefinition* segIndexM = constantI32(int32_t(segIndex));
   9008  if (!segIndexM) {
   9009    return false;
   9010  }
   9011 
   9012  // Create call:
   9013  // arrayObject = Instance::arrayNewElem(segElemIndex:u32, numElements:u32,
   9014  //                                      typeDefData:word, segIndex:u32)
   9015  // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated by
   9016  // this call will trap.
   9017  MDefinition* arrayObject;
   9018  if (!emitInstanceCall5(lineOrBytecode, SASigArrayNewElem, segElemIndex,
   9019                         numElements, typeIndexValue, allocSite, segIndexM,
   9020                         &arrayObject)) {
   9021    return false;
   9022  }
   9023 
   9024  iter().setResult(arrayObject);
   9025  return true;
   9026 }
   9027 
   9028 bool FunctionCompiler::emitArrayInitData() {
   9029  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
   9030 
   9031  uint32_t unusedTypeIndex, segIndex;
   9032  MDefinition* array;
   9033  MDefinition* arrayIndex;
   9034  MDefinition* segOffset;
   9035  MDefinition* length;
   9036  if (!iter().readArrayInitData(&unusedTypeIndex, &segIndex, &array,
   9037                                &arrayIndex, &segOffset, &length)) {
   9038    return false;
   9039  }
   9040 
   9041  if (inDeadCode()) {
   9042    return true;
   9043  }
   9044 
   9045  // Other values we need to pass to the instance call:
   9046  MDefinition* segIndexM = constantI32(int32_t(segIndex));
   9047  if (!segIndexM) {
   9048    return false;
   9049  }
   9050 
   9051  // Create call:
   9052  // Instance::arrayInitData(array:word, index:u32, segByteOffset:u32,
   9053  // numElements:u32, segIndex:u32) If the requested size exceeds
   9054  // MaxArrayPayloadBytes, the MIR generated by this call will trap.
   9055  return emitInstanceCall5(lineOrBytecode, SASigArrayInitData, array,
   9056                           arrayIndex, segOffset, length, segIndexM);
   9057 }
   9058 
   9059 bool FunctionCompiler::emitArrayInitElem() {
   9060  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
   9061 
   9062  uint32_t typeIndex, segIndex;
   9063  MDefinition* array;
   9064  MDefinition* arrayIndex;
   9065  MDefinition* segOffset;
   9066  MDefinition* length;
   9067  if (!iter().readArrayInitElem(&typeIndex, &segIndex, &array, &arrayIndex,
   9068                                &segOffset, &length)) {
   9069    return false;
   9070  }
   9071 
   9072  if (inDeadCode()) {
   9073    return true;
   9074  }
   9075 
   9076  MDefinition* typeIndexValue = constantI32(int32_t(typeIndex));
   9077  if (!typeIndexValue) {
   9078    return false;
   9079  }
   9080 
   9081  // Other values we need to pass to the instance call:
   9082  MDefinition* segIndexM = constantI32(int32_t(segIndex));
   9083  if (!segIndexM) {
   9084    return false;
   9085  }
   9086 
   9087  // Create call:
   9088  // Instance::arrayInitElem(array:word, index:u32, segByteOffset:u32,
   9089  // numElements:u32, typeDefData:word, segIndex:u32) If the requested size
   9090  // exceeds MaxArrayPayloadBytes, the MIR generated by this call will trap.
   9091  return emitInstanceCall6(lineOrBytecode, SASigArrayInitElem, array,
   9092                           arrayIndex, segOffset, length, typeIndexValue,
   9093                           segIndexM);
   9094 }
   9095 
// Emits `array.set`: stores `value` into `arrayObject` at `index`.  The
// null check and bounds check are emitted by setupForArrayAccess; the store
// itself (with GC pre/post barriers) is emitted by
// writeGcValueAtBasePlusScaledIndex.
bool FunctionCompiler::emitArraySet() {
  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();

  uint32_t typeIndex;
  MDefinition* value;
  MDefinition* index;
  MDefinition* arrayObject;
  if (!iter().readArraySet(&typeIndex, &value, &index, &arrayObject)) {
    return false;
  }

  // Nothing to emit if this point in the function is unreachable.
  if (inDeadCode()) {
    return true;
  }

  // Check for null is done at setupForArrayAccess.

  // Create the object null check and the array bounds check and get the OOL
  // data pointer.
  MDefinition* base = setupForArrayAccess(arrayObject, index);
  if (!base) {
    return false;
  }

  // And do the store.
  const ArrayType& arrayType = (*codeMeta().types)[typeIndex].arrayType();
  StorageType elemType = arrayType.elementType();
  uint32_t elemSize = elemType.size();
  // Sanity-check the element size before using it as a scale factor.
  MOZ_ASSERT(elemSize >= 1 && elemSize <= 16);

  // A Normal pre-barrier and an Edge post-barrier are requested here;
  // presumably the helper applies them only for ref-typed elements -- see
  // writeGcValueAtBasePlusScaledIndex.
  return writeGcValueAtBasePlusScaledIndex(
      lineOrBytecode, elemType, arrayObject, AliasSet::WasmArrayDataArea, value,
      base, elemSize, index, WasmPreBarrierKind::Normal,
      WasmPostBarrierKind::Edge);
}
   9131 
   9132 bool FunctionCompiler::emitArrayGet(FieldWideningOp wideningOp) {
   9133  uint32_t typeIndex;
   9134  MDefinition* index;
   9135  MDefinition* arrayObject;
   9136  if (!iter().readArrayGet(&typeIndex, wideningOp, &index, &arrayObject)) {
   9137    return false;
   9138  }
   9139 
   9140  if (inDeadCode()) {
   9141    return true;
   9142  }
   9143 
   9144  // Check for null is done at setupForArrayAccess.
   9145 
   9146  // Create the object null check and the array bounds check and get the data
   9147  // pointer.
   9148  MDefinition* base = setupForArrayAccess(arrayObject, index);
   9149  if (!base) {
   9150    return false;
   9151  }
   9152 
   9153  // And do the load.
   9154  const ArrayType& arrayType = (*codeMeta().types)[typeIndex].arrayType();
   9155  StorageType elemType = arrayType.elementType();
   9156 
   9157  MDefinition* load =
   9158      readGcArrayValueAtIndex(elemType, wideningOp, arrayObject,
   9159                              AliasSet::WasmArrayDataArea, base, index);
   9160  if (!load) {
   9161    return false;
   9162  }
   9163 
   9164  iter().setResult(load);
   9165  return true;
   9166 }
   9167 
   9168 bool FunctionCompiler::emitArrayLen() {
   9169  MDefinition* arrayObject;
   9170  if (!iter().readArrayLen(&arrayObject)) {
   9171    return false;
   9172  }
   9173 
   9174  if (inDeadCode()) {
   9175    return true;
   9176  }
   9177 
   9178  // Check for null is done at getWasmArrayObjectNumElements.
   9179 
   9180  // Get the size value for the array
   9181  MDefinition* numElements = getWasmArrayObjectNumElements(arrayObject);
   9182  if (!numElements) {
   9183    return false;
   9184  }
   9185 
   9186  iter().setResult(numElements);
   9187  return true;
   9188 }
   9189 
   9190 bool FunctionCompiler::emitArrayCopy() {
   9191  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
   9192 
   9193  uint32_t dstArrayTypeIndex;
   9194  uint32_t srcArrayTypeIndex;
   9195  MDefinition* dstArrayObject;
   9196  MDefinition* dstArrayIndex;
   9197  MDefinition* srcArrayObject;
   9198  MDefinition* srcArrayIndex;
   9199  MDefinition* numElements;
   9200  if (!iter().readArrayCopy(&dstArrayTypeIndex, &srcArrayTypeIndex,
   9201                            &dstArrayObject, &dstArrayIndex, &srcArrayObject,
   9202                            &srcArrayIndex, &numElements)) {
   9203    return false;
   9204  }
   9205 
   9206  if (inDeadCode()) {
   9207    return true;
   9208  }
   9209 
   9210  const ArrayType& dstArrayType =
   9211      codeMeta().types->type(dstArrayTypeIndex).arrayType();
   9212  StorageType dstElemType = dstArrayType.elementType();
   9213  int32_t elemSize = int32_t(dstElemType.size());
   9214  bool elemsAreRefTyped = dstElemType.isRefType();
   9215 
   9216  return createArrayCopy(lineOrBytecode, dstArrayObject, dstArrayIndex,
   9217                         srcArrayObject, srcArrayIndex, numElements, elemSize,
   9218                         elemsAreRefTyped);
   9219 }
   9220 
   9221 bool FunctionCompiler::emitArrayFill() {
   9222  uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
   9223 
   9224  uint32_t typeIndex;
   9225  MDefinition* array;
   9226  MDefinition* index;
   9227  MDefinition* val;
   9228  MDefinition* numElements;
   9229  if (!iter().readArrayFill(&typeIndex, &array, &index, &val, &numElements)) {
   9230    return false;
   9231  }
   9232 
   9233  if (inDeadCode()) {
   9234    return true;
   9235  }
   9236 
   9237  return createArrayFill(lineOrBytecode, typeIndex, array, index, val,
   9238                         numElements);
   9239 }
   9240 
   9241 bool FunctionCompiler::emitRefI31() {
   9242  MDefinition* input;
   9243  if (!iter().readConversion(ValType::I32,
   9244                             ValType(RefType::i31().asNonNullable()), &input)) {
   9245    return false;
   9246  }
   9247 
   9248  if (inDeadCode()) {
   9249    return true;
   9250  }
   9251 
   9252  MDefinition* output = refI31(input);
   9253  if (!output) {
   9254    return false;
   9255  }
   9256  iter().setResult(output);
   9257  return true;
   9258 }
   9259 
   9260 bool FunctionCompiler::emitI31Get(FieldWideningOp wideningOp) {
   9261  MOZ_ASSERT(wideningOp != FieldWideningOp::None);
   9262 
   9263  MDefinition* input;
   9264  if (!iter().readConversion(ValType(RefType::i31()), ValType::I32, &input)) {
   9265    return false;
   9266  }
   9267 
   9268  if (inDeadCode()) {
   9269    return true;
   9270  }
   9271 
   9272  input = refAsNonNull(input);
   9273  if (!input) {
   9274    return false;
   9275  }
   9276  MDefinition* output = i31Get(input, wideningOp);
   9277  if (!output) {
   9278    return false;
   9279  }
   9280  iter().setResult(output);
   9281  return true;
   9282 }
   9283 
   9284 bool FunctionCompiler::emitRefTest(bool nullable) {
   9285  MDefinition* ref;
   9286  RefType sourceType;
   9287  RefType destType;
   9288  if (!iter().readRefTest(nullable, &sourceType, &destType, &ref)) {
   9289    return false;
   9290  }
   9291 
   9292  if (inDeadCode()) {
   9293    return true;
   9294  }
   9295 
   9296  MDefinition* success = refTest(ref, destType);
   9297  if (!success) {
   9298    return false;
   9299  }
   9300 
   9301  iter().setResult(success);
   9302  return true;
   9303 }
   9304 
   9305 bool FunctionCompiler::emitRefCast(bool nullable) {
   9306  MDefinition* ref;
   9307  RefType sourceType;
   9308  RefType destType;
   9309  if (!iter().readRefCast(nullable, &sourceType, &destType, &ref)) {
   9310    return false;
   9311  }
   9312 
   9313  if (inDeadCode()) {
   9314    return true;
   9315  }
   9316 
   9317  MDefinition* castedRef = refCast(ref, destType);
   9318  if (!castedRef) {
   9319    return false;
   9320  }
   9321 
   9322  iter().setResult(castedRef);
   9323  return true;
   9324 }
   9325 
   9326 bool FunctionCompiler::emitBrOnCast(bool onSuccess) {
   9327  uint32_t labelRelativeDepth;
   9328  RefType sourceType;
   9329  RefType destType;
   9330  ResultType labelType;
   9331  DefVector values;
   9332  if (!iter().readBrOnCast(onSuccess, &labelRelativeDepth, &sourceType,
   9333                           &destType, &labelType, &values)) {
   9334    return false;
   9335  }
   9336 
   9337  return brOnCastCommon(onSuccess, labelRelativeDepth, sourceType, destType,
   9338                        labelType, values);
   9339 }
   9340 
   9341 bool FunctionCompiler::emitAnyConvertExtern() {
   9342  MDefinition* ref;
   9343  if (!iter().readRefConversion(RefType::extern_(), RefType::any(), &ref)) {
   9344    return false;
   9345  }
   9346 
   9347  if (inDeadCode()) {
   9348    return true;
   9349  }
   9350 
   9351  MDefinition* conversion = convertAnyExtern(ref, wasm::RefType::Kind::Any);
   9352  if (!conversion) {
   9353    return false;
   9354  }
   9355 
   9356  iter().setResult(conversion);
   9357  return true;
   9358 }
   9359 
   9360 bool FunctionCompiler::emitExternConvertAny() {
   9361  MDefinition* ref;
   9362  if (!iter().readRefConversion(RefType::any(), RefType::extern_(), &ref)) {
   9363    return false;
   9364  }
   9365 
   9366  if (inDeadCode()) {
   9367    return true;
   9368  }
   9369 
   9370  MDefinition* conversion = convertAnyExtern(ref, wasm::RefType::Kind::Extern);
   9371  if (!conversion) {
   9372    return false;
   9373  }
   9374 
   9375  iter().setResult(conversion);
   9376  return true;
   9377 }
   9378 
   9379 bool FunctionCompiler::emitCallBuiltinModuleFunc() {
   9380  const BuiltinModuleFunc* builtinModuleFunc;
   9381 
   9382  DefVector params;
   9383  if (!iter().readCallBuiltinModuleFunc(&builtinModuleFunc, &params)) {
   9384    return false;
   9385  }
   9386 
   9387  return callBuiltinModuleFunc(*builtinModuleFunc, params);
   9388 }
   9389 
   9390 bool FunctionCompiler::emitBodyExprs() {
   9391  if (!iter().startFunction(funcIndex())) {
   9392    return false;
   9393  }
   9394 
   9395 #define CHECK(c)          \
   9396  if (!(c)) return false; \
   9397  break
   9398 
   9399  while (true) {
   9400    if (!mirGen().ensureBallast()) {
   9401      return false;
   9402    }
   9403 
   9404    OpBytes op;
   9405    if (!iter().readOp(&op)) {
   9406      return false;
   9407    }
   9408 
   9409    switch (op.b0) {
   9410      case uint16_t(Op::End):
   9411        if (!emitEnd()) {
   9412          return false;
   9413        }
   9414        if (iter().controlStackEmpty()) {
   9415          return true;
   9416        }
   9417        break;
   9418 
   9419      // Control opcodes
   9420      case uint16_t(Op::Unreachable):
   9421        CHECK(emitUnreachable());
   9422      case uint16_t(Op::Nop):
   9423        CHECK(iter().readNop());
   9424      case uint16_t(Op::Block):
   9425        CHECK(emitBlock());
   9426      case uint16_t(Op::Loop):
   9427        CHECK(emitLoop());
   9428      case uint16_t(Op::If):
   9429        CHECK(emitIf());
   9430      case uint16_t(Op::Else):
   9431        CHECK(emitElse());
   9432      case uint16_t(Op::Try):
   9433        CHECK(emitTry());
   9434      case uint16_t(Op::Catch):
   9435        CHECK(emitCatch());
   9436      case uint16_t(Op::CatchAll):
   9437        CHECK(emitCatchAll());
   9438      case uint16_t(Op::Delegate):
   9439        CHECK(emitDelegate());
   9440      case uint16_t(Op::Throw):
   9441        CHECK(emitThrow());
   9442      case uint16_t(Op::Rethrow):
   9443        CHECK(emitRethrow());
   9444      case uint16_t(Op::ThrowRef):
   9445        CHECK(emitThrowRef());
   9446      case uint16_t(Op::TryTable):
   9447        CHECK(emitTryTable());
   9448      case uint16_t(Op::Br):
   9449        CHECK(emitBr());
   9450      case uint16_t(Op::BrIf):
   9451        CHECK(emitBrIf());
   9452      case uint16_t(Op::BrTable):
   9453        CHECK(emitBrTable());
   9454      case uint16_t(Op::Return):
   9455        CHECK(emitReturn());
   9456 
   9457      // Calls
   9458      case uint16_t(Op::Call):
   9459        CHECK(emitCall(/* asmJSFuncDef = */ false));
   9460      case uint16_t(Op::CallIndirect):
   9461        CHECK(emitCallIndirect(/* oldStyle = */ false));
   9462 
   9463      // Parametric operators
   9464      case uint16_t(Op::Drop):
   9465        CHECK(iter().readDrop());
   9466      case uint16_t(Op::SelectNumeric):
   9467        CHECK(emitSelect(/*typed*/ false));
   9468      case uint16_t(Op::SelectTyped):
   9469        CHECK(emitSelect(/*typed*/ true));
   9470 
   9471      // Locals and globals
   9472      case uint16_t(Op::LocalGet):
   9473        CHECK(emitGetLocal());
   9474      case uint16_t(Op::LocalSet):
   9475        CHECK(emitSetLocal());
   9476      case uint16_t(Op::LocalTee):
   9477        CHECK(emitTeeLocal());
   9478      case uint16_t(Op::GlobalGet):
   9479        CHECK(emitGetGlobal());
   9480      case uint16_t(Op::GlobalSet):
   9481        CHECK(emitSetGlobal());
   9482      case uint16_t(Op::TableGet):
   9483        CHECK(emitTableGet());
   9484      case uint16_t(Op::TableSet):
   9485        CHECK(emitTableSet());
   9486 
   9487      // Memory-related operators
   9488      case uint16_t(Op::I32Load):
   9489        CHECK(emitLoad(ValType::I32, Scalar::Int32));
   9490      case uint16_t(Op::I64Load):
   9491        CHECK(emitLoad(ValType::I64, Scalar::Int64));
   9492      case uint16_t(Op::F32Load):
   9493        CHECK(emitLoad(ValType::F32, Scalar::Float32));
   9494      case uint16_t(Op::F64Load):
   9495        CHECK(emitLoad(ValType::F64, Scalar::Float64));
   9496      case uint16_t(Op::I32Load8S):
   9497        CHECK(emitLoad(ValType::I32, Scalar::Int8));
   9498      case uint16_t(Op::I32Load8U):
   9499        CHECK(emitLoad(ValType::I32, Scalar::Uint8));
   9500      case uint16_t(Op::I32Load16S):
   9501        CHECK(emitLoad(ValType::I32, Scalar::Int16));
   9502      case uint16_t(Op::I32Load16U):
   9503        CHECK(emitLoad(ValType::I32, Scalar::Uint16));
   9504      case uint16_t(Op::I64Load8S):
   9505        CHECK(emitLoad(ValType::I64, Scalar::Int8));
   9506      case uint16_t(Op::I64Load8U):
   9507        CHECK(emitLoad(ValType::I64, Scalar::Uint8));
   9508      case uint16_t(Op::I64Load16S):
   9509        CHECK(emitLoad(ValType::I64, Scalar::Int16));
   9510      case uint16_t(Op::I64Load16U):
   9511        CHECK(emitLoad(ValType::I64, Scalar::Uint16));
   9512      case uint16_t(Op::I64Load32S):
   9513        CHECK(emitLoad(ValType::I64, Scalar::Int32));
   9514      case uint16_t(Op::I64Load32U):
   9515        CHECK(emitLoad(ValType::I64, Scalar::Uint32));
   9516      case uint16_t(Op::I32Store):
   9517        CHECK(emitStore(ValType::I32, Scalar::Int32));
   9518      case uint16_t(Op::I64Store):
   9519        CHECK(emitStore(ValType::I64, Scalar::Int64));
   9520      case uint16_t(Op::F32Store):
   9521        CHECK(emitStore(ValType::F32, Scalar::Float32));
   9522      case uint16_t(Op::F64Store):
   9523        CHECK(emitStore(ValType::F64, Scalar::Float64));
   9524      case uint16_t(Op::I32Store8):
   9525        CHECK(emitStore(ValType::I32, Scalar::Int8));
   9526      case uint16_t(Op::I32Store16):
   9527        CHECK(emitStore(ValType::I32, Scalar::Int16));
   9528      case uint16_t(Op::I64Store8):
   9529        CHECK(emitStore(ValType::I64, Scalar::Int8));
   9530      case uint16_t(Op::I64Store16):
   9531        CHECK(emitStore(ValType::I64, Scalar::Int16));
   9532      case uint16_t(Op::I64Store32):
   9533        CHECK(emitStore(ValType::I64, Scalar::Int32));
   9534      case uint16_t(Op::MemorySize):
   9535        CHECK(emitMemorySize());
   9536      case uint16_t(Op::MemoryGrow):
   9537        CHECK(emitMemoryGrow());
   9538 
   9539      // Constants
   9540      case uint16_t(Op::I32Const):
   9541        CHECK(emitI32Const());
   9542      case uint16_t(Op::I64Const):
   9543        CHECK(emitI64Const());
   9544      case uint16_t(Op::F32Const):
   9545        CHECK(emitF32Const());
   9546      case uint16_t(Op::F64Const):
   9547        CHECK(emitF64Const());
   9548 
   9549      // Comparison operators
   9550      case uint16_t(Op::I32Eqz):
   9551        CHECK(emitConversion<MNot>(ValType::I32, ValType::I32));
   9552      case uint16_t(Op::I32Eq):
   9553        CHECK(emitComparison(ValType::I32, JSOp::Eq, MCompare::Compare_Int32));
   9554      case uint16_t(Op::I32Ne):
   9555        CHECK(emitComparison(ValType::I32, JSOp::Ne, MCompare::Compare_Int32));
   9556      case uint16_t(Op::I32LtS):
   9557        CHECK(emitComparison(ValType::I32, JSOp::Lt, MCompare::Compare_Int32));
   9558      case uint16_t(Op::I32LtU):
   9559        CHECK(emitComparison(ValType::I32, JSOp::Lt, MCompare::Compare_UInt32));
   9560      case uint16_t(Op::I32GtS):
   9561        CHECK(emitComparison(ValType::I32, JSOp::Gt, MCompare::Compare_Int32));
   9562      case uint16_t(Op::I32GtU):
   9563        CHECK(emitComparison(ValType::I32, JSOp::Gt, MCompare::Compare_UInt32));
   9564      case uint16_t(Op::I32LeS):
   9565        CHECK(emitComparison(ValType::I32, JSOp::Le, MCompare::Compare_Int32));
   9566      case uint16_t(Op::I32LeU):
   9567        CHECK(emitComparison(ValType::I32, JSOp::Le, MCompare::Compare_UInt32));
   9568      case uint16_t(Op::I32GeS):
   9569        CHECK(emitComparison(ValType::I32, JSOp::Ge, MCompare::Compare_Int32));
   9570      case uint16_t(Op::I32GeU):
   9571        CHECK(emitComparison(ValType::I32, JSOp::Ge, MCompare::Compare_UInt32));
   9572      case uint16_t(Op::I64Eqz):
   9573        CHECK(emitConversion<MNot>(ValType::I64, ValType::I32));
   9574      case uint16_t(Op::I64Eq):
   9575        CHECK(emitComparison(ValType::I64, JSOp::Eq, MCompare::Compare_Int64));
   9576      case uint16_t(Op::I64Ne):
   9577        CHECK(emitComparison(ValType::I64, JSOp::Ne, MCompare::Compare_Int64));
   9578      case uint16_t(Op::I64LtS):
   9579        CHECK(emitComparison(ValType::I64, JSOp::Lt, MCompare::Compare_Int64));
   9580      case uint16_t(Op::I64LtU):
   9581        CHECK(emitComparison(ValType::I64, JSOp::Lt, MCompare::Compare_UInt64));
   9582      case uint16_t(Op::I64GtS):
   9583        CHECK(emitComparison(ValType::I64, JSOp::Gt, MCompare::Compare_Int64));
   9584      case uint16_t(Op::I64GtU):
   9585        CHECK(emitComparison(ValType::I64, JSOp::Gt, MCompare::Compare_UInt64));
   9586      case uint16_t(Op::I64LeS):
   9587        CHECK(emitComparison(ValType::I64, JSOp::Le, MCompare::Compare_Int64));
   9588      case uint16_t(Op::I64LeU):
   9589        CHECK(emitComparison(ValType::I64, JSOp::Le, MCompare::Compare_UInt64));
   9590      case uint16_t(Op::I64GeS):
   9591        CHECK(emitComparison(ValType::I64, JSOp::Ge, MCompare::Compare_Int64));
   9592      case uint16_t(Op::I64GeU):
   9593        CHECK(emitComparison(ValType::I64, JSOp::Ge, MCompare::Compare_UInt64));
   9594      case uint16_t(Op::F32Eq):
   9595        CHECK(
   9596            emitComparison(ValType::F32, JSOp::Eq, MCompare::Compare_Float32));
   9597      case uint16_t(Op::F32Ne):
   9598        CHECK(
   9599            emitComparison(ValType::F32, JSOp::Ne, MCompare::Compare_Float32));
   9600      case uint16_t(Op::F32Lt):
   9601        CHECK(
   9602            emitComparison(ValType::F32, JSOp::Lt, MCompare::Compare_Float32));
   9603      case uint16_t(Op::F32Gt):
   9604        CHECK(
   9605            emitComparison(ValType::F32, JSOp::Gt, MCompare::Compare_Float32));
   9606      case uint16_t(Op::F32Le):
   9607        CHECK(
   9608            emitComparison(ValType::F32, JSOp::Le, MCompare::Compare_Float32));
   9609      case uint16_t(Op::F32Ge):
   9610        CHECK(
   9611            emitComparison(ValType::F32, JSOp::Ge, MCompare::Compare_Float32));
   9612      case uint16_t(Op::F64Eq):
   9613        CHECK(emitComparison(ValType::F64, JSOp::Eq, MCompare::Compare_Double));
   9614      case uint16_t(Op::F64Ne):
   9615        CHECK(emitComparison(ValType::F64, JSOp::Ne, MCompare::Compare_Double));
   9616      case uint16_t(Op::F64Lt):
   9617        CHECK(emitComparison(ValType::F64, JSOp::Lt, MCompare::Compare_Double));
   9618      case uint16_t(Op::F64Gt):
   9619        CHECK(emitComparison(ValType::F64, JSOp::Gt, MCompare::Compare_Double));
   9620      case uint16_t(Op::F64Le):
   9621        CHECK(emitComparison(ValType::F64, JSOp::Le, MCompare::Compare_Double));
   9622      case uint16_t(Op::F64Ge):
   9623        CHECK(emitComparison(ValType::F64, JSOp::Ge, MCompare::Compare_Double));
   9624 
   9625      // Numeric operators
   9626      case uint16_t(Op::I32Clz):
   9627        CHECK(emitUnaryWithType<MClz>(ValType::I32, MIRType::Int32));
   9628      case uint16_t(Op::I32Ctz):
   9629        CHECK(emitUnaryWithType<MCtz>(ValType::I32, MIRType::Int32));
   9630      case uint16_t(Op::I32Popcnt):
   9631        CHECK(emitUnaryWithType<MPopcnt>(ValType::I32, MIRType::Int32));
   9632      case uint16_t(Op::I32Add):
   9633        CHECK(emitAdd(ValType::I32, MIRType::Int32));
   9634      case uint16_t(Op::I32Sub):
   9635        CHECK(emitSub(ValType::I32, MIRType::Int32));
   9636      case uint16_t(Op::I32Mul):
   9637        CHECK(emitMul(ValType::I32, MIRType::Int32));
   9638      case uint16_t(Op::I32DivS):
   9639      case uint16_t(Op::I32DivU):
   9640        CHECK(emitDiv(ValType::I32, MIRType::Int32, Op(op.b0) == Op::I32DivU));
   9641      case uint16_t(Op::I32RemS):
   9642      case uint16_t(Op::I32RemU):
   9643        CHECK(emitRem(ValType::I32, MIRType::Int32, Op(op.b0) == Op::I32RemU));
   9644      case uint16_t(Op::I32And):
   9645        CHECK(emitBitwiseAndOrXor(ValType::I32, MIRType::Int32,
   9646                                  MWasmBinaryBitwise::SubOpcode::And));
   9647      case uint16_t(Op::I32Or):
   9648        CHECK(emitBitwiseAndOrXor(ValType::I32, MIRType::Int32,
   9649                                  MWasmBinaryBitwise::SubOpcode::Or));
   9650      case uint16_t(Op::I32Xor):
   9651        CHECK(emitBitwiseAndOrXor(ValType::I32, MIRType::Int32,
   9652                                  MWasmBinaryBitwise::SubOpcode::Xor));
   9653      case uint16_t(Op::I32Shl):
   9654        CHECK(emitShift<MLsh>(ValType::I32, MIRType::Int32));
   9655      case uint16_t(Op::I32ShrS):
   9656        CHECK(emitShift<MRsh>(ValType::I32, MIRType::Int32));
   9657      case uint16_t(Op::I32ShrU):
   9658        CHECK(emitUrsh(ValType::I32, MIRType::Int32));
   9659      case uint16_t(Op::I32Rotl):
   9660      case uint16_t(Op::I32Rotr):
   9661        CHECK(emitRotate(ValType::I32, Op(op.b0) == Op::I32Rotl));
   9662      case uint16_t(Op::I64Clz):
   9663        CHECK(emitUnaryWithType<MClz>(ValType::I64, MIRType::Int64));
   9664      case uint16_t(Op::I64Ctz):
   9665        CHECK(emitUnaryWithType<MCtz>(ValType::I64, MIRType::Int64));
   9666      case uint16_t(Op::I64Popcnt):
   9667        CHECK(emitUnaryWithType<MPopcnt>(ValType::I64, MIRType::Int64));
   9668      case uint16_t(Op::I64Add):
   9669        CHECK(emitAdd(ValType::I64, MIRType::Int64));
   9670      case uint16_t(Op::I64Sub):
   9671        CHECK(emitSub(ValType::I64, MIRType::Int64));
   9672      case uint16_t(Op::I64Mul):
   9673        CHECK(emitMul(ValType::I64, MIRType::Int64));
   9674      case uint16_t(Op::I64DivS):
   9675      case uint16_t(Op::I64DivU):
   9676        CHECK(emitDiv(ValType::I64, MIRType::Int64, Op(op.b0) == Op::I64DivU));
   9677      case uint16_t(Op::I64RemS):
   9678      case uint16_t(Op::I64RemU):
   9679        CHECK(emitRem(ValType::I64, MIRType::Int64, Op(op.b0) == Op::I64RemU));
   9680      case uint16_t(Op::I64And):
   9681        CHECK(emitBitwiseAndOrXor(ValType::I64, MIRType::Int64,
   9682                                  MWasmBinaryBitwise::SubOpcode::And));
   9683      case uint16_t(Op::I64Or):
   9684        CHECK(emitBitwiseAndOrXor(ValType::I64, MIRType::Int64,
   9685                                  MWasmBinaryBitwise::SubOpcode::Or));
   9686      case uint16_t(Op::I64Xor):
   9687        CHECK(emitBitwiseAndOrXor(ValType::I64, MIRType::Int64,
   9688                                  MWasmBinaryBitwise::SubOpcode::Xor));
   9689      case uint16_t(Op::I64Shl):
   9690        CHECK(emitShift<MLsh>(ValType::I64, MIRType::Int64));
   9691      case uint16_t(Op::I64ShrS):
   9692        CHECK(emitShift<MRsh>(ValType::I64, MIRType::Int64));
   9693      case uint16_t(Op::I64ShrU):
   9694        CHECK(emitUrsh(ValType::I64, MIRType::Int64));
   9695      case uint16_t(Op::I64Rotl):
   9696      case uint16_t(Op::I64Rotr):
   9697        CHECK(emitRotate(ValType::I64, Op(op.b0) == Op::I64Rotl));
   9698      case uint16_t(Op::F32Abs):
   9699        CHECK(emitUnaryWithType<MAbs>(ValType::F32, MIRType::Float32));
   9700      case uint16_t(Op::F32Neg):
   9701        CHECK(emitUnaryWithType<MWasmNeg>(ValType::F32, MIRType::Float32));
   9702      case uint16_t(Op::F32Ceil):
   9703        CHECK(emitUnaryMathBuiltinCall(SASigCeilF));
   9704      case uint16_t(Op::F32Floor):
   9705        CHECK(emitUnaryMathBuiltinCall(SASigFloorF));
   9706      case uint16_t(Op::F32Trunc):
   9707        CHECK(emitUnaryMathBuiltinCall(SASigTruncF));
   9708      case uint16_t(Op::F32Nearest):
   9709        CHECK(emitUnaryMathBuiltinCall(SASigNearbyIntF));
   9710      case uint16_t(Op::F32Sqrt):
   9711        CHECK(emitUnaryWithType<MSqrt>(ValType::F32, MIRType::Float32));
   9712      case uint16_t(Op::F32Add):
   9713        CHECK(emitAdd(ValType::F32, MIRType::Float32));
   9714      case uint16_t(Op::F32Sub):
   9715        CHECK(emitSub(ValType::F32, MIRType::Float32));
   9716      case uint16_t(Op::F32Mul):
   9717        CHECK(emitMul(ValType::F32, MIRType::Float32));
   9718      case uint16_t(Op::F32Div):
   9719        CHECK(emitDiv(ValType::F32, MIRType::Float32,
   9720                      /* isUnsigned = */ false));
   9721      case uint16_t(Op::F32Min):
   9722      case uint16_t(Op::F32Max):
   9723        CHECK(emitMinMax(ValType::F32, MIRType::Float32,
   9724                         Op(op.b0) == Op::F32Max));
   9725      case uint16_t(Op::F32CopySign):
   9726        CHECK(emitCopySign(ValType::F32));
   9727      case uint16_t(Op::F64Abs):
   9728        CHECK(emitUnaryWithType<MAbs>(ValType::F64, MIRType::Double));
   9729      case uint16_t(Op::F64Neg):
   9730        CHECK(emitUnaryWithType<MWasmNeg>(ValType::F64, MIRType::Double));
   9731      case uint16_t(Op::F64Ceil):
   9732        CHECK(emitUnaryMathBuiltinCall(SASigCeilD));
   9733      case uint16_t(Op::F64Floor):
   9734        CHECK(emitUnaryMathBuiltinCall(SASigFloorD));
   9735      case uint16_t(Op::F64Trunc):
   9736        CHECK(emitUnaryMathBuiltinCall(SASigTruncD));
   9737      case uint16_t(Op::F64Nearest):
   9738        CHECK(emitUnaryMathBuiltinCall(SASigNearbyIntD));
   9739      case uint16_t(Op::F64Sqrt):
   9740        CHECK(emitUnaryWithType<MSqrt>(ValType::F64, MIRType::Double));
   9741      case uint16_t(Op::F64Add):
   9742        CHECK(emitAdd(ValType::F64, MIRType::Double));
   9743      case uint16_t(Op::F64Sub):
   9744        CHECK(emitSub(ValType::F64, MIRType::Double));
   9745      case uint16_t(Op::F64Mul):
   9746        CHECK(emitMul(ValType::F64, MIRType::Double));
   9747      case uint16_t(Op::F64Div):
   9748        CHECK(emitDiv(ValType::F64, MIRType::Double,
   9749                      /* isUnsigned = */ false));
   9750      case uint16_t(Op::F64Min):
   9751      case uint16_t(Op::F64Max):
   9752        CHECK(
   9753            emitMinMax(ValType::F64, MIRType::Double, Op(op.b0) == Op::F64Max));
   9754      case uint16_t(Op::F64CopySign):
   9755        CHECK(emitCopySign(ValType::F64));
   9756 
   9757      // Conversions
   9758      case uint16_t(Op::I32WrapI64):
   9759        CHECK(emitConversion<MWrapInt64ToInt32>(ValType::I64, ValType::I32));
   9760      case uint16_t(Op::I32TruncF32S):
   9761      case uint16_t(Op::I32TruncF32U):
   9762        CHECK(emitTruncate(ValType::F32, ValType::I32,
   9763                           Op(op.b0) == Op::I32TruncF32U, false));
   9764      case uint16_t(Op::I32TruncF64S):
   9765      case uint16_t(Op::I32TruncF64U):
   9766        CHECK(emitTruncate(ValType::F64, ValType::I32,
   9767                           Op(op.b0) == Op::I32TruncF64U, false));
   9768      case uint16_t(Op::I64ExtendI32S):
   9769      case uint16_t(Op::I64ExtendI32U):
   9770        CHECK(emitExtendI32(Op(op.b0) == Op::I64ExtendI32U));
   9771      case uint16_t(Op::I64TruncF32S):
   9772      case uint16_t(Op::I64TruncF32U):
   9773        CHECK(emitTruncate(ValType::F32, ValType::I64,
   9774                           Op(op.b0) == Op::I64TruncF32U, false));
   9775      case uint16_t(Op::I64TruncF64S):
   9776      case uint16_t(Op::I64TruncF64U):
   9777        CHECK(emitTruncate(ValType::F64, ValType::I64,
   9778                           Op(op.b0) == Op::I64TruncF64U, false));
   9779      case uint16_t(Op::F32ConvertI32S):
   9780        CHECK(emitConversion<MToFloat32>(ValType::I32, ValType::F32));
   9781      case uint16_t(Op::F32ConvertI32U):
   9782        CHECK(
   9783            emitConversion<MWasmUnsignedToFloat32>(ValType::I32, ValType::F32));
   9784      case uint16_t(Op::F32ConvertI64S):
   9785      case uint16_t(Op::F32ConvertI64U):
   9786        CHECK(emitConvertI64ToFloatingPoint(ValType::F32, MIRType::Float32,
   9787                                            Op(op.b0) == Op::F32ConvertI64U));
   9788      case uint16_t(Op::F32DemoteF64):
   9789        CHECK(emitConversion<MToFloat32>(ValType::F64, ValType::F32));
   9790      case uint16_t(Op::F64ConvertI32S):
   9791        CHECK(emitConversion<MToDouble>(ValType::I32, ValType::F64));
   9792      case uint16_t(Op::F64ConvertI32U):
   9793        CHECK(
   9794            emitConversion<MWasmUnsignedToDouble>(ValType::I32, ValType::F64));
   9795      case uint16_t(Op::F64ConvertI64S):
   9796      case uint16_t(Op::F64ConvertI64U):
   9797        CHECK(emitConvertI64ToFloatingPoint(ValType::F64, MIRType::Double,
   9798                                            Op(op.b0) == Op::F64ConvertI64U));
   9799      case uint16_t(Op::F64PromoteF32):
   9800        CHECK(emitConversion<MToDouble>(ValType::F32, ValType::F64));
   9801 
   9802      // Reinterpretations
   9803      case uint16_t(Op::I32ReinterpretF32):
   9804        CHECK(emitReinterpret(ValType::I32, ValType::F32, MIRType::Int32));
   9805      case uint16_t(Op::I64ReinterpretF64):
   9806        CHECK(emitReinterpret(ValType::I64, ValType::F64, MIRType::Int64));
   9807      case uint16_t(Op::F32ReinterpretI32):
   9808        CHECK(emitReinterpret(ValType::F32, ValType::I32, MIRType::Float32));
   9809      case uint16_t(Op::F64ReinterpretI64):
   9810        CHECK(emitReinterpret(ValType::F64, ValType::I64, MIRType::Double));
   9811 
   9812      case uint16_t(Op::RefEq):
   9813        CHECK(emitComparison(RefType::eq(), JSOp::Eq,
   9814                             MCompare::Compare_WasmAnyRef));
   9815      case uint16_t(Op::RefFunc):
   9816        CHECK(emitRefFunc());
   9817      case uint16_t(Op::RefNull):
   9818        CHECK(emitRefNull());
   9819      case uint16_t(Op::RefIsNull):
   9820        CHECK(emitRefIsNull());
   9821 
      // Sign extensions (wasm sign-extension-operators proposal).
      // NOTE(review): the two emitSignExtend arguments look like
      // (source-width bytes, destination-width bytes) — e.g. (1, 4) for
      // i32.extend8_s — but emitSignExtend's definition is outside this
      // view; confirm before relying on that reading.
      case uint16_t(Op::I32Extend8S):
        CHECK(emitSignExtend(1, 4));
      case uint16_t(Op::I32Extend16S):
        CHECK(emitSignExtend(2, 4));
      case uint16_t(Op::I64Extend8S):
        CHECK(emitSignExtend(1, 8));
      case uint16_t(Op::I64Extend16S):
        CHECK(emitSignExtend(2, 8));
      case uint16_t(Op::I64Extend32S):
        CHECK(emitSignExtend(4, 8));
   9833 
   9834      case uint16_t(Op::ReturnCall): {
   9835        CHECK(emitReturnCall());
   9836      }
   9837      case uint16_t(Op::ReturnCallIndirect): {
   9838        CHECK(emitReturnCallIndirect());
   9839      }
   9840 
   9841      case uint16_t(Op::RefAsNonNull):
   9842        CHECK(emitRefAsNonNull());
   9843      case uint16_t(Op::BrOnNull): {
   9844        CHECK(emitBrOnNull());
   9845      }
   9846      case uint16_t(Op::BrOnNonNull): {
   9847        CHECK(emitBrOnNonNull());
   9848      }
   9849      case uint16_t(Op::CallRef): {
   9850        CHECK(emitCallRef());
   9851      }
   9852 
   9853      case uint16_t(Op::ReturnCallRef): {
   9854        CHECK(emitReturnCallRef());
   9855      }
   9856 
   9857      // Gc operations
   9858      case uint16_t(Op::GcPrefix): {
   9859        switch (op.b1) {
   9860          case uint32_t(GcOp::StructNew):
   9861            CHECK(emitStructNew());
   9862          case uint32_t(GcOp::StructNewDefault):
   9863            CHECK(emitStructNewDefault());
   9864          case uint32_t(GcOp::StructSet):
   9865            CHECK(emitStructSet());
   9866          case uint32_t(GcOp::StructGet):
   9867            CHECK(emitStructGet(FieldWideningOp::None));
   9868          case uint32_t(GcOp::StructGetS):
   9869            CHECK(emitStructGet(FieldWideningOp::Signed));
   9870          case uint32_t(GcOp::StructGetU):
   9871            CHECK(emitStructGet(FieldWideningOp::Unsigned));
   9872          case uint32_t(GcOp::ArrayNew):
   9873            CHECK(emitArrayNew());
   9874          case uint32_t(GcOp::ArrayNewDefault):
   9875            CHECK(emitArrayNewDefault());
   9876          case uint32_t(GcOp::ArrayNewFixed):
   9877            CHECK(emitArrayNewFixed());
   9878          case uint32_t(GcOp::ArrayNewData):
   9879            CHECK(emitArrayNewData());
   9880          case uint32_t(GcOp::ArrayNewElem):
   9881            CHECK(emitArrayNewElem());
   9882          case uint32_t(GcOp::ArrayInitData):
   9883            CHECK(emitArrayInitData());
   9884          case uint32_t(GcOp::ArrayInitElem):
   9885            CHECK(emitArrayInitElem());
   9886          case uint32_t(GcOp::ArraySet):
   9887            CHECK(emitArraySet());
   9888          case uint32_t(GcOp::ArrayGet):
   9889            CHECK(emitArrayGet(FieldWideningOp::None));
   9890          case uint32_t(GcOp::ArrayGetS):
   9891            CHECK(emitArrayGet(FieldWideningOp::Signed));
   9892          case uint32_t(GcOp::ArrayGetU):
   9893            CHECK(emitArrayGet(FieldWideningOp::Unsigned));
   9894          case uint32_t(GcOp::ArrayLen):
   9895            CHECK(emitArrayLen());
   9896          case uint32_t(GcOp::ArrayCopy):
   9897            CHECK(emitArrayCopy());
   9898          case uint32_t(GcOp::ArrayFill):
   9899            CHECK(emitArrayFill());
   9900          case uint32_t(GcOp::RefI31):
   9901            CHECK(emitRefI31());
   9902          case uint32_t(GcOp::I31GetS):
   9903            CHECK(emitI31Get(FieldWideningOp::Signed));
   9904          case uint32_t(GcOp::I31GetU):
   9905            CHECK(emitI31Get(FieldWideningOp::Unsigned));
   9906          case uint32_t(GcOp::BrOnCast):
   9907            CHECK(emitBrOnCast(/*onSuccess=*/true));
   9908          case uint32_t(GcOp::BrOnCastFail):
   9909            CHECK(emitBrOnCast(/*onSuccess=*/false));
   9910          case uint32_t(GcOp::RefTest):
   9911            CHECK(emitRefTest(/*nullable=*/false));
   9912          case uint32_t(GcOp::RefTestNull):
   9913            CHECK(emitRefTest(/*nullable=*/true));
   9914          case uint32_t(GcOp::RefCast):
   9915            CHECK(emitRefCast(/*nullable=*/false));
   9916          case uint32_t(GcOp::RefCastNull):
   9917            CHECK(emitRefCast(/*nullable=*/true));
          // NOTE(review): these two cases cast with uint16_t(...) while
          // every sibling GcOp case in this switch uses uint32_t(...).
          // The switch operand is op.b1 (presumably a 32-bit sub-opcode,
          // matching the uint32_t casts above), so this is harmless only
          // because these GcOp values fit in 16 bits — consider uint32_t
          // for consistency.
          case uint16_t(GcOp::AnyConvertExtern):
            CHECK(emitAnyConvertExtern());
          case uint16_t(GcOp::ExternConvertAny):
            CHECK(emitExternConvertAny());
   9922          default:
   9923            return iter().unrecognizedOpcode(&op);
   9924        }  // switch (op.b1)
   9925        break;
   9926      }
   9927 
   9928      // SIMD operations
   9929 #ifdef ENABLE_WASM_SIMD
   9930      case uint16_t(Op::SimdPrefix): {
   9931        if (!codeMeta().simdAvailable()) {
   9932          return iter().unrecognizedOpcode(&op);
   9933        }
   9934        switch (op.b1) {
   9935          case uint32_t(SimdOp::V128Const):
   9936            CHECK(emitConstSimd128());
   9937          case uint32_t(SimdOp::V128Load):
   9938            CHECK(emitLoad(ValType::V128, Scalar::Simd128));
   9939          case uint32_t(SimdOp::V128Store):
   9940            CHECK(emitStore(ValType::V128, Scalar::Simd128));
   9941          case uint32_t(SimdOp::V128And):
   9942          case uint32_t(SimdOp::V128Or):
   9943          case uint32_t(SimdOp::V128Xor):
   9944          case uint32_t(SimdOp::I8x16AvgrU):
   9945          case uint32_t(SimdOp::I16x8AvgrU):
   9946          case uint32_t(SimdOp::I8x16Add):
   9947          case uint32_t(SimdOp::I8x16AddSatS):
   9948          case uint32_t(SimdOp::I8x16AddSatU):
   9949          case uint32_t(SimdOp::I8x16MinS):
   9950          case uint32_t(SimdOp::I8x16MinU):
   9951          case uint32_t(SimdOp::I8x16MaxS):
   9952          case uint32_t(SimdOp::I8x16MaxU):
   9953          case uint32_t(SimdOp::I16x8Add):
   9954          case uint32_t(SimdOp::I16x8AddSatS):
   9955          case uint32_t(SimdOp::I16x8AddSatU):
   9956          case uint32_t(SimdOp::I16x8Mul):
   9957          case uint32_t(SimdOp::I16x8MinS):
   9958          case uint32_t(SimdOp::I16x8MinU):
   9959          case uint32_t(SimdOp::I16x8MaxS):
   9960          case uint32_t(SimdOp::I16x8MaxU):
   9961          case uint32_t(SimdOp::I32x4Add):
   9962          case uint32_t(SimdOp::I32x4Mul):
   9963          case uint32_t(SimdOp::I32x4MinS):
   9964          case uint32_t(SimdOp::I32x4MinU):
   9965          case uint32_t(SimdOp::I32x4MaxS):
   9966          case uint32_t(SimdOp::I32x4MaxU):
   9967          case uint32_t(SimdOp::I64x2Add):
   9968          case uint32_t(SimdOp::I64x2Mul):
   9969          case uint32_t(SimdOp::F32x4Add):
   9970          case uint32_t(SimdOp::F32x4Mul):
   9971          case uint32_t(SimdOp::F32x4Min):
   9972          case uint32_t(SimdOp::F32x4Max):
   9973          case uint32_t(SimdOp::F64x2Add):
   9974          case uint32_t(SimdOp::F64x2Mul):
   9975          case uint32_t(SimdOp::F64x2Min):
   9976          case uint32_t(SimdOp::F64x2Max):
   9977          case uint32_t(SimdOp::I8x16Eq):
   9978          case uint32_t(SimdOp::I8x16Ne):
   9979          case uint32_t(SimdOp::I16x8Eq):
   9980          case uint32_t(SimdOp::I16x8Ne):
   9981          case uint32_t(SimdOp::I32x4Eq):
   9982          case uint32_t(SimdOp::I32x4Ne):
   9983          case uint32_t(SimdOp::I64x2Eq):
   9984          case uint32_t(SimdOp::I64x2Ne):
   9985          case uint32_t(SimdOp::F32x4Eq):
   9986          case uint32_t(SimdOp::F32x4Ne):
   9987          case uint32_t(SimdOp::F64x2Eq):
   9988          case uint32_t(SimdOp::F64x2Ne):
   9989          case uint32_t(SimdOp::I32x4DotI16x8S):
   9990          case uint32_t(SimdOp::I16x8ExtmulLowI8x16S):
   9991          case uint32_t(SimdOp::I16x8ExtmulHighI8x16S):
   9992          case uint32_t(SimdOp::I16x8ExtmulLowI8x16U):
   9993          case uint32_t(SimdOp::I16x8ExtmulHighI8x16U):
   9994          case uint32_t(SimdOp::I32x4ExtmulLowI16x8S):
   9995          case uint32_t(SimdOp::I32x4ExtmulHighI16x8S):
   9996          case uint32_t(SimdOp::I32x4ExtmulLowI16x8U):
   9997          case uint32_t(SimdOp::I32x4ExtmulHighI16x8U):
   9998          case uint32_t(SimdOp::I64x2ExtmulLowI32x4S):
   9999          case uint32_t(SimdOp::I64x2ExtmulHighI32x4S):
  10000          case uint32_t(SimdOp::I64x2ExtmulLowI32x4U):
  10001          case uint32_t(SimdOp::I64x2ExtmulHighI32x4U):
  10002          case uint32_t(SimdOp::I16x8Q15MulrSatS):
  10003            CHECK(emitBinarySimd128(/* commutative= */ true, SimdOp(op.b1)));
  10004          case uint32_t(SimdOp::V128AndNot):
  10005          case uint32_t(SimdOp::I8x16Sub):
  10006          case uint32_t(SimdOp::I8x16SubSatS):
  10007          case uint32_t(SimdOp::I8x16SubSatU):
  10008          case uint32_t(SimdOp::I16x8Sub):
  10009          case uint32_t(SimdOp::I16x8SubSatS):
  10010          case uint32_t(SimdOp::I16x8SubSatU):
  10011          case uint32_t(SimdOp::I32x4Sub):
  10012          case uint32_t(SimdOp::I64x2Sub):
  10013          case uint32_t(SimdOp::F32x4Sub):
  10014          case uint32_t(SimdOp::F32x4Div):
  10015          case uint32_t(SimdOp::F64x2Sub):
  10016          case uint32_t(SimdOp::F64x2Div):
  10017          case uint32_t(SimdOp::I8x16NarrowI16x8S):
  10018          case uint32_t(SimdOp::I8x16NarrowI16x8U):
  10019          case uint32_t(SimdOp::I16x8NarrowI32x4S):
  10020          case uint32_t(SimdOp::I16x8NarrowI32x4U):
  10021          case uint32_t(SimdOp::I8x16LtS):
  10022          case uint32_t(SimdOp::I8x16LtU):
  10023          case uint32_t(SimdOp::I8x16GtS):
  10024          case uint32_t(SimdOp::I8x16GtU):
  10025          case uint32_t(SimdOp::I8x16LeS):
  10026          case uint32_t(SimdOp::I8x16LeU):
  10027          case uint32_t(SimdOp::I8x16GeS):
  10028          case uint32_t(SimdOp::I8x16GeU):
  10029          case uint32_t(SimdOp::I16x8LtS):
  10030          case uint32_t(SimdOp::I16x8LtU):
  10031          case uint32_t(SimdOp::I16x8GtS):
  10032          case uint32_t(SimdOp::I16x8GtU):
  10033          case uint32_t(SimdOp::I16x8LeS):
  10034          case uint32_t(SimdOp::I16x8LeU):
  10035          case uint32_t(SimdOp::I16x8GeS):
  10036          case uint32_t(SimdOp::I16x8GeU):
  10037          case uint32_t(SimdOp::I32x4LtS):
  10038          case uint32_t(SimdOp::I32x4LtU):
  10039          case uint32_t(SimdOp::I32x4GtS):
  10040          case uint32_t(SimdOp::I32x4GtU):
  10041          case uint32_t(SimdOp::I32x4LeS):
  10042          case uint32_t(SimdOp::I32x4LeU):
  10043          case uint32_t(SimdOp::I32x4GeS):
  10044          case uint32_t(SimdOp::I32x4GeU):
  10045          case uint32_t(SimdOp::I64x2LtS):
  10046          case uint32_t(SimdOp::I64x2GtS):
  10047          case uint32_t(SimdOp::I64x2LeS):
  10048          case uint32_t(SimdOp::I64x2GeS):
  10049          case uint32_t(SimdOp::F32x4Lt):
  10050          case uint32_t(SimdOp::F32x4Gt):
  10051          case uint32_t(SimdOp::F32x4Le):
  10052          case uint32_t(SimdOp::F32x4Ge):
  10053          case uint32_t(SimdOp::F64x2Lt):
  10054          case uint32_t(SimdOp::F64x2Gt):
  10055          case uint32_t(SimdOp::F64x2Le):
  10056          case uint32_t(SimdOp::F64x2Ge):
  10057          case uint32_t(SimdOp::I8x16Swizzle):
  10058          case uint32_t(SimdOp::F32x4PMax):
  10059          case uint32_t(SimdOp::F32x4PMin):
  10060          case uint32_t(SimdOp::F64x2PMax):
  10061          case uint32_t(SimdOp::F64x2PMin):
  10062            CHECK(emitBinarySimd128(/* commutative= */ false, SimdOp(op.b1)));
  10063          case uint32_t(SimdOp::I8x16Splat):
  10064          case uint32_t(SimdOp::I16x8Splat):
  10065          case uint32_t(SimdOp::I32x4Splat):
  10066            CHECK(emitSplatSimd128(ValType::I32, SimdOp(op.b1)));
  10067          case uint32_t(SimdOp::I64x2Splat):
  10068            CHECK(emitSplatSimd128(ValType::I64, SimdOp(op.b1)));
  10069          case uint32_t(SimdOp::F32x4Splat):
  10070            CHECK(emitSplatSimd128(ValType::F32, SimdOp(op.b1)));
  10071          case uint32_t(SimdOp::F64x2Splat):
  10072            CHECK(emitSplatSimd128(ValType::F64, SimdOp(op.b1)));
  10073          case uint32_t(SimdOp::I8x16Neg):
  10074          case uint32_t(SimdOp::I16x8Neg):
  10075          case uint32_t(SimdOp::I16x8ExtendLowI8x16S):
  10076          case uint32_t(SimdOp::I16x8ExtendHighI8x16S):
  10077          case uint32_t(SimdOp::I16x8ExtendLowI8x16U):
  10078          case uint32_t(SimdOp::I16x8ExtendHighI8x16U):
  10079          case uint32_t(SimdOp::I32x4Neg):
  10080          case uint32_t(SimdOp::I32x4ExtendLowI16x8S):
  10081          case uint32_t(SimdOp::I32x4ExtendHighI16x8S):
  10082          case uint32_t(SimdOp::I32x4ExtendLowI16x8U):
  10083          case uint32_t(SimdOp::I32x4ExtendHighI16x8U):
  10084          case uint32_t(SimdOp::I32x4TruncSatF32x4S):
  10085          case uint32_t(SimdOp::I32x4TruncSatF32x4U):
  10086          case uint32_t(SimdOp::I64x2Neg):
  10087          case uint32_t(SimdOp::I64x2ExtendLowI32x4S):
  10088          case uint32_t(SimdOp::I64x2ExtendHighI32x4S):
  10089          case uint32_t(SimdOp::I64x2ExtendLowI32x4U):
  10090          case uint32_t(SimdOp::I64x2ExtendHighI32x4U):
  10091          case uint32_t(SimdOp::F32x4Abs):
  10092          case uint32_t(SimdOp::F32x4Neg):
  10093          case uint32_t(SimdOp::F32x4Sqrt):
  10094          case uint32_t(SimdOp::F32x4ConvertI32x4S):
  10095          case uint32_t(SimdOp::F32x4ConvertI32x4U):
  10096          case uint32_t(SimdOp::F64x2Abs):
  10097          case uint32_t(SimdOp::F64x2Neg):
  10098          case uint32_t(SimdOp::F64x2Sqrt):
  10099          case uint32_t(SimdOp::V128Not):
  10100          case uint32_t(SimdOp::I8x16Popcnt):
  10101          case uint32_t(SimdOp::I8x16Abs):
  10102          case uint32_t(SimdOp::I16x8Abs):
  10103          case uint32_t(SimdOp::I32x4Abs):
  10104          case uint32_t(SimdOp::I64x2Abs):
  10105          case uint32_t(SimdOp::F32x4Ceil):
  10106          case uint32_t(SimdOp::F32x4Floor):
  10107          case uint32_t(SimdOp::F32x4Trunc):
  10108          case uint32_t(SimdOp::F32x4Nearest):
  10109          case uint32_t(SimdOp::F64x2Ceil):
  10110          case uint32_t(SimdOp::F64x2Floor):
  10111          case uint32_t(SimdOp::F64x2Trunc):
  10112          case uint32_t(SimdOp::F64x2Nearest):
  10113          case uint32_t(SimdOp::F32x4DemoteF64x2Zero):
  10114          case uint32_t(SimdOp::F64x2PromoteLowF32x4):
  10115          case uint32_t(SimdOp::F64x2ConvertLowI32x4S):
  10116          case uint32_t(SimdOp::F64x2ConvertLowI32x4U):
  10117          case uint32_t(SimdOp::I32x4TruncSatF64x2SZero):
  10118          case uint32_t(SimdOp::I32x4TruncSatF64x2UZero):
  10119          case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16S):
  10120          case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16U):
  10121          case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8S):
  10122          case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8U):
  10123            CHECK(emitUnarySimd128(SimdOp(op.b1)));
  10124          case uint32_t(SimdOp::V128AnyTrue):
  10125          case uint32_t(SimdOp::I8x16AllTrue):
  10126          case uint32_t(SimdOp::I16x8AllTrue):
  10127          case uint32_t(SimdOp::I32x4AllTrue):
  10128          case uint32_t(SimdOp::I64x2AllTrue):
  10129          case uint32_t(SimdOp::I8x16Bitmask):
  10130          case uint32_t(SimdOp::I16x8Bitmask):
  10131          case uint32_t(SimdOp::I32x4Bitmask):
  10132          case uint32_t(SimdOp::I64x2Bitmask):
  10133            CHECK(emitReduceSimd128(SimdOp(op.b1)));
  10134          case uint32_t(SimdOp::I8x16Shl):
  10135          case uint32_t(SimdOp::I8x16ShrS):
  10136          case uint32_t(SimdOp::I8x16ShrU):
  10137          case uint32_t(SimdOp::I16x8Shl):
  10138          case uint32_t(SimdOp::I16x8ShrS):
  10139          case uint32_t(SimdOp::I16x8ShrU):
  10140          case uint32_t(SimdOp::I32x4Shl):
  10141          case uint32_t(SimdOp::I32x4ShrS):
  10142          case uint32_t(SimdOp::I32x4ShrU):
  10143          case uint32_t(SimdOp::I64x2Shl):
  10144          case uint32_t(SimdOp::I64x2ShrS):
  10145          case uint32_t(SimdOp::I64x2ShrU):
  10146            CHECK(emitShiftSimd128(SimdOp(op.b1)));
  10147          case uint32_t(SimdOp::I8x16ExtractLaneS):
  10148          case uint32_t(SimdOp::I8x16ExtractLaneU):
  10149            CHECK(emitExtractLaneSimd128(ValType::I32, 16, SimdOp(op.b1)));
  10150          case uint32_t(SimdOp::I16x8ExtractLaneS):
  10151          case uint32_t(SimdOp::I16x8ExtractLaneU):
  10152            CHECK(emitExtractLaneSimd128(ValType::I32, 8, SimdOp(op.b1)));
  10153          case uint32_t(SimdOp::I32x4ExtractLane):
  10154            CHECK(emitExtractLaneSimd128(ValType::I32, 4, SimdOp(op.b1)));
  10155          case uint32_t(SimdOp::I64x2ExtractLane):
  10156            CHECK(emitExtractLaneSimd128(ValType::I64, 2, SimdOp(op.b1)));
  10157          case uint32_t(SimdOp::F32x4ExtractLane):
  10158            CHECK(emitExtractLaneSimd128(ValType::F32, 4, SimdOp(op.b1)));
  10159          case uint32_t(SimdOp::F64x2ExtractLane):
  10160            CHECK(emitExtractLaneSimd128(ValType::F64, 2, SimdOp(op.b1)));
  10161          case uint32_t(SimdOp::I8x16ReplaceLane):
  10162            CHECK(emitReplaceLaneSimd128(ValType::I32, 16, SimdOp(op.b1)));
  10163          case uint32_t(SimdOp::I16x8ReplaceLane):
  10164            CHECK(emitReplaceLaneSimd128(ValType::I32, 8, SimdOp(op.b1)));
  10165          case uint32_t(SimdOp::I32x4ReplaceLane):
  10166            CHECK(emitReplaceLaneSimd128(ValType::I32, 4, SimdOp(op.b1)));
  10167          case uint32_t(SimdOp::I64x2ReplaceLane):
  10168            CHECK(emitReplaceLaneSimd128(ValType::I64, 2, SimdOp(op.b1)));
  10169          case uint32_t(SimdOp::F32x4ReplaceLane):
  10170            CHECK(emitReplaceLaneSimd128(ValType::F32, 4, SimdOp(op.b1)));
  10171          case uint32_t(SimdOp::F64x2ReplaceLane):
  10172            CHECK(emitReplaceLaneSimd128(ValType::F64, 2, SimdOp(op.b1)));
  10173          case uint32_t(SimdOp::V128Bitselect):
  10174            CHECK(emitTernarySimd128(SimdOp(op.b1)));
  10175          case uint32_t(SimdOp::I8x16Shuffle):
  10176            CHECK(emitShuffleSimd128());
          case uint32_t(SimdOp::V128Load8Splat):
            CHECK(emitLoadSplatSimd128(Scalar::Uint8, SimdOp::I8x16Splat));
          case uint32_t(SimdOp::V128Load16Splat):
            CHECK(emitLoadSplatSimd128(Scalar::Uint16, SimdOp::I16x8Splat));
          // NOTE(review): the 32/64-bit splat loads pass Float32/Float64
          // scalar types even though the paired splat ops are integer
          // (I32x4Splat/I64x2Splat).  Presumably intentional — a plain
          // 32/64-bit memory load is bit-identical either way — but confirm
          // against emitLoadSplatSimd128, which is outside this view.
          case uint32_t(SimdOp::V128Load32Splat):
            CHECK(emitLoadSplatSimd128(Scalar::Float32, SimdOp::I32x4Splat));
          case uint32_t(SimdOp::V128Load64Splat):
            CHECK(emitLoadSplatSimd128(Scalar::Float64, SimdOp::I64x2Splat));
  10185          case uint32_t(SimdOp::V128Load8x8S):
  10186          case uint32_t(SimdOp::V128Load8x8U):
  10187          case uint32_t(SimdOp::V128Load16x4S):
  10188          case uint32_t(SimdOp::V128Load16x4U):
  10189          case uint32_t(SimdOp::V128Load32x2S):
  10190          case uint32_t(SimdOp::V128Load32x2U):
  10191            CHECK(emitLoadExtendSimd128(SimdOp(op.b1)));
  10192          case uint32_t(SimdOp::V128Load32Zero):
  10193            CHECK(emitLoadZeroSimd128(Scalar::Float32, 4));
  10194          case uint32_t(SimdOp::V128Load64Zero):
  10195            CHECK(emitLoadZeroSimd128(Scalar::Float64, 8));
  10196          case uint32_t(SimdOp::V128Load8Lane):
  10197            CHECK(emitLoadLaneSimd128(1));
  10198          case uint32_t(SimdOp::V128Load16Lane):
  10199            CHECK(emitLoadLaneSimd128(2));
  10200          case uint32_t(SimdOp::V128Load32Lane):
  10201            CHECK(emitLoadLaneSimd128(4));
  10202          case uint32_t(SimdOp::V128Load64Lane):
  10203            CHECK(emitLoadLaneSimd128(8));
  10204          case uint32_t(SimdOp::V128Store8Lane):
  10205            CHECK(emitStoreLaneSimd128(1));
  10206          case uint32_t(SimdOp::V128Store16Lane):
  10207            CHECK(emitStoreLaneSimd128(2));
  10208          case uint32_t(SimdOp::V128Store32Lane):
  10209            CHECK(emitStoreLaneSimd128(4));
  10210          case uint32_t(SimdOp::V128Store64Lane):
  10211            CHECK(emitStoreLaneSimd128(8));
  10212 #  ifdef ENABLE_WASM_RELAXED_SIMD
  10213          case uint32_t(SimdOp::F32x4RelaxedMadd):
  10214          case uint32_t(SimdOp::F32x4RelaxedNmadd):
  10215          case uint32_t(SimdOp::F64x2RelaxedMadd):
  10216          case uint32_t(SimdOp::F64x2RelaxedNmadd):
  10217          case uint32_t(SimdOp::I8x16RelaxedLaneSelect):
  10218          case uint32_t(SimdOp::I16x8RelaxedLaneSelect):
  10219          case uint32_t(SimdOp::I32x4RelaxedLaneSelect):
  10220          case uint32_t(SimdOp::I64x2RelaxedLaneSelect):
  10221          case uint32_t(SimdOp::I32x4RelaxedDotI8x16I7x16AddS): {
  10222            if (!codeMeta().v128RelaxedEnabled()) {
  10223              return iter().unrecognizedOpcode(&op);
  10224            }
  10225            CHECK(emitTernarySimd128(SimdOp(op.b1)));
  10226          }
  10227          case uint32_t(SimdOp::F32x4RelaxedMin):
  10228          case uint32_t(SimdOp::F32x4RelaxedMax):
  10229          case uint32_t(SimdOp::F64x2RelaxedMin):
  10230          case uint32_t(SimdOp::F64x2RelaxedMax): {
  10231            if (!codeMeta().v128RelaxedEnabled()) {
  10232              return iter().unrecognizedOpcode(&op);
  10233            }
  10234            // These aren't really commutative, because at least on Intel, the
  10235            // behaviour in the presence of NaNs depends on the order of the
  10236            // operands.  And we need to have that ordering fixed, so that we
  10237            // can produce the same results as baseline.  See bug 1946618.
  10238            CHECK(emitBinarySimd128(/* commutative= */ false, SimdOp(op.b1)));
  10239          }
  10240          case uint32_t(SimdOp::I16x8RelaxedQ15MulrS): {
  10241            if (!codeMeta().v128RelaxedEnabled()) {
  10242              return iter().unrecognizedOpcode(&op);
  10243            }
  10244            CHECK(emitBinarySimd128(/* commutative= */ true, SimdOp(op.b1)));
  10245          }
  10246          case uint32_t(SimdOp::I32x4RelaxedTruncF32x4S):
  10247          case uint32_t(SimdOp::I32x4RelaxedTruncF32x4U):
  10248          case uint32_t(SimdOp::I32x4RelaxedTruncF64x2SZero):
  10249          case uint32_t(SimdOp::I32x4RelaxedTruncF64x2UZero): {
  10250            if (!codeMeta().v128RelaxedEnabled()) {
  10251              return iter().unrecognizedOpcode(&op);
  10252            }
  10253            CHECK(emitUnarySimd128(SimdOp(op.b1)));
  10254          }
  10255          case uint32_t(SimdOp::I8x16RelaxedSwizzle):
  10256          case uint32_t(SimdOp::I16x8RelaxedDotI8x16I7x16S): {
  10257            if (!codeMeta().v128RelaxedEnabled()) {
  10258              return iter().unrecognizedOpcode(&op);
  10259            }
  10260            CHECK(emitBinarySimd128(/* commutative= */ false, SimdOp(op.b1)));
  10261          }
  10262 #  endif
  10263 
  10264          default:
  10265            return iter().unrecognizedOpcode(&op);
  10266        }  // switch (op.b1)
  10267        break;
  10268      }
  10269 #endif
  10270 
  10271      // Miscellaneous operations
  10272      case uint16_t(Op::MiscPrefix): {
  10273        switch (op.b1) {
  10274          case uint32_t(MiscOp::I32TruncSatF32S):
  10275          case uint32_t(MiscOp::I32TruncSatF32U):
  10276            CHECK(emitTruncate(ValType::F32, ValType::I32,
  10277                               MiscOp(op.b1) == MiscOp::I32TruncSatF32U, true));
  10278          case uint32_t(MiscOp::I32TruncSatF64S):
  10279          case uint32_t(MiscOp::I32TruncSatF64U):
  10280            CHECK(emitTruncate(ValType::F64, ValType::I32,
  10281                               MiscOp(op.b1) == MiscOp::I32TruncSatF64U, true));
  10282          case uint32_t(MiscOp::I64TruncSatF32S):
  10283          case uint32_t(MiscOp::I64TruncSatF32U):
  10284            CHECK(emitTruncate(ValType::F32, ValType::I64,
  10285                               MiscOp(op.b1) == MiscOp::I64TruncSatF32U, true));
  10286          case uint32_t(MiscOp::I64TruncSatF64S):
  10287          case uint32_t(MiscOp::I64TruncSatF64U):
  10288            CHECK(emitTruncate(ValType::F64, ValType::I64,
  10289                               MiscOp(op.b1) == MiscOp::I64TruncSatF64U, true));
  10290          case uint32_t(MiscOp::MemoryCopy):
  10291            CHECK(emitMemCopy());
  10292          case uint32_t(MiscOp::DataDrop):
  10293            CHECK(emitDataOrElemDrop(/*isData=*/true));
  10294          case uint32_t(MiscOp::MemoryFill):
  10295            CHECK(emitMemFill());
  10296          case uint32_t(MiscOp::MemoryInit):
  10297            CHECK(emitMemInit());
  10298          case uint32_t(MiscOp::TableCopy):
  10299            CHECK(emitTableCopy());
  10300          case uint32_t(MiscOp::ElemDrop):
  10301            CHECK(emitDataOrElemDrop(/*isData=*/false));
  10302          case uint32_t(MiscOp::TableInit):
  10303            CHECK(emitTableInit());
  10304          case uint32_t(MiscOp::TableFill):
  10305            CHECK(emitTableFill());
#if ENABLE_WASM_MEMORY_CONTROL
          // NOTE(review): this guard uses `#if` while the SIMD section above
          // uses `#ifdef ENABLE_WASM_SIMD`.  `#if` evaluates an undefined
          // macro as 0, so it works either way, but the styles differ and
          // `#if` would trip -Wundef — confirm how the macro is defined
          // before normalizing.
          case uint32_t(MiscOp::MemoryDiscard): {
            // Compile-time support alone is not enough; the feature must
            // also be enabled at runtime for this module.
            if (!codeMeta().memoryControlEnabled()) {
              return iter().unrecognizedOpcode(&op);
            }
            CHECK(emitMemDiscard());
          }
#endif
  10314          case uint32_t(MiscOp::TableGrow):
  10315            CHECK(emitTableGrow());
  10316          case uint32_t(MiscOp::TableSize):
  10317            CHECK(emitTableSize());
  10318          default:
  10319            return iter().unrecognizedOpcode(&op);
  10320        }
  10321        break;
  10322      }
  10323 
  10324      // Thread operations
  10325      case uint16_t(Op::ThreadPrefix): {
  10326        // Though thread ops can be used on nonshared memories, we make them
  10327        // unavailable if shared memory has been disabled in the prefs, for
  10328        // maximum predictability and safety and consistency with JS.
  10329        if (codeMeta().sharedMemoryEnabled() == Shareable::False) {
  10330          return iter().unrecognizedOpcode(&op);
  10331        }
  10332        switch (op.b1) {
  10333          case uint32_t(ThreadOp::Notify):
  10334            CHECK(emitNotify());
  10335 
  10336          case uint32_t(ThreadOp::I32Wait):
  10337            CHECK(emitWait(ValType::I32, 4));
  10338          case uint32_t(ThreadOp::I64Wait):
  10339            CHECK(emitWait(ValType::I64, 8));
  10340          case uint32_t(ThreadOp::Fence):
  10341            CHECK(emitFence());
  10342 
  10343          case uint32_t(ThreadOp::I32AtomicLoad):
  10344            CHECK(emitAtomicLoad(ValType::I32, Scalar::Int32));
  10345          case uint32_t(ThreadOp::I64AtomicLoad):
  10346            CHECK(emitAtomicLoad(ValType::I64, Scalar::Int64));
  10347          case uint32_t(ThreadOp::I32AtomicLoad8U):
  10348            CHECK(emitAtomicLoad(ValType::I32, Scalar::Uint8));
  10349          case uint32_t(ThreadOp::I32AtomicLoad16U):
  10350            CHECK(emitAtomicLoad(ValType::I32, Scalar::Uint16));
  10351          case uint32_t(ThreadOp::I64AtomicLoad8U):
  10352            CHECK(emitAtomicLoad(ValType::I64, Scalar::Uint8));
  10353          case uint32_t(ThreadOp::I64AtomicLoad16U):
  10354            CHECK(emitAtomicLoad(ValType::I64, Scalar::Uint16));
  10355          case uint32_t(ThreadOp::I64AtomicLoad32U):
  10356            CHECK(emitAtomicLoad(ValType::I64, Scalar::Uint32));
  10357 
  10358          case uint32_t(ThreadOp::I32AtomicStore):
  10359            CHECK(emitAtomicStore(ValType::I32, Scalar::Int32));
  10360          case uint32_t(ThreadOp::I64AtomicStore):
  10361            CHECK(emitAtomicStore(ValType::I64, Scalar::Int64));
  10362          case uint32_t(ThreadOp::I32AtomicStore8U):
  10363            CHECK(emitAtomicStore(ValType::I32, Scalar::Uint8));
  10364          case uint32_t(ThreadOp::I32AtomicStore16U):
  10365            CHECK(emitAtomicStore(ValType::I32, Scalar::Uint16));
  10366          case uint32_t(ThreadOp::I64AtomicStore8U):
  10367            CHECK(emitAtomicStore(ValType::I64, Scalar::Uint8));
  10368          case uint32_t(ThreadOp::I64AtomicStore16U):
  10369            CHECK(emitAtomicStore(ValType::I64, Scalar::Uint16));
  10370          case uint32_t(ThreadOp::I64AtomicStore32U):
  10371            CHECK(emitAtomicStore(ValType::I64, Scalar::Uint32));
  10372 
  10373          case uint32_t(ThreadOp::I32AtomicAdd):
  10374            CHECK(emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicOp::Add));
  10375          case uint32_t(ThreadOp::I64AtomicAdd):
  10376            CHECK(emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicOp::Add));
  10377          case uint32_t(ThreadOp::I32AtomicAdd8U):
  10378            CHECK(emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicOp::Add));
  10379          case uint32_t(ThreadOp::I32AtomicAdd16U):
  10380            CHECK(emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicOp::Add));
  10381          case uint32_t(ThreadOp::I64AtomicAdd8U):
  10382            CHECK(emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicOp::Add));
  10383          case uint32_t(ThreadOp::I64AtomicAdd16U):
  10384            CHECK(emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicOp::Add));
  10385          case uint32_t(ThreadOp::I64AtomicAdd32U):
  10386            CHECK(emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicOp::Add));
  10387 
  10388          case uint32_t(ThreadOp::I32AtomicSub):
  10389            CHECK(emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicOp::Sub));
  10390          case uint32_t(ThreadOp::I64AtomicSub):
  10391            CHECK(emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicOp::Sub));
  10392          case uint32_t(ThreadOp::I32AtomicSub8U):
  10393            CHECK(emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicOp::Sub));
  10394          case uint32_t(ThreadOp::I32AtomicSub16U):
  10395            CHECK(emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicOp::Sub));
  10396          case uint32_t(ThreadOp::I64AtomicSub8U):
  10397            CHECK(emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicOp::Sub));
  10398          case uint32_t(ThreadOp::I64AtomicSub16U):
  10399            CHECK(emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicOp::Sub));
  10400          case uint32_t(ThreadOp::I64AtomicSub32U):
  10401            CHECK(emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicOp::Sub));
  10402 
  10403          case uint32_t(ThreadOp::I32AtomicAnd):
  10404            CHECK(emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicOp::And));
  10405          case uint32_t(ThreadOp::I64AtomicAnd):
  10406            CHECK(emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicOp::And));
  10407          case uint32_t(ThreadOp::I32AtomicAnd8U):
  10408            CHECK(emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicOp::And));
  10409          case uint32_t(ThreadOp::I32AtomicAnd16U):
  10410            CHECK(emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicOp::And));
  10411          case uint32_t(ThreadOp::I64AtomicAnd8U):
  10412            CHECK(emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicOp::And));
  10413          case uint32_t(ThreadOp::I64AtomicAnd16U):
  10414            CHECK(emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicOp::And));
  10415          case uint32_t(ThreadOp::I64AtomicAnd32U):
  10416            CHECK(emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicOp::And));
  10417 
  10418          case uint32_t(ThreadOp::I32AtomicOr):
  10419            CHECK(emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicOp::Or));
  10420          case uint32_t(ThreadOp::I64AtomicOr):
  10421            CHECK(emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicOp::Or));
  10422          case uint32_t(ThreadOp::I32AtomicOr8U):
  10423            CHECK(emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicOp::Or));
  10424          case uint32_t(ThreadOp::I32AtomicOr16U):
  10425            CHECK(emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicOp::Or));
  10426          case uint32_t(ThreadOp::I64AtomicOr8U):
  10427            CHECK(emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicOp::Or));
  10428          case uint32_t(ThreadOp::I64AtomicOr16U):
  10429            CHECK(emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicOp::Or));
  10430          case uint32_t(ThreadOp::I64AtomicOr32U):
  10431            CHECK(emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicOp::Or));
  10432 
  10433          case uint32_t(ThreadOp::I32AtomicXor):
  10434            CHECK(emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicOp::Xor));
  10435          case uint32_t(ThreadOp::I64AtomicXor):
  10436            CHECK(emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicOp::Xor));
  10437          case uint32_t(ThreadOp::I32AtomicXor8U):
  10438            CHECK(emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicOp::Xor));
  10439          case uint32_t(ThreadOp::I32AtomicXor16U):
  10440            CHECK(emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicOp::Xor));
  10441          case uint32_t(ThreadOp::I64AtomicXor8U):
  10442            CHECK(emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicOp::Xor));
  10443          case uint32_t(ThreadOp::I64AtomicXor16U):
  10444            CHECK(emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicOp::Xor));
  10445          case uint32_t(ThreadOp::I64AtomicXor32U):
  10446            CHECK(emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicOp::Xor));
  10447 
  10448          case uint32_t(ThreadOp::I32AtomicXchg):
  10449            CHECK(emitAtomicXchg(ValType::I32, Scalar::Int32));
  10450          case uint32_t(ThreadOp::I64AtomicXchg):
  10451            CHECK(emitAtomicXchg(ValType::I64, Scalar::Int64));
  10452          case uint32_t(ThreadOp::I32AtomicXchg8U):
  10453            CHECK(emitAtomicXchg(ValType::I32, Scalar::Uint8));
  10454          case uint32_t(ThreadOp::I32AtomicXchg16U):
  10455            CHECK(emitAtomicXchg(ValType::I32, Scalar::Uint16));
  10456          case uint32_t(ThreadOp::I64AtomicXchg8U):
  10457            CHECK(emitAtomicXchg(ValType::I64, Scalar::Uint8));
  10458          case uint32_t(ThreadOp::I64AtomicXchg16U):
  10459            CHECK(emitAtomicXchg(ValType::I64, Scalar::Uint16));
  10460          case uint32_t(ThreadOp::I64AtomicXchg32U):
  10461            CHECK(emitAtomicXchg(ValType::I64, Scalar::Uint32));
  10462 
  10463          case uint32_t(ThreadOp::I32AtomicCmpXchg):
  10464            CHECK(emitAtomicCmpXchg(ValType::I32, Scalar::Int32));
  10465          case uint32_t(ThreadOp::I64AtomicCmpXchg):
  10466            CHECK(emitAtomicCmpXchg(ValType::I64, Scalar::Int64));
  10467          case uint32_t(ThreadOp::I32AtomicCmpXchg8U):
  10468            CHECK(emitAtomicCmpXchg(ValType::I32, Scalar::Uint8));
  10469          case uint32_t(ThreadOp::I32AtomicCmpXchg16U):
  10470            CHECK(emitAtomicCmpXchg(ValType::I32, Scalar::Uint16));
  10471          case uint32_t(ThreadOp::I64AtomicCmpXchg8U):
  10472            CHECK(emitAtomicCmpXchg(ValType::I64, Scalar::Uint8));
  10473          case uint32_t(ThreadOp::I64AtomicCmpXchg16U):
  10474            CHECK(emitAtomicCmpXchg(ValType::I64, Scalar::Uint16));
  10475          case uint32_t(ThreadOp::I64AtomicCmpXchg32U):
  10476            CHECK(emitAtomicCmpXchg(ValType::I64, Scalar::Uint32));
  10477 
  10478          default:
  10479            return iter().unrecognizedOpcode(&op);
  10480        }
  10481        break;
  10482      }
  10483 
  10484      // asm.js-specific operators
  10485      case uint16_t(Op::MozPrefix): {
  10486        if (op.b1 == uint32_t(MozOp::CallBuiltinModuleFunc)) {
  10487          if (!codeMeta().isBuiltinModule()) {
  10488            return iter().unrecognizedOpcode(&op);
  10489          }
  10490          CHECK(emitCallBuiltinModuleFunc());
  10491        }
  10492 #ifdef ENABLE_WASM_JSPI
  10493        if (op.b1 == uint32_t(MozOp::StackSwitch)) {
  10494          if (!codeMeta().isBuiltinModule() ||
  10495              !codeMeta().jsPromiseIntegrationEnabled()) {
  10496            return iter().unrecognizedOpcode(&op);
  10497          }
  10498          CHECK(emitStackSwitch());
  10499        }
  10500 #endif
  10501 
  10502        if (!codeMeta().isAsmJS()) {
  10503          return iter().unrecognizedOpcode(&op);
  10504        }
  10505        switch (op.b1) {
  10506          case uint32_t(MozOp::TeeGlobal):
  10507            CHECK(emitTeeGlobal());
  10508          case uint32_t(MozOp::I32Min):
  10509          case uint32_t(MozOp::I32Max):
  10510            CHECK(emitMinMax(ValType::I32, MIRType::Int32,
  10511                             MozOp(op.b1) == MozOp::I32Max));
  10512          case uint32_t(MozOp::I32Neg):
  10513            CHECK(emitUnaryWithType<MWasmNeg>(ValType::I32, MIRType::Int32));
  10514          case uint32_t(MozOp::I32BitNot):
  10515            CHECK(emitBitNot(ValType::I32, MIRType::Int32));
  10516          case uint32_t(MozOp::I32Abs):
  10517            CHECK(emitUnaryWithType<MAbs>(ValType::I32, MIRType::Int32));
  10518          case uint32_t(MozOp::F32TeeStoreF64):
  10519            CHECK(emitTeeStoreWithCoercion(ValType::F32, Scalar::Float64));
  10520          case uint32_t(MozOp::F64TeeStoreF32):
  10521            CHECK(emitTeeStoreWithCoercion(ValType::F64, Scalar::Float32));
  10522          case uint32_t(MozOp::I32TeeStore8):
  10523            CHECK(emitTeeStore(ValType::I32, Scalar::Int8));
  10524          case uint32_t(MozOp::I32TeeStore16):
  10525            CHECK(emitTeeStore(ValType::I32, Scalar::Int16));
  10526          case uint32_t(MozOp::I64TeeStore8):
  10527            CHECK(emitTeeStore(ValType::I64, Scalar::Int8));
  10528          case uint32_t(MozOp::I64TeeStore16):
  10529            CHECK(emitTeeStore(ValType::I64, Scalar::Int16));
  10530          case uint32_t(MozOp::I64TeeStore32):
  10531            CHECK(emitTeeStore(ValType::I64, Scalar::Int32));
  10532          case uint32_t(MozOp::I32TeeStore):
  10533            CHECK(emitTeeStore(ValType::I32, Scalar::Int32));
  10534          case uint32_t(MozOp::I64TeeStore):
  10535            CHECK(emitTeeStore(ValType::I64, Scalar::Int64));
  10536          case uint32_t(MozOp::F32TeeStore):
  10537            CHECK(emitTeeStore(ValType::F32, Scalar::Float32));
  10538          case uint32_t(MozOp::F64TeeStore):
  10539            CHECK(emitTeeStore(ValType::F64, Scalar::Float64));
  10540          case uint32_t(MozOp::F64Mod):
  10541            CHECK(emitRem(ValType::F64, MIRType::Double,
  10542                          /* isUnsigned = */ false));
  10543          case uint32_t(MozOp::F64SinNative):
  10544            CHECK(emitUnaryMathBuiltinCall(SASigSinNativeD));
  10545          case uint32_t(MozOp::F64SinFdlibm):
  10546            CHECK(emitUnaryMathBuiltinCall(SASigSinFdlibmD));
  10547          case uint32_t(MozOp::F64CosNative):
  10548            CHECK(emitUnaryMathBuiltinCall(SASigCosNativeD));
  10549          case uint32_t(MozOp::F64CosFdlibm):
  10550            CHECK(emitUnaryMathBuiltinCall(SASigCosFdlibmD));
  10551          case uint32_t(MozOp::F64TanNative):
  10552            CHECK(emitUnaryMathBuiltinCall(SASigTanNativeD));
  10553          case uint32_t(MozOp::F64TanFdlibm):
  10554            CHECK(emitUnaryMathBuiltinCall(SASigTanFdlibmD));
  10555          case uint32_t(MozOp::F64Asin):
  10556            CHECK(emitUnaryMathBuiltinCall(SASigASinD));
  10557          case uint32_t(MozOp::F64Acos):
  10558            CHECK(emitUnaryMathBuiltinCall(SASigACosD));
  10559          case uint32_t(MozOp::F64Atan):
  10560            CHECK(emitUnaryMathBuiltinCall(SASigATanD));
  10561          case uint32_t(MozOp::F64Exp):
  10562            CHECK(emitUnaryMathBuiltinCall(SASigExpD));
  10563          case uint32_t(MozOp::F64Log):
  10564            CHECK(emitUnaryMathBuiltinCall(SASigLogD));
  10565          case uint32_t(MozOp::F64Pow):
  10566            CHECK(emitBinaryMathBuiltinCall(SASigPowD));
  10567          case uint32_t(MozOp::F64Atan2):
  10568            CHECK(emitBinaryMathBuiltinCall(SASigATan2D));
  10569          case uint32_t(MozOp::OldCallDirect):
  10570            CHECK(emitCall(/* asmJSFuncDef = */ true));
  10571          case uint32_t(MozOp::OldCallIndirect):
  10572            CHECK(emitCallIndirect(/* oldStyle = */ true));
  10573 
  10574          default:
  10575            return iter().unrecognizedOpcode(&op);
  10576        }
  10577        break;
  10578      }
  10579 
  10580      default:
  10581        return iter().unrecognizedOpcode(&op);
  10582    }
  10583  }
  10584 
  10585  MOZ_CRASH("unreachable");
  10586 
  10587 #undef CHECK
  10588 }
  10589 
  10590 }  // end anonymous namespace
  10591 
// Compile the root function: set the inlining budget, build the MIR graph
// (inlining callees as the heuristics permit), and fold this function's
// statistics and consumed inlining budget back into the module-wide
// metadata.  Returns false on compile failure or OOM.
bool RootCompiler::generate() {
  // Only activate branch hinting if the option is enabled and some hints were
  // parsed.
  if (codeMeta_.branchHintingEnabled() && !codeMeta_.branchHints.isEmpty()) {
    compileInfo_.setBranchHinting(true);
  }

  // Figure out what the inlining budget for this function is.  If we've
  // already exceeded the module-level limit, the budget is zero.  See
  // "[SMDOC] Per-function and per-module inlining limits" (WasmHeuristics.h)
  if (codeTailMeta_) {
    auto guard = codeTailMeta_->inliningBudget.lock();

    if (guard.get() > 0) {
      // The per-function budget is a fixed fraction of the code section
      // size, clamped by whatever remains of the module-level budget.
      localInliningBudget_ =
          int64_t(codeMeta_.codeSectionSize()) * PerFunctionMaxInliningRatio;
      localInliningBudget_ =
          std::min<int64_t>(localInliningBudget_, guard.get());
    } else {
      localInliningBudget_ = 0;
    }
    MOZ_ASSERT(localInliningBudget_ >= 0);
  } else {
    // No tail metadata available; no inlining budget.
    localInliningBudget_ = 0;
  }

  // Build the MIR graph
  FunctionCompiler funcCompiler(*this, decoder_, func_, locals_, compileInfo_);
  if (!funcCompiler.initRoot() || !funcCompiler.startBlock() ||
      !funcCompiler.emitBodyExprs()) {
    return false;
  }
  funcCompiler.finish();
  observedFeatures_ = funcCompiler.featureUsage();

  MOZ_ASSERT(loopDepth_ == 0);

  // Accumulate this function's compile statistics, including how much
  // bytecode was inlined into it, for later merging into module totals.
  funcStats_.numFuncs += 1;
  funcStats_.bytecodeSize += func_.bytecodeSize();
  funcStats_.inlinedDirectCallCount += inliningStats_.inlinedDirectFunctions;
  funcStats_.inlinedCallRefCount += inliningStats_.inlinedCallRefFunctions;
  funcStats_.inlinedDirectCallBytecodeSize +=
      inliningStats_.inlinedDirectBytecodeSize;
  funcStats_.inlinedCallRefBytecodeSize +=
      inliningStats_.inlinedCallRefBytecodeSize;
  funcStats_.numLargeFunctionBackoffs +=
      inliningStats_.largeFunctionBackoff ? 1 : 0;

  if (codeTailMeta_) {
    auto guard = codeTailMeta_->inliningBudget.lock();
    // Update the module's inlining budget accordingly.  If it is already
    // negative, no more inlining for the module can happen, so there's no
    // point in updating it further.
    if (guard.get() >= 0) {
      guard.get() -= int64_t(inliningStats_.inlinedDirectBytecodeSize);
      guard.get() -= int64_t(inliningStats_.inlinedCallRefBytecodeSize);
      if (guard.get() < 0) {
        JS_LOG(wasmPerf, Info,
               "CM=..%06lx  RC::generate            "
               "Inlining budget for entire module exceeded",
               0xFFFFFF & (unsigned long)uintptr_t(&codeMeta_));
      }
    }
    // If this particular root function overran the function-level
    // limit, note that in the module too.
    if (localInliningBudget_ < 0) {
      funcStats_.numInliningBudgetOverruns += 1;
    }
  }

  return true;
}
  10664 
// Begin inlining `calleeFuncIndex` into `callerFuncIndex` at `callerOffset`:
// update inlining statistics and the per-function budget, push the caller's
// offset onto the stack of inlined caller offsets (popped again by
// finishInlineCall), snapshot that stack into the shared inlining context,
// and allocate a CompileInfo with `numLocals` slots for the inlined body.
// Returns nullptr on OOM.
CompileInfo* RootCompiler::startInlineCall(
    uint32_t callerFuncIndex, BytecodeOffset callerOffset,
    uint32_t calleeFuncIndex, uint32_t numLocals, size_t inlineeBytecodeSize,
    InliningHeuristics::CallKind callKind) {
  // Charge the inlinee's bytecode size against the appropriate per-kind
  // statistics bucket.
  if (callKind == InliningHeuristics::CallKind::Direct) {
    inliningStats_.inlinedDirectBytecodeSize += inlineeBytecodeSize;
    inliningStats_.inlinedDirectFunctions += 1;
  } else {
    MOZ_ASSERT(callKind == InliningHeuristics::CallKind::CallRef);
    inliningStats_.inlinedCallRefBytecodeSize += inlineeBytecodeSize;
    inliningStats_.inlinedCallRefFunctions += 1;
  }

  // Update the inlining budget accordingly.  If it is already negative, no
  // more inlining within this root function can happen, so there's no
  // point in updating it further.
  if (localInliningBudget_ >= 0) {
    localInliningBudget_ -= int64_t(inlineeBytecodeSize);
#ifdef JS_JITSPEW
    // NOTE(review): this log is JS_JITSPEW-guarded, while the analogous log
    // in RootCompiler::generate is not — presumably intentional, but worth
    // confirming.
    if (localInliningBudget_ <= 0) {
      JS_LOG(wasmPerf, Info,
             "CM=..%06lx  RC::startInlineCall     "
             "Inlining budget for fI=%u exceeded",
             0xFFFFFF & (unsigned long)uintptr_t(&codeMeta_), callerFuncIndex);
    }
#endif
  }

  // Add the callers offset to the stack of inlined caller offsets
  if (!inlinedCallerOffsets_.append(callerOffset)) {
    return nullptr;
  }

  // Cache a copy of the current stack of inlined caller offsets that can be
  // shared across all call sites
  InlinedCallerOffsets inlinedCallerOffsets;
  if (!inlinedCallerOffsets.appendAll(inlinedCallerOffsets_)) {
    return nullptr;
  }

  if (!inliningContext_.append(std::move(inlinedCallerOffsets),
                               &inlinedCallerOffsetsIndex_)) {
    return nullptr;
  }

  // Allocate the CompileInfo for the inlined body; it is owned by
  // compileInfos_ and must outlive MIR generation.
  UniqueCompileInfo compileInfo = MakeUnique<CompileInfo>(numLocals);
  if (!compileInfo || !compileInfos_.append(std::move(compileInfo))) {
    return nullptr;
  }
  return compileInfos_[compileInfos_.length() - 1].get();
}
  10716 
  10717 void RootCompiler::finishInlineCall() { inlinedCallerOffsets_.popBack(); }
  10718 
// Compile a batch of wasm functions with the Ion (optimizing) compiler.
// For each input function this decodes the locals, builds and optimizes a
// MIR graph via RootCompiler, lowers it to LIR, generates machine code into
// a shared macro-assembler, and records code ranges, stack maps, feature
// usage, and compile statistics into `code`.  Returns false on OOM or
// compile failure; `error` may carry a decode-error message.
bool wasm::IonCompileFunctions(const CodeMetadata& codeMeta,
                              const CodeTailMetadata* codeTailMeta,
                              const CompilerEnvironment& compilerEnv,
                              LifoAlloc& lifo,
                              const FuncCompileInputVector& inputs,
                              CompiledCode* code, UniqueChars* error) {
  MOZ_ASSERT(compilerEnv.tier() == Tier::Optimized);
  MOZ_ASSERT(compilerEnv.debug() == DebugEnabled::False);
  MOZ_ASSERT_IF(compilerEnv.mode() == CompileMode::LazyTiering, !!codeTailMeta);

  // We should not interact with the GC heap, nor allocate from it when we are
  // compiling wasm code. Ion data structures have some fields for GC objects
  // that we do not use, yet can confuse the static analysis here. Disable it
  // for this function.
  JS::AutoSuppressGCAnalysis nogc;

  TempAllocator alloc(&lifo);
  JitContext jitContext;
  MOZ_ASSERT(IsCompilingWasm());
  WasmMacroAssembler masm(alloc);
#if defined(JS_CODEGEN_ARM64)
  masm.SetStackPointer64(PseudoStackPointer64);
#endif

  // Swap in already-allocated empty vectors to avoid malloc/free.
  MOZ_ASSERT(code->empty());
  if (!code->swap(masm)) {
    return false;
  }

  // Create a description of the stack layout created by GenerateTrapExit().
  RegisterOffsets trapExitLayout;
  size_t trapExitLayoutNumWords;
  GenerateTrapExitRegisterOffsets(&trapExitLayout, &trapExitLayoutNumWords);

  for (const FuncCompileInput& func : inputs) {
    JitSpewCont(JitSpew_Codegen, "\n");
    JitSpew(JitSpew_Codegen,
            "# ================================"
            "==================================");
    JitSpew(JitSpew_Codegen, "# ==");
    JitSpew(JitSpew_Codegen,
            "# wasm::IonCompileFunctions: starting on function index %d",
            (int)func.index);

    Decoder d(func.begin, func.end, func.lineOrBytecode, error);

    // Build the local types vector.
    ValTypeVector locals;
    if (!DecodeLocalEntriesWithParams(d, codeMeta, func.index, &locals)) {
      return false;
    }

    // Set up for Ion compilation.
    RootCompiler rootCompiler(compilerEnv, codeMeta, codeTailMeta, alloc,
                              locals, func, d, masm.tryNotes(),
                              masm.inliningContext());
    if (!rootCompiler.generate()) {
      return false;
    }

    // Record observed feature usage
    FeatureUsage observedFeatures = rootCompiler.observedFeatures();
    code->featureUsage |= observedFeatures;

    // Compile MIR graph
    {
      rootCompiler.mirGen().spewBeginWasmFunction(func.index);
      jit::AutoSpewEndFunction spewEndFunction(&rootCompiler.mirGen());

      if (!OptimizeMIR(&rootCompiler.mirGen())) {
        return false;
      }

      // Lower the optimized MIR to LIR.
      LIRGraph* lir = GenerateLIR(&rootCompiler.mirGen());
      if (!lir) {
        return false;
      }

      // Remember how many unwind-info entries exist before codegen, so we
      // can tell below whether this function emitted any.
      size_t unwindInfoBefore = masm.codeRangeUnwindInfos().length();

      CodeGenerator codegen(&rootCompiler.mirGen(), lir, &masm, &codeMeta);

      TrapSiteDesc prologueTrapSiteDesc(
          wasm::BytecodeOffset(func.lineOrBytecode));
      FuncOffsets offsets;
      ArgTypeVector args(codeMeta.getFuncType(func.index));
      if (!codegen.generateWasm(CallIndirectId::forFunc(codeMeta, func.index),
                                prologueTrapSiteDesc, args, trapExitLayout,
                                trapExitLayoutNumWords, &offsets,
                                &code->stackMaps, &d)) {
        return false;
      }

      bool hasUnwindInfo =
          unwindInfoBefore != masm.codeRangeUnwindInfos().length();

      // Record this function's code range
      if (!code->codeRanges.emplaceBack(func.index, offsets, hasUnwindInfo)) {
        return false;
      }
    }

    // Hand the per-function perf spewer over to the compiled-code record so
    // profiling annotations can be emitted later.
    if (PerfEnabled()) {
      IonPerfSpewer spewer = std::move(rootCompiler.mirGen().perfSpewer());
      if (!code->funcIonSpewers.emplaceBack(func.index, std::move(spewer))) {
        return false;
      }
    }

    // Record this function's compilation stats
    code->compileStats.merge(rootCompiler.funcStats());

    // Record this function's specific feature usage
    if (!code->funcs.emplaceBack(func.index, observedFeatures)) {
      return false;
    }

    JitSpew(JitSpew_Codegen,
            "# wasm::IonCompileFunctions: completed function index %d",
            (int)func.index);
    JitSpew(JitSpew_Codegen, "# ==");
    JitSpew(JitSpew_Codegen,
            "# ================================"
            "==================================");
    JitSpewCont(JitSpew_Codegen, "\n");
  }

  // Flush the assembler and hand the generated artifacts back to `code`.
  masm.finish();
  if (masm.oom()) {
    return false;
  }

  return code->swap(masm);
}
  10854 
// Dump the Ion MIR graph for a single wasm function to `out` (used for
// diagnostics).  Runs the full MIR build/optimize/lower pipeline but
// discards the generated code.  Only functional in JS_JITSPEW builds; in
// other builds it prints a notice and returns true.
bool wasm::IonDumpFunction(const CompilerEnvironment& compilerEnv,
                          const CodeMetadata& codeMeta,
                          const FuncCompileInput& func, GenericPrinter& out,
                          UniqueChars* error) {
#ifdef JS_JITSPEW
  LifoAlloc lifo(TempAllocator::PreferredLifoChunkSize,
                 js::BackgroundMallocArena);
  TempAllocator alloc(&lifo);
  JitContext jitContext;
  Decoder d(func.begin, func.end, func.lineOrBytecode, error);

  // Decode the locals.
  ValTypeVector locals;
  if (!DecodeLocalEntriesWithParams(d, codeMeta, func.index, &locals)) {
    return false;
  }

  // No CodeTailMetadata is supplied, so (per RootCompiler::generate) the
  // inlining budget is zero and no inlining occurs during the dump.
  TryNoteVector tryNotes;
  InliningContext inliningContext;
  RootCompiler rootCompiler(compilerEnv, codeMeta, nullptr, alloc, locals, func,
                            d, tryNotes, inliningContext);
  MIRGenerator& mirGen = rootCompiler.mirGen();
  GraphSpewer graphSpewer(out, &codeMeta);

  // Attach the spewer before generation so all passes are captured.
  graphSpewer.begin();
  mirGen.setGraphSpewer(&graphSpewer);
  mirGen.spewBeginWasmFunction(func.index);

  if (!rootCompiler.generate() || !OptimizeMIR(&mirGen) ||
      !GenerateLIR(&mirGen)) {
    return false;
  }

  mirGen.spewEndFunction();
  graphSpewer.end();

#else
  out.printf("cannot dump Ion without --enable-jitspew");
#endif
  return true;
}
  10896 
  10897 bool js::wasm::IonPlatformSupport() {
  10898 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) ||       \
  10899    defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS64) ||    \
  10900    defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_LOONG64) || \
  10901    defined(JS_CODEGEN_RISCV64)
  10902  return true;
  10903 #else
  10904  return false;
  10905 #endif
  10906 }