tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

MIR-wasm.cpp (35548B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/MIR-wasm.h"
      8 
      9 #include "mozilla/ScopeExit.h"
     10 
     11 #include "jit/MIRGraph.h"
     12 #include "js/Conversions.h"
     13 #include "wasm/WasmCode.h"
     14 #include "wasm/WasmFeatures.h"  // for wasm::ReportSimdAnalysis
     15 
     16 #include "wasm/WasmInstance-inl.h"
     17 
     18 using namespace js;
     19 using namespace js::jit;
     20 
     21 using JS::ToInt32;
     22 
     23 using mozilla::CheckedInt;
     24 using mozilla::IsFloat32Representable;
     25 
HashNumber MWasmFloatConstant::valueHash() const {
#ifdef ENABLE_WASM_SIMD
  // With SIMD enabled the payload union is 128 bits wide; XOR the two 64-bit
  // halves so both contribute to the hash regardless of the constant's width.
  return ConstantValueHash(type(), u.bits_[0] ^ u.bits_[1]);
#else
  // Non-SIMD builds only carry a single 64-bit payload.
  return ConstantValueHash(type(), u.bits_[0]);
#endif
}
     33 
// Two float constants are congruent only when they have the same MIR type and
// bit-identical payloads.  Comparing raw bits (rather than numeric values)
// distinguishes NaN payloads and -0.0 from +0.0, which is required for GVN
// soundness.
bool MWasmFloatConstant::congruentTo(const MDefinition* ins) const {
  return ins->isWasmFloatConstant() && type() == ins->type() &&
#ifdef ENABLE_WASM_SIMD
         // Also compare the upper 64 bits when the payload may be v128.
         u.bits_[1] == ins->toWasmFloatConstant()->u.bits_[1] &&
#endif
         u.bits_[0] == ins->toWasmFloatConstant()->u.bits_[0];
}
     41 
// All wasm null-reference constants are interchangeable, so they all hash to
// the same value: the WasmAnyRef type with a zero payload.
HashNumber MWasmNullConstant::valueHash() const {
  return ConstantValueHash(MIRType::WasmAnyRef, 0);
}
     45 
     46 MDefinition* MWasmTruncateToInt32::foldsTo(TempAllocator& alloc) {
     47  MDefinition* input = getOperand(0);
     48  if (input->type() == MIRType::Int32) {
     49    return input;
     50  }
     51 
     52  if (input->type() == MIRType::Double && input->isConstant()) {
     53    double d = input->toConstant()->toDouble();
     54    if (std::isnan(d)) {
     55      return this;
     56    }
     57 
     58    if (!isUnsigned() && d <= double(INT32_MAX) && d >= double(INT32_MIN)) {
     59      return MConstant::NewInt32(alloc, ToInt32(d));
     60    }
     61 
     62    if (isUnsigned() && d <= double(UINT32_MAX) && d >= 0) {
     63      return MConstant::NewInt32(alloc, ToInt32(d));
     64    }
     65  }
     66 
     67  if (input->type() == MIRType::Float32 && input->isConstant()) {
     68    double f = double(input->toConstant()->toFloat32());
     69    if (std::isnan(f)) {
     70      return this;
     71    }
     72 
     73    if (!isUnsigned() && f <= double(INT32_MAX) && f >= double(INT32_MIN)) {
     74      return MConstant::NewInt32(alloc, ToInt32(f));
     75    }
     76 
     77    if (isUnsigned() && f <= double(UINT32_MAX) && f >= 0) {
     78      return MConstant::NewInt32(alloc, ToInt32(f));
     79    }
     80  }
     81 
     82  return this;
     83 }
     84 
     85 MDefinition* MWasmExtendU32Index::foldsTo(TempAllocator& alloc) {
     86  MDefinition* input = this->input();
     87  if (input->isConstant()) {
     88    return MConstant::NewInt64(
     89        alloc, int64_t(uint32_t(input->toConstant()->toInt32())));
     90  }
     91 
     92  return this;
     93 }
     94 
     95 MDefinition* MWasmWrapU32Index::foldsTo(TempAllocator& alloc) {
     96  MDefinition* input = this->input();
     97  if (input->isConstant()) {
     98    return MConstant::NewInt32(
     99        alloc, int32_t(uint32_t(input->toConstant()->toInt64())));
    100  }
    101 
    102  return this;
    103 }
    104 
    105 // Some helpers for folding wasm and/or/xor on int32/64 values.  Rather than
    106 // duplicating these for 32 and 64-bit values, all folding is done on 64-bit
    107 // values and masked for the 32-bit case.
    108 
    109 const uint64_t Low32Mask = uint64_t(0xFFFFFFFFULL);
    110 
    111 // Routines to check and disassemble values.
    112 
    113 static bool IsIntegralConstant(const MDefinition* def) {
    114  return def->isConstant() &&
    115         (def->type() == MIRType::Int32 || def->type() == MIRType::Int64);
    116 }
    117 
    118 static uint64_t GetIntegralConstant(const MDefinition* def) {
    119  if (def->type() == MIRType::Int32) {
    120    return uint64_t(def->toConstant()->toInt32()) & Low32Mask;
    121  }
    122  return uint64_t(def->toConstant()->toInt64());
    123 }
    124 
    125 static bool IsIntegralConstantZero(const MDefinition* def) {
    126  return IsIntegralConstant(def) && GetIntegralConstant(def) == 0;
    127 }
    128 
    129 static bool IsIntegralConstantOnes(const MDefinition* def) {
    130  uint64_t ones = def->type() == MIRType::Int32 ? Low32Mask : ~uint64_t(0);
    131  return IsIntegralConstant(def) && GetIntegralConstant(def) == ones;
    132 }
    133 
    134 // Routines to create values.
    135 static MDefinition* ToIntegralConstant(TempAllocator& alloc, MIRType ty,
    136                                       uint64_t val) {
    137  switch (ty) {
    138    case MIRType::Int32:
    139      return MConstant::NewInt32(alloc, int32_t(uint32_t(val & Low32Mask)));
    140    case MIRType::Int64:
    141      return MConstant::NewInt64(alloc, int64_t(val));
    142    default:
    143      MOZ_CRASH();
    144  }
    145 }
    146 
    147 static MDefinition* ZeroOfType(TempAllocator& alloc, MIRType ty) {
    148  return ToIntegralConstant(alloc, ty, 0);
    149 }
    150 
    151 static MDefinition* OnesOfType(TempAllocator& alloc, MIRType ty) {
    152  return ToIntegralConstant(alloc, ty, ~uint64_t(0));
    153 }
    154 
// Constant-fold and algebraically simplify wasm and/or/xor on Int32/Int64.
// All value manipulation is delegated to the 64-bit helpers above; 32-bit
// results are masked there.  Returns a replacement node or `this` if no
// simplification applies.
MDefinition* MWasmBinaryBitwise::foldsTo(TempAllocator& alloc) {
  MOZ_ASSERT(op() == Opcode::WasmBinaryBitwise);
  MOZ_ASSERT(type() == MIRType::Int32 || type() == MIRType::Int64);

  MDefinition* argL = getOperand(0);
  MDefinition* argR = getOperand(1);
  MOZ_ASSERT(argL->type() == type() && argR->type() == type());

  // The args are the same (SSA name): x&x == x|x == x, x^x == 0.
  if (argL == argR) {
    switch (subOpcode()) {
      case SubOpcode::And:
      case SubOpcode::Or:
        return argL;
      case SubOpcode::Xor:
        return ZeroOfType(alloc, type());
      default:
        MOZ_CRASH();
    }
  }

  // Both args constant: evaluate the operation at compile time.
  if (IsIntegralConstant(argL) && IsIntegralConstant(argR)) {
    uint64_t valL = GetIntegralConstant(argL);
    uint64_t valR = GetIntegralConstant(argR);
    uint64_t val = valL;
    switch (subOpcode()) {
      case SubOpcode::And:
        val &= valR;
        break;
      case SubOpcode::Or:
        val |= valR;
        break;
      case SubOpcode::Xor:
        val ^= valR;
        break;
      default:
        MOZ_CRASH();
    }
    return ToIntegralConstant(alloc, type(), val);
  }

  // Left arg is zero: 0&x == 0, 0|x == 0^x == x.
  if (IsIntegralConstantZero(argL)) {
    switch (subOpcode()) {
      case SubOpcode::And:
        return ZeroOfType(alloc, type());
      case SubOpcode::Or:
      case SubOpcode::Xor:
        return argR;
      default:
        MOZ_CRASH();
    }
  }

  // Right arg is zero: x&0 == 0, x|0 == x^0 == x.
  if (IsIntegralConstantZero(argR)) {
    switch (subOpcode()) {
      case SubOpcode::And:
        return ZeroOfType(alloc, type());
      case SubOpcode::Or:
      case SubOpcode::Xor:
        return argL;
      default:
        MOZ_CRASH();
    }
  }

  // Left arg is ones: ~0&x == x, ~0|x == ~0, ~0^x == ~x.
  if (IsIntegralConstantOnes(argL)) {
    switch (subOpcode()) {
      case SubOpcode::And:
        return argR;
      case SubOpcode::Or:
        return OnesOfType(alloc, type());
      case SubOpcode::Xor:
        return MBitNot::New(alloc, argR, type());
      default:
        MOZ_CRASH();
    }
  }

  // Right arg is ones: x&~0 == x, x|~0 == ~0, x^~0 == ~x.
  if (IsIntegralConstantOnes(argR)) {
    switch (subOpcode()) {
      case SubOpcode::And:
        return argL;
      case SubOpcode::Or:
        return OnesOfType(alloc, type());
      case SubOpcode::Xor:
        return MBitNot::New(alloc, argL, type());
      default:
        MOZ_CRASH();
    }
  }

  return this;
}
    253 
    254 MDefinition* MWasmAddOffset::foldsTo(TempAllocator& alloc) {
    255  MDefinition* baseArg = base();
    256  if (!baseArg->isConstant()) {
    257    return this;
    258  }
    259 
    260  if (baseArg->type() == MIRType::Int32) {
    261    CheckedInt<uint32_t> ptr = baseArg->toConstant()->toInt32();
    262    ptr += offset();
    263    if (!ptr.isValid()) {
    264      return this;
    265    }
    266    return MConstant::NewInt32(alloc, ptr.value());
    267  }
    268 
    269  MOZ_ASSERT(baseArg->type() == MIRType::Int64);
    270  CheckedInt<uint64_t> ptr = baseArg->toConstant()->toInt64();
    271  ptr += offset();
    272  if (!ptr.isValid()) {
    273    return this;
    274  }
    275  return MConstant::NewInt64(alloc, ptr.value());
    276 }
    277 
    278 bool MWasmAlignmentCheck::congruentTo(const MDefinition* ins) const {
    279  if (!ins->isWasmAlignmentCheck()) {
    280    return false;
    281  }
    282  const MWasmAlignmentCheck* check = ins->toWasmAlignmentCheck();
    283  return byteSize_ == check->byteSize() && congruentIfOperandsEqual(check);
    284 }
    285 
    286 MDefinition::AliasType MAsmJSLoadHeap::mightAlias(
    287    const MDefinition* def) const {
    288  if (def->isAsmJSStoreHeap()) {
    289    const MAsmJSStoreHeap* store = def->toAsmJSStoreHeap();
    290    if (store->accessType() != accessType()) {
    291      return AliasType::MayAlias;
    292    }
    293    if (!base()->isConstant() || !store->base()->isConstant()) {
    294      return AliasType::MayAlias;
    295    }
    296    const MConstant* otherBase = store->base()->toConstant();
    297    if (base()->toConstant()->equals(otherBase)) {
    298      return AliasType::MayAlias;
    299    }
    300    return AliasType::NoAlias;
    301  }
    302  return AliasType::MayAlias;
    303 }
    304 
    305 bool MAsmJSLoadHeap::congruentTo(const MDefinition* ins) const {
    306  if (!ins->isAsmJSLoadHeap()) {
    307    return false;
    308  }
    309  const MAsmJSLoadHeap* load = ins->toAsmJSLoadHeap();
    310  return load->accessType() == accessType() && congruentIfOperandsEqual(load);
    311 }
    312 
    313 MDefinition::AliasType MWasmLoadInstanceDataField::mightAlias(
    314    const MDefinition* def) const {
    315  if (def->isWasmStoreInstanceDataField()) {
    316    const MWasmStoreInstanceDataField* store =
    317        def->toWasmStoreInstanceDataField();
    318    return store->instanceDataOffset() == instanceDataOffset_
    319               ? AliasType::MayAlias
    320               : AliasType::NoAlias;
    321  }
    322 
    323  return AliasType::MayAlias;
    324 }
    325 
// Alias query for an (indirect) global-cell load against a potential
// global-cell store.  Only a type mismatch lets us prove no-alias; two
// same-typed indirect globals conservatively may alias.
MDefinition::AliasType MWasmLoadGlobalCell::mightAlias(
    const MDefinition* def) const {
  if (def->isWasmStoreGlobalCell()) {
    // No globals of different type can alias.  See bug 1467415 comment 3.
    if (type() != def->toWasmStoreGlobalCell()->value()->type()) {
      return AliasType::NoAlias;
    }

    // We could do better here.  We're dealing with two indirect globals.
    // If at at least one of them is created in this module, then they
    // can't alias -- in other words they can only alias if they are both
    // imported.  That would require having a flag on globals to indicate
    // which are imported.  See bug 1467415 comment 3, 4th rule.
  }

  return AliasType::MayAlias;
}
    343 
    344 HashNumber MWasmLoadInstanceDataField::valueHash() const {
    345  // Same comment as in MWasmLoadInstanceDataField::congruentTo() applies here.
    346  HashNumber hash = MUnaryInstruction::valueHash();
    347  hash = addU32ToHash(hash, instanceDataOffset_);
    348  return hash;
    349 }
    350 
// Congruence for GVN: two instance-data loads are interchangeable iff they
// read the same offset.  Operand congruence is deliberately skipped; see the
// comment below.
bool MWasmLoadInstanceDataField::congruentTo(const MDefinition* ins) const {
  if (!ins->isWasmLoadInstanceDataField()) {
    return false;
  }

  const MWasmLoadInstanceDataField* other = ins->toWasmLoadInstanceDataField();

  // We don't need to consider the isConstant_ markings here, because
  // equivalence of offsets implies equivalence of constness.
  bool sameOffsets = instanceDataOffset_ == other->instanceDataOffset_;
  MOZ_ASSERT_IF(sameOffsets, isConstant_ == other->isConstant_);

  // We omit checking congruence of the operands.  There is only one
  // operand, the instance pointer, and it only ever has one value within the
  // domain of optimization.  If that should ever change then operand
  // congruence checking should be reinstated.
  return sameOffsets /* && congruentIfOperandsEqual(other) */;
}
    369 
    370 MDefinition* MWasmLoadInstanceDataField::foldsTo(TempAllocator& alloc) {
    371  if (!dependency() || !dependency()->isWasmStoreInstanceDataField()) {
    372    return this;
    373  }
    374 
    375  MWasmStoreInstanceDataField* store =
    376      dependency()->toWasmStoreInstanceDataField();
    377  if (!store->block()->dominates(block())) {
    378    return this;
    379  }
    380 
    381  if (store->instanceDataOffset() != instanceDataOffset()) {
    382    return this;
    383  }
    384 
    385  if (store->value()->type() != type()) {
    386    return this;
    387  }
    388 
    389  return store->value();
    390 }
    391 
    392 MDefinition* MWasmSelect::foldsTo(TempAllocator& alloc) {
    393  if (condExpr()->isConstant()) {
    394    return condExpr()->toConstant()->toInt32() != 0 ? trueExpr() : falseExpr();
    395  }
    396  return this;
    397 }
    398 
    399 bool MWasmLoadGlobalCell::congruentTo(const MDefinition* ins) const {
    400  if (!ins->isWasmLoadGlobalCell()) {
    401    return false;
    402  }
    403  const MWasmLoadGlobalCell* other = ins->toWasmLoadGlobalCell();
    404  return congruentIfOperandsEqual(other);
    405 }
    406 
    407 #ifdef ENABLE_WASM_SIMD
// Simplify V128Bitselect: a constant mask that selects whole lanes becomes a
// shuffle (enabling the shuffle optimizer); otherwise, when the engine allows
// it, relax to the cheaper I8x16RelaxedLaneSelect.
MDefinition* MWasmTernarySimd128::foldsTo(TempAllocator& alloc) {
  if (simdOp() == wasm::SimdOp::V128Bitselect) {
    if (v2()->op() == MDefinition::Opcode::WasmFloatConstant) {
      int8_t shuffle[16];
      if (specializeBitselectConstantMaskAsShuffle(shuffle)) {
        return BuildWasmShuffleSimd128(alloc, shuffle, v0(), v1());
      }
    } else if (canRelaxBitselect()) {
      return MWasmTernarySimd128::New(alloc, v0(), v1(), v2(),
                                      wasm::SimdOp::I8x16RelaxedLaneSelect);
    }
  }
  return this;
}
    422 
    423 inline static bool MatchSpecificShift(MDefinition* instr,
    424                                      wasm::SimdOp simdShiftOp,
    425                                      int shiftValue) {
    426  return instr->isWasmShiftSimd128() &&
    427         instr->toWasmShiftSimd128()->simdOp() == simdShiftOp &&
    428         instr->toWasmShiftSimd128()->rhs()->isConstant() &&
    429         instr->toWasmShiftSimd128()->rhs()->toConstant()->toInt32() ==
    430             shiftValue;
    431 }
    432 
// Matches MIR subtree that represents PMADDUBSW instruction generated by
// emscripten. The a and b parameters return subtrees that correspond
// operands of the instruction, if match is found.
// Returns true (and writes *a/*b) only on a complete match; on failure the
// out-params are left untouched.
static bool MatchPmaddubswSequence(MWasmBinarySimd128* lhs,
                                   MWasmBinarySimd128* rhs, MDefinition** a,
                                   MDefinition** b) {
  MOZ_ASSERT(lhs->simdOp() == wasm::SimdOp::I16x8Mul &&
             rhs->simdOp() == wasm::SimdOp::I16x8Mul);
  // The emscripten/LLVM produced the following sequence for _mm_maddubs_epi16:
  //
  //  return _mm_adds_epi16(
  //    _mm_mullo_epi16(
  //      _mm_and_si128(__a, _mm_set1_epi16(0x00FF)),
  //      _mm_srai_epi16(_mm_slli_epi16(__b, 8), 8)),
  //    _mm_mullo_epi16(_mm_srli_epi16(__a, 8), _mm_srai_epi16(__b, 8)));
  //
  //  This will roughly correspond the following MIR:
  //    MWasmBinarySimd128[I16x8AddSatS]
  //      |-- lhs: MWasmBinarySimd128[I16x8Mul]                      (lhs)
  //      |     |-- lhs: MWasmBinarySimd128WithConstant[V128And]     (op0)
  //      |     |     |-- lhs: a
  //      |     |      -- rhs: SimdConstant::SplatX8(0x00FF)
  //      |      -- rhs: MWasmShiftSimd128[I16x8ShrS]                (op1)
  //      |           |-- lhs: MWasmShiftSimd128[I16x8Shl]
  //      |           |     |-- lhs: b
  //      |           |      -- rhs: MConstant[8]
  //      |            -- rhs: MConstant[8]
  //       -- rhs: MWasmBinarySimd128[I16x8Mul]                      (rhs)
  //            |-- lhs: MWasmShiftSimd128[I16x8ShrU]                (op2)
  //            |     |-- lhs: a
  //            |     |-- rhs: MConstant[8]
  //             -- rhs: MWasmShiftSimd128[I16x8ShrS]                (op3)
  //                  |-- lhs: b
  //                   -- rhs: MConstant[8]

  // The I16x8AddSatS and I16x8Mul are commutative, so their operands
  // may be swapped. Rearrange op0, op1, op2, op3 to be in the order
  // noted above.
  MDefinition *op0 = lhs->lhs(), *op1 = lhs->rhs(), *op2 = rhs->lhs(),
              *op3 = rhs->rhs();
  if (op1->isWasmBinarySimd128WithConstant()) {
    // Move MWasmBinarySimd128WithConstant[V128And] as first operand in lhs.
    std::swap(op0, op1);
  } else if (op3->isWasmBinarySimd128WithConstant()) {
    // Move MWasmBinarySimd128WithConstant[V128And] as first operand in rhs.
    std::swap(op2, op3);
  }
  if (op2->isWasmBinarySimd128WithConstant()) {
    // The lhs and rhs are swapped.
    // Make MWasmBinarySimd128WithConstant[V128And] to be op0.
    std::swap(op0, op2);
    std::swap(op1, op3);
  }
  if (op2->isWasmShiftSimd128() &&
      op2->toWasmShiftSimd128()->simdOp() == wasm::SimdOp::I16x8ShrS) {
    // The op2 and op3 appears to be in wrong order, swap.
    std::swap(op2, op3);
  }

  // Check all instructions SIMD code and constant values for assigned
  // names op0, op1, op2, op3 (see diagram above).
  // const00FF is the byte image of SplatX8(0x00FF), compared via memcmp.
  const uint16_t const00FF[8] = {255, 255, 255, 255, 255, 255, 255, 255};
  if (!op0->isWasmBinarySimd128WithConstant() ||
      op0->toWasmBinarySimd128WithConstant()->simdOp() !=
          wasm::SimdOp::V128And ||
      memcmp(op0->toWasmBinarySimd128WithConstant()->rhs().bytes(), const00FF,
             16) != 0 ||
      !MatchSpecificShift(op1, wasm::SimdOp::I16x8ShrS, 8) ||
      !MatchSpecificShift(op2, wasm::SimdOp::I16x8ShrU, 8) ||
      !MatchSpecificShift(op3, wasm::SimdOp::I16x8ShrS, 8) ||
      !MatchSpecificShift(op1->toWasmShiftSimd128()->lhs(),
                          wasm::SimdOp::I16x8Shl, 8)) {
    return false;
  }

  // Check if the instructions arguments that are subtrees match the
  // a and b assignments. May depend on GVN behavior.
  MDefinition* maybeA = op0->toWasmBinarySimd128WithConstant()->lhs();
  MDefinition* maybeB = op3->toWasmShiftSimd128()->lhs();
  if (maybeA != op2->toWasmShiftSimd128()->lhs() ||
      maybeB != op1->toWasmShiftSimd128()->lhs()->toWasmShiftSimd128()->lhs()) {
    return false;
  }

  *a = maybeA;
  *b = maybeB;
  return true;
}
    521 
// Peephole folds for binary v128 operations: constant-mask swizzle ->
// shuffle, var-OP-const specialization, and PMADDUBSW pattern recognition.
MDefinition* MWasmBinarySimd128::foldsTo(TempAllocator& alloc) {
  if (simdOp() == wasm::SimdOp::I8x16Swizzle && rhs()->isWasmFloatConstant()) {
    // Specialize swizzle(v, constant) as shuffle(mask, v, zero) to trigger all
    // our shuffle optimizations.  We don't report this rewriting as the report
    // will be overwritten by the subsequent shuffle analysis.
    int8_t shuffleMask[16];
    memcpy(shuffleMask, rhs()->toWasmFloatConstant()->toSimd128().bytes(), 16);
    for (int i = 0; i < 16; i++) {
      // Out-of-bounds lanes reference the zero vector; in many cases, the zero
      // vector is removed by subsequent optimizations.
      if (shuffleMask[i] < 0 || shuffleMask[i] > 15) {
        shuffleMask[i] = 16;
      }
    }
    MWasmFloatConstant* zero =
        MWasmFloatConstant::NewSimd128(alloc, SimdConstant::SplatX4(0));
    if (!zero) {
      return nullptr;
    }
    block()->insertBefore(this, zero);
    return BuildWasmShuffleSimd128(alloc, shuffleMask, lhs(), zero);
  }

  // Specialize var OP const / const OP var when possible.
  //
  // As the LIR layer can't directly handle v128 constants as part of its normal
  // machinery we specialize some nodes here if they have single-use v128
  // constant arguments.  The purpose is to generate code that inlines the
  // constant in the instruction stream, using either a rip-relative load+op or
  // quickly-synthesized constant in a scratch on x64.  There is a general
  // assumption here that that is better than generating the constant into an
  // allocatable register, since that register value could not be reused. (This
  // ignores the possibility that the constant load could be hoisted).

  // Exactly one side is constant (the != below is an XOR of the two flags).
  if (lhs()->isWasmFloatConstant() != rhs()->isWasmFloatConstant() &&
      specializeForConstantRhs()) {
    if (isCommutative() && lhs()->isWasmFloatConstant() && lhs()->hasOneUse()) {
      return MWasmBinarySimd128WithConstant::New(
          alloc, rhs(), lhs()->toWasmFloatConstant()->toSimd128(), simdOp());
    }

    if (rhs()->isWasmFloatConstant() && rhs()->hasOneUse()) {
      return MWasmBinarySimd128WithConstant::New(
          alloc, lhs(), rhs()->toWasmFloatConstant()->toSimd128(), simdOp());
    }
  }

  // Check special encoding for PMADDUBSW.
  if (canPmaddubsw() && simdOp() == wasm::SimdOp::I16x8AddSatS &&
      lhs()->isWasmBinarySimd128() && rhs()->isWasmBinarySimd128() &&
      lhs()->toWasmBinarySimd128()->simdOp() == wasm::SimdOp::I16x8Mul &&
      rhs()->toWasmBinarySimd128()->simdOp() == wasm::SimdOp::I16x8Mul) {
    MDefinition *a, *b;
    if (MatchPmaddubswSequence(lhs()->toWasmBinarySimd128(),
                               rhs()->toWasmBinarySimd128(), &a, &b)) {
      return MWasmBinarySimd128::New(alloc, a, b, /* commutative = */ false,
                                     wasm::SimdOp::MozPMADDUBSW);
    }
  }

  return this;
}
    584 
// Constant-fold scalar->v128 splats.  In DEBUG builds, the ScopeExit logs
// "constant folded" whenever a fold happens; the non-fold exits call
// logging.release() first so nothing is reported for them.
MDefinition* MWasmScalarToSimd128::foldsTo(TempAllocator& alloc) {
#  ifdef DEBUG
  auto logging = mozilla::MakeScopeExit([&] {
    js::wasm::ReportSimdAnalysis("scalar-to-simd128 -> constant folded");
  });
#  endif
  // Integer splats: the scalar input is an MConstant.
  if (input()->isConstant()) {
    MConstant* c = input()->toConstant();
    switch (simdOp()) {
      case wasm::SimdOp::I8x16Splat:
        return MWasmFloatConstant::NewSimd128(
            alloc, SimdConstant::SplatX16(c->toInt32()));
      case wasm::SimdOp::I16x8Splat:
        return MWasmFloatConstant::NewSimd128(
            alloc, SimdConstant::SplatX8(c->toInt32()));
      case wasm::SimdOp::I32x4Splat:
        return MWasmFloatConstant::NewSimd128(
            alloc, SimdConstant::SplatX4(c->toInt32()));
      case wasm::SimdOp::I64x2Splat:
        return MWasmFloatConstant::NewSimd128(
            alloc, SimdConstant::SplatX2(c->toInt64()));
      default:
#  ifdef DEBUG
        logging.release();
#  endif
        return this;
    }
  }
  // Floating-point splats: the scalar input is an MWasmFloatConstant.
  if (input()->isWasmFloatConstant()) {
    MWasmFloatConstant* c = input()->toWasmFloatConstant();
    switch (simdOp()) {
      case wasm::SimdOp::F32x4Splat:
        return MWasmFloatConstant::NewSimd128(
            alloc, SimdConstant::SplatX4(c->toFloat32()));
      case wasm::SimdOp::F64x2Splat:
        return MWasmFloatConstant::NewSimd128(
            alloc, SimdConstant::SplatX2(c->toDouble()));
      default:
#  ifdef DEBUG
        logging.release();
#  endif
        return this;
    }
  }
#  ifdef DEBUG
  logging.release();
#  endif
  return this;
}
    634 
    635 template <typename T>
    636 static bool AllTrue(const T& v) {
    637  constexpr size_t count = sizeof(T) / sizeof(*v);
    638  static_assert(count == 16 || count == 8 || count == 4 || count == 2);
    639  bool result = true;
    640  for (unsigned i = 0; i < count; i++) {
    641    result = result && v[i] != 0;
    642  }
    643  return result;
    644 }
    645 
    646 template <typename T>
    647 static int32_t Bitmask(const T& v) {
    648  constexpr size_t count = sizeof(T) / sizeof(*v);
    649  constexpr size_t shift = 8 * sizeof(*v) - 1;
    650  static_assert(shift == 7 || shift == 15 || shift == 31 || shift == 63);
    651  int32_t result = 0;
    652  for (unsigned i = 0; i < count; i++) {
    653    result = result | int32_t(((v[i] >> shift) & 1) << i);
    654  }
    655  return result;
    656 }
    657 
// Constant-fold v128->scalar reductions (any_true/all_true, bitmask, lane
// extraction) when the input is a constant.  Int32-producing ops accumulate
// into i32Result and fall through to the shared NewInt32 at the bottom;
// Int64/float extractions return directly.  In DEBUG builds the ScopeExit
// reports the fold unless released on a non-fold exit.
MDefinition* MWasmReduceSimd128::foldsTo(TempAllocator& alloc) {
#  ifdef DEBUG
  auto logging = mozilla::MakeScopeExit([&] {
    js::wasm::ReportSimdAnalysis("simd128-to-scalar -> constant folded");
  });
#  endif
  if (input()->isWasmFloatConstant()) {
    SimdConstant c = input()->toWasmFloatConstant()->toSimd128();
    int32_t i32Result = 0;
    switch (simdOp()) {
      case wasm::SimdOp::V128AnyTrue:
        i32Result = !c.isZeroBits();
        break;
      case wasm::SimdOp::I8x16AllTrue:
        i32Result = AllTrue(
            SimdConstant::CreateSimd128((int8_t*)c.bytes()).asInt8x16());
        break;
      case wasm::SimdOp::I8x16Bitmask:
        i32Result = Bitmask(
            SimdConstant::CreateSimd128((int8_t*)c.bytes()).asInt8x16());
        break;
      case wasm::SimdOp::I16x8AllTrue:
        i32Result = AllTrue(
            SimdConstant::CreateSimd128((int16_t*)c.bytes()).asInt16x8());
        break;
      case wasm::SimdOp::I16x8Bitmask:
        i32Result = Bitmask(
            SimdConstant::CreateSimd128((int16_t*)c.bytes()).asInt16x8());
        break;
      case wasm::SimdOp::I32x4AllTrue:
        i32Result = AllTrue(
            SimdConstant::CreateSimd128((int32_t*)c.bytes()).asInt32x4());
        break;
      case wasm::SimdOp::I32x4Bitmask:
        i32Result = Bitmask(
            SimdConstant::CreateSimd128((int32_t*)c.bytes()).asInt32x4());
        break;
      case wasm::SimdOp::I64x2AllTrue:
        i32Result = AllTrue(
            SimdConstant::CreateSimd128((int64_t*)c.bytes()).asInt64x2());
        break;
      case wasm::SimdOp::I64x2Bitmask:
        i32Result = Bitmask(
            SimdConstant::CreateSimd128((int64_t*)c.bytes()).asInt64x2());
        break;
      case wasm::SimdOp::I8x16ExtractLaneS:
        i32Result =
            SimdConstant::CreateSimd128((int8_t*)c.bytes()).asInt8x16()[imm()];
        break;
      case wasm::SimdOp::I8x16ExtractLaneU:
        // Unsigned extract: mask away the sign extension from the int8 read.
        i32Result = int32_t(SimdConstant::CreateSimd128((int8_t*)c.bytes())
                                .asInt8x16()[imm()]) &
                    0xFF;
        break;
      case wasm::SimdOp::I16x8ExtractLaneS:
        i32Result =
            SimdConstant::CreateSimd128((int16_t*)c.bytes()).asInt16x8()[imm()];
        break;
      case wasm::SimdOp::I16x8ExtractLaneU:
        // Unsigned extract: mask away the sign extension from the int16 read.
        i32Result = int32_t(SimdConstant::CreateSimd128((int16_t*)c.bytes())
                                .asInt16x8()[imm()]) &
                    0xFFFF;
        break;
      case wasm::SimdOp::I32x4ExtractLane:
        i32Result =
            SimdConstant::CreateSimd128((int32_t*)c.bytes()).asInt32x4()[imm()];
        break;
      case wasm::SimdOp::I64x2ExtractLane:
        return MConstant::NewInt64(
            alloc, SimdConstant::CreateSimd128((int64_t*)c.bytes())
                       .asInt64x2()[imm()]);
      case wasm::SimdOp::F32x4ExtractLane:
        return MWasmFloatConstant::NewFloat32(
            alloc, SimdConstant::CreateSimd128((float*)c.bytes())
                       .asFloat32x4()[imm()]);
      case wasm::SimdOp::F64x2ExtractLane:
        return MWasmFloatConstant::NewDouble(
            alloc, SimdConstant::CreateSimd128((double*)c.bytes())
                       .asFloat64x2()[imm()]);
      default:
#  ifdef DEBUG
        logging.release();
#  endif
        return this;
    }
    return MConstant::NewInt32(alloc, i32Result);
  }
#  ifdef DEBUG
  logging.release();
#  endif
  return this;
}
    750 #endif  // ENABLE_WASM_SIMD
    751 
    752 MDefinition* MWasmUnsignedToDouble::foldsTo(TempAllocator& alloc) {
    753  if (input()->isConstant()) {
    754    return MConstant::NewDouble(alloc,
    755                                uint32_t(input()->toConstant()->toInt32()));
    756  }
    757 
    758  return this;
    759 }
    760 
    761 MDefinition* MWasmUnsignedToFloat32::foldsTo(TempAllocator& alloc) {
    762  if (input()->isConstant()) {
    763    double dval = double(uint32_t(input()->toConstant()->toInt32()));
    764    if (IsFloat32Representable(dval)) {
    765      return MConstant::NewFloat32(alloc, float(dval));
    766    }
    767  }
    768 
    769  return this;
    770 }
    771 
// Construct a catchable wasm call (a call inside a try block).  The node is a
// control instruction with two successors: the normal fallthrough block and
// the landing-pad pre-pad block used for exception unwinding.  Returns
// nullptr on OOM while initializing the argument operands.
MWasmCallCatchable* MWasmCallCatchable::New(
    TempAllocator& alloc, const wasm::CallSiteDesc& desc,
    const wasm::CalleeDesc& callee, const Args& args,
    uint32_t stackArgAreaSizeUnaligned, uint32_t tryNoteIndex,
    MBasicBlock* fallthroughBlock, MBasicBlock* prePadBlock,
    MDefinition* tableAddressOrRef) {
  MWasmCallCatchable* call = new (alloc)
      MWasmCallCatchable(desc, callee, stackArgAreaSizeUnaligned, tryNoteIndex);

  call->setSuccessor(FallthroughBranchIndex, fallthroughBlock);
  call->setSuccessor(PrePadBranchIndex, prePadBlock);

  // Indirect calls (through a table or a funcref) must supply the extra
  // address/ref operand.
  MOZ_ASSERT_IF(callee.isTable() || callee.isFuncRef(), tableAddressOrRef);
  if (!call->initWithArgs(alloc, call, args, tableAddressOrRef)) {
    return nullptr;
  }

  return call;
}
    791 
    792 MWasmCallCatchable* MWasmCallCatchable::NewBuiltinInstanceMethodCall(
    793    TempAllocator& alloc, const wasm::CallSiteDesc& desc,
    794    const wasm::SymbolicAddress builtin, wasm::FailureMode failureMode,
    795    wasm::Trap failureTrap, const ABIArg& instanceArg, const Args& args,
    796    uint32_t stackArgAreaSizeUnaligned, uint32_t tryNoteIndex,
    797    MBasicBlock* fallthroughBlock, MBasicBlock* prePadBlock) {
    798  auto callee = wasm::CalleeDesc::builtinInstanceMethod(builtin);
    799  MWasmCallCatchable* call = MWasmCallCatchable::New(
    800      alloc, desc, callee, args, stackArgAreaSizeUnaligned, tryNoteIndex,
    801      fallthroughBlock, prePadBlock, nullptr);
    802  if (!call) {
    803    return nullptr;
    804  }
    805 
    806  MOZ_ASSERT(instanceArg != ABIArg());
    807  call->instanceArg_ = instanceArg;
    808  call->builtinMethodFailureMode_ = failureMode;
    809  call->builtinMethodFailureTrap_ = failureTrap;
    810  return call;
    811 }
    812 
    813 MWasmCallUncatchable* MWasmCallUncatchable::New(
    814    TempAllocator& alloc, const wasm::CallSiteDesc& desc,
    815    const wasm::CalleeDesc& callee, const Args& args,
    816    uint32_t stackArgAreaSizeUnaligned, MDefinition* tableAddressOrRef) {
    817  MWasmCallUncatchable* call =
    818      new (alloc) MWasmCallUncatchable(desc, callee, stackArgAreaSizeUnaligned);
    819 
    820  MOZ_ASSERT_IF(callee.isTable() || callee.isFuncRef(), tableAddressOrRef);
    821  if (!call->initWithArgs(alloc, call, args, tableAddressOrRef)) {
    822    return nullptr;
    823  }
    824 
    825  return call;
    826 }
    827 
    828 MWasmCallUncatchable* MWasmCallUncatchable::NewBuiltinInstanceMethodCall(
    829    TempAllocator& alloc, const wasm::CallSiteDesc& desc,
    830    const wasm::SymbolicAddress builtin, wasm::FailureMode failureMode,
    831    wasm::Trap failureTrap, const ABIArg& instanceArg, const Args& args,
    832    uint32_t stackArgAreaSizeUnaligned) {
    833  auto callee = wasm::CalleeDesc::builtinInstanceMethod(builtin);
    834  MWasmCallUncatchable* call = MWasmCallUncatchable::New(
    835      alloc, desc, callee, args, stackArgAreaSizeUnaligned, nullptr);
    836  if (!call) {
    837    return nullptr;
    838  }
    839 
    840  MOZ_ASSERT(instanceArg != ABIArg());
    841  call->instanceArg_ = instanceArg;
    842  call->builtinMethodFailureMode_ = failureMode;
    843  call->builtinMethodFailureTrap_ = failureTrap;
    844  return call;
    845 }
    846 
    847 MWasmReturnCall* MWasmReturnCall::New(TempAllocator& alloc,
    848                                      const wasm::CallSiteDesc& desc,
    849                                      const wasm::CalleeDesc& callee,
    850                                      const Args& args,
    851                                      uint32_t stackArgAreaSizeUnaligned,
    852                                      MDefinition* tableAddressOrRef) {
    853  MWasmReturnCall* call =
    854      new (alloc) MWasmReturnCall(desc, callee, stackArgAreaSizeUnaligned);
    855 
    856  MOZ_ASSERT_IF(callee.isTable() || callee.isFuncRef(), tableAddressOrRef);
    857  if (!call->initWithArgs(alloc, call, args, tableAddressOrRef)) {
    858    return nullptr;
    859  }
    860 
    861  return call;
    862 }
    863 
    864 MIonToWasmCall* MIonToWasmCall::New(TempAllocator& alloc,
    865                                    WasmInstanceObject* instanceObj,
    866                                    const wasm::FuncExport& funcExport) {
    867  const wasm::FuncType& funcType =
    868      instanceObj->instance().codeMeta().getFuncType(funcExport.funcIndex());
    869  const wasm::ValTypeVector& results = funcType.results();
    870  MIRType resultType = MIRType::Value;
    871  // At the JS boundary some wasm types must be represented as a Value, and in
    872  // addition a void return requires an Undefined value.
    873  if (results.length() > 0 && !results[0].isEncodedAsJSValueOnEscape()) {
    874    MOZ_ASSERT(results.length() == 1,
    875               "multiple returns not implemented for inlined Wasm calls");
    876    resultType = results[0].toMIRType();
    877  }
    878 
    879  auto* ins = new (alloc) MIonToWasmCall(instanceObj, resultType, funcExport);
    880  if (!ins->init(alloc, funcType.args().length())) {
    881    return nullptr;
    882  }
    883  return ins;
    884 }
    885 
#ifdef DEBUG
// Debug-only MIR consistency check: a float32 use of this call is only
// consistent when the corresponding wasm function argument is of type f32.
bool MIonToWasmCall::isConsistentFloat32Use(MUse* use) const {
  const wasm::FuncType& funcType =
      instance()->codeMeta().getFuncType(funcExport_.funcIndex());
  // The use's operand index maps directly to the wasm argument index.
  return funcType.args()[use->index()].kind() == wasm::ValType::F32;
}
#endif
    893 
    894 bool MWasmShiftSimd128::congruentTo(const MDefinition* ins) const {
    895  if (!ins->isWasmShiftSimd128()) {
    896    return false;
    897  }
    898  return ins->toWasmShiftSimd128()->simdOp() == simdOp_ &&
    899         congruentIfOperandsEqual(ins);
    900 }
    901 
    902 bool MWasmShuffleSimd128::congruentTo(const MDefinition* ins) const {
    903  if (!ins->isWasmShuffleSimd128()) {
    904    return false;
    905  }
    906  return ins->toWasmShuffleSimd128()->shuffle().equals(&shuffle_) &&
    907         congruentIfOperandsEqual(ins);
    908 }
    909 
    910 bool MWasmUnarySimd128::congruentTo(const MDefinition* ins) const {
    911  if (!ins->isWasmUnarySimd128()) {
    912    return false;
    913  }
    914  return ins->toWasmUnarySimd128()->simdOp() == simdOp_ &&
    915         congruentIfOperandsEqual(ins);
    916 }
    917 
    918 #ifdef ENABLE_WASM_SIMD
    919 MWasmShuffleSimd128* jit::BuildWasmShuffleSimd128(TempAllocator& alloc,
    920                                                  const int8_t* control,
    921                                                  MDefinition* lhs,
    922                                                  MDefinition* rhs) {
    923  SimdShuffle s =
    924      AnalyzeSimdShuffle(SimdConstant::CreateX16(control), lhs, rhs);
    925  switch (s.opd) {
    926    case SimdShuffle::Operand::LEFT:
    927      // When SimdShuffle::Operand is LEFT the right operand is not used,
    928      // lose reference to rhs.
    929      rhs = lhs;
    930      break;
    931    case SimdShuffle::Operand::RIGHT:
    932      // When SimdShuffle::Operand is RIGHT the left operand is not used,
    933      // lose reference to lhs.
    934      lhs = rhs;
    935      break;
    936    default:
    937      break;
    938  }
    939  return MWasmShuffleSimd128::New(alloc, lhs, rhs, s);
    940 }
    941 #endif  // ENABLE_WASM_SIMD
    942 
    943 static MDefinition* FoldTrivialWasmTests(TempAllocator& alloc,
    944                                         wasm::RefType sourceType,
    945                                         wasm::RefType destType) {
    946  // Upcasts are trivially valid.
    947  if (wasm::RefType::isSubTypeOf(sourceType, destType)) {
    948    return MConstant::NewInt32(alloc, 1);
    949  }
    950 
    951  // If two types are completely disjoint, then all casts between them are
    952  // impossible.
    953  if (!wasm::RefType::castPossible(destType, sourceType)) {
    954    return MConstant::NewInt32(alloc, 0);
    955  }
    956 
    957  return nullptr;
    958 }
    959 
    960 static MDefinition* FoldTrivialWasmCasts(MDefinition* ref,
    961                                         wasm::RefType sourceType,
    962                                         wasm::RefType destType) {
    963  // Upcasts are trivially valid.
    964  if (wasm::RefType::isSubTypeOf(sourceType, destType)) {
    965    return ref;
    966  }
    967 
    968  // We can't fold invalid casts to a trap instruction, because that will
    969  // confuse GVN which assumes the folded to instruction has the same type
    970  // as the original instruction.
    971 
    972  return nullptr;
    973 }
    974 
    975 MDefinition* MWasmRefTestAbstract::foldsTo(TempAllocator& alloc) {
    976  if (ref()->wasmRefType().isNothing()) {
    977    return this;
    978  }
    979 
    980  MDefinition* folded =
    981      FoldTrivialWasmTests(alloc, ref()->wasmRefType().value(), destType());
    982  if (folded) {
    983    return folded;
    984  }
    985  return this;
    986 }
    987 
    988 MDefinition* MWasmRefTestConcrete::foldsTo(TempAllocator& alloc) {
    989  if (ref()->wasmRefType().isNothing()) {
    990    return this;
    991  }
    992 
    993  MDefinition* folded =
    994      FoldTrivialWasmTests(alloc, ref()->wasmRefType().value(), destType());
    995  if (folded) {
    996    return folded;
    997  }
    998  return this;
    999 }
   1000 
   1001 MDefinition* MWasmRefCastAbstract::foldsTo(TempAllocator& alloc) {
   1002  if (ref()->wasmRefType().isNothing()) {
   1003    return this;
   1004  }
   1005 
   1006  MDefinition* folded =
   1007      FoldTrivialWasmCasts(ref(), ref()->wasmRefType().value(), destType());
   1008  if (folded) {
   1009    return folded;
   1010  }
   1011  return this;
   1012 }
   1013 
   1014 MDefinition* MWasmRefCastConcrete::foldsTo(TempAllocator& alloc) {
   1015  if (ref()->wasmRefType().isNothing()) {
   1016    return this;
   1017  }
   1018 
   1019  MDefinition* folded =
   1020      FoldTrivialWasmCasts(ref(), ref()->wasmRefType().value(), destType());
   1021  if (folded) {
   1022    return folded;
   1023  }
   1024  return this;
   1025 }
   1026 
   1027 MDefinition* MWasmRefAsNonNull::foldsTo(TempAllocator& alloc) {
   1028  wasm::MaybeRefType inputType = ref()->wasmRefType();
   1029  if (inputType.isSome() && !inputType.value().isNullable()) {
   1030    return ref();
   1031  }
   1032  return this;
   1033 }
   1034 
   1035 bool MWasmStructState::init() {
   1036  // Reserve the size for the number of fields.
   1037  return fields_.resize(
   1038      wasmStruct_->toWasmNewStructObject()->structType().fields_.length());
   1039 }
   1040 
   1041 MWasmStructState* MWasmStructState::New(TempAllocator& alloc,
   1042                                        MDefinition* structObject) {
   1043  MWasmStructState* state = new (alloc) MWasmStructState(alloc, structObject);
   1044  if (!state->init()) {
   1045    return nullptr;
   1046  }
   1047  return state;
   1048 }
   1049 
   1050 MWasmStructState* MWasmStructState::Copy(TempAllocator& alloc,
   1051                                         MWasmStructState* state) {
   1052  MDefinition* newWasmStruct = state->wasmStruct();
   1053  MWasmStructState* res = new (alloc) MWasmStructState(alloc, newWasmStruct);
   1054  if (!res || !res->init()) {
   1055    return nullptr;
   1056  }
   1057  for (size_t i = 0; i < state->numFields(); i++) {
   1058    res->setField(i, state->getField(i));
   1059  }
   1060  return res;
   1061 }