tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

WasmStubs.cpp (132589B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 *
      4 * Copyright 2015 Mozilla Foundation
      5 *
      6 * Licensed under the Apache License, Version 2.0 (the "License");
      7 * you may not use this file except in compliance with the License.
      8 * You may obtain a copy of the License at
      9 *
     10 *     http://www.apache.org/licenses/LICENSE-2.0
     11 *
     12 * Unless required by applicable law or agreed to in writing, software
     13 * distributed under the License is distributed on an "AS IS" BASIS,
     14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     15 * See the License for the specific language governing permissions and
     16 * limitations under the License.
     17 */
     18 
     19 #include "wasm/WasmStubs.h"
     20 
     21 #include <algorithm>
     22 #include <type_traits>
     23 
     24 #include "jit/ABIArgGenerator.h"
     25 #include "jit/JitFrames.h"
     26 #include "jit/JitRuntime.h"
     27 #include "jit/RegisterAllocator.h"
     28 #include "js/Printf.h"
     29 #include "util/Memory.h"
     30 #include "wasm/WasmCode.h"
     31 #include "wasm/WasmGenerator.h"
     32 #include "wasm/WasmInstance.h"
     33 #include "wasm/WasmPI.h"
     34 
     35 #include "jit/MacroAssembler-inl.h"
     36 #include "wasm/WasmInstance-inl.h"
     37 
     38 using namespace js;
     39 using namespace js::jit;
     40 using namespace js::wasm;
     41 
     42 using mozilla::DebugOnly;
     43 using mozilla::Maybe;
     44 using mozilla::Nothing;
     45 using mozilla::Some;
     46 
     47 using MIRTypeVector = Vector<jit::MIRType, 8, SystemAllocPolicy>;
     48 using ABIArgMIRTypeIter = jit::ABIArgIter<MIRTypeVector>;
     49 
     50 /*****************************************************************************/
     51 // ABIResultIter implementation
     52 
     53 static uint32_t ResultStackSize(ValType type) {
     54  switch (type.kind()) {
     55    case ValType::I32:
     56      return ABIResult::StackSizeOfInt32;
     57    case ValType::I64:
     58      return ABIResult::StackSizeOfInt64;
     59    case ValType::F32:
     60      return ABIResult::StackSizeOfFloat;
     61    case ValType::F64:
     62      return ABIResult::StackSizeOfDouble;
     63 #ifdef ENABLE_WASM_SIMD
     64    case ValType::V128:
     65      return ABIResult::StackSizeOfV128;
     66 #endif
     67    case ValType::Ref:
     68      return ABIResult::StackSizeOfPtr;
     69    default:
     70      MOZ_CRASH("Unexpected result type");
     71  }
     72 }
     73 
     74 // Compute the size of the stack slot that the wasm ABI requires be allocated
     75 // for a particular MIRType.  Note that this sometimes differs from the
     76 // MIRType's natural size.  See also ResultStackSize above and ABIResult::size()
     77 // and ABIResultIter below.
     78 
uint32_t js::wasm::MIRTypeToABIResultSize(jit::MIRType type) {
  switch (type) {
    case MIRType::Int32:
      return ABIResult::StackSizeOfInt32;
    case MIRType::Int64:
      return ABIResult::StackSizeOfInt64;
    case MIRType::Float32:
      return ABIResult::StackSizeOfFloat;
    case MIRType::Double:
      return ABIResult::StackSizeOfDouble;
#ifdef ENABLE_WASM_SIMD
    case MIRType::Simd128:
      return ABIResult::StackSizeOfV128;
#endif
    // Both plain pointers and wasm anyref values occupy one pointer-sized
    // slot.
    case MIRType::Pointer:
    case MIRType::WasmAnyRef:
      return ABIResult::StackSizeOfPtr;
    default:
      MOZ_CRASH("MIRTypeToABIResultSize - unhandled case");
  }
}
    100 
    101 uint32_t ABIResult::size() const { return ResultStackSize(type()); }
    102 
// Position cur_ on the (single) register result, selecting the ABI return
// register appropriate for |type|.  Only valid while the iterator is on a
// register result, as the assertions below check for both directions.
void ABIResultIter::settleRegister(ValType type) {
  MOZ_ASSERT(!done());
  MOZ_ASSERT_IF(direction_ == Next, index() < MaxRegisterResults);
  MOZ_ASSERT_IF(direction_ == Prev, index() >= count_ - MaxRegisterResults);
  static_assert(MaxRegisterResults == 1, "expected a single register result");

  switch (type.kind()) {
    case ValType::I32:
      cur_ = ABIResult(type, ReturnReg);
      break;
    case ValType::I64:
      cur_ = ABIResult(type, ReturnReg64);
      break;
    case ValType::F32:
      cur_ = ABIResult(type, ReturnFloat32Reg);
      break;
    case ValType::F64:
      cur_ = ABIResult(type, ReturnDoubleReg);
      break;
    // References are returned in the general-purpose return register, like
    // I32.
    case ValType::Ref:
      cur_ = ABIResult(type, ReturnReg);
      break;
#ifdef ENABLE_WASM_SIMD
    case ValType::V128:
      cur_ = ABIResult(type, ReturnSimd128Reg);
      break;
#endif
    default:
      MOZ_CRASH("Unexpected result type");
  }
}
    134 
    135 void ABIResultIter::settleNext() {
    136  MOZ_ASSERT(direction_ == Next);
    137  MOZ_ASSERT(!done());
    138 
    139  uint32_t typeIndex = count_ - index_ - 1;
    140  ValType type = type_[typeIndex];
    141 
    142  if (index_ < MaxRegisterResults) {
    143    settleRegister(type);
    144    return;
    145  }
    146 
    147  cur_ = ABIResult(type, nextStackOffset_);
    148  nextStackOffset_ += ResultStackSize(type);
    149 }
    150 
    151 void ABIResultIter::settlePrev() {
    152  MOZ_ASSERT(direction_ == Prev);
    153  MOZ_ASSERT(!done());
    154  uint32_t typeIndex = index_;
    155  ValType type = type_[typeIndex];
    156 
    157  if (count_ - index_ - 1 < MaxRegisterResults) {
    158    settleRegister(type);
    159    return;
    160  }
    161 
    162  uint32_t size = ResultStackSize(type);
    163  MOZ_ASSERT(nextStackOffset_ >= size);
    164  nextStackOffset_ -= size;
    165  cur_ = ABIResult(type, nextStackOffset_);
    166 }
    167 
#ifdef WASM_CODEGEN_DEBUG
// Shared skeleton for the GenPrint* helpers below: if the debug channel is
// enabled, emit code that saves every register, sets up an ABI call to the
// given printing builtin, invokes |passArgAndCall| to pass the value and
// perform the call, and then restores all registers.  The emitted code is
// therefore transparent to the surrounding register allocation.
template <class Closure>
static void GenPrint(DebugChannel channel, MacroAssembler& masm,
                     const Maybe<Register>& taken, SymbolicAddress builtin,
                     Closure passArgAndCall) {
  if (!IsCodegenDebugEnabled(channel)) {
    return;
  }

  // Preserve the entire register state around the printing call.
  AllocatableRegisterSet regs(RegisterSet::All());
  LiveRegisterSet save(regs.asLiveSet());
  masm.PushRegsInMask(save);

  // Avoid handing out the caller's argument register as the temporary.
  if (taken) {
    regs.take(taken.value());
  }
  Register temp = regs.takeAnyGeneral();

  {
    MOZ_ASSERT(MaybeGetJitContext(),
               "codegen debug checks require a jit context");
#  ifdef JS_CODEGEN_ARM64
    if (IsCompilingWasm()) {
      masm.setupWasmABICall(builtin);
    } else {
      // JS ARM64 has an extra stack pointer which is not managed in WASM.
      masm.setupUnalignedABICall(temp);
    }
#  else
    masm.setupUnalignedABICall(temp);
#  endif
    passArgAndCall(IsCompilingWasm(), temp);
  }

  masm.PopRegsInMask(save);
}
    204 
// Emit code that prints a fixed string on |channel|.  The format string is
// expanded now, at stub-generation time (not at run time), and the resulting
// text pointer is baked into the generated code.
static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
                      const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  UniqueChars str = JS_vsmprintf(fmt, ap);
  va_end(ap);

  GenPrint(channel, masm, Nothing(), SymbolicAddress::PrintText,
           [&](bool inWasm, Register temp) {
             // If we've gone this far, it means we're actually using the
             // debugging strings. In this case, we leak them! This is only for
             // debugging, and doing the right thing is cumbersome (in Ion, it'd
             // mean add a vec of strings to the IonScript; in wasm, it'd mean
             // add it to the current Module and serialize it properly).
             const char* text = str.release();

             masm.movePtr(ImmPtr((void*)text, ImmPtr::NoCheckToken()), temp);
             masm.passABIArg(temp);
             if (inWasm) {
               masm.callDebugWithABI(SymbolicAddress::PrintText);
             } else {
               using Fn = void (*)(const char* output);
               masm.callWithABI<Fn, PrintText>(
                   ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
             }
           });
}
    232 
// Emit code that prints the 32-bit integer currently held in |src|.
static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
                          const Register& src) {
  GenPrint(channel, masm, Some(src), SymbolicAddress::PrintI32,
           [&](bool inWasm, Register _temp) {
             masm.passABIArg(src);
             if (inWasm) {
               masm.callDebugWithABI(SymbolicAddress::PrintI32);
             } else {
               using Fn = void (*)(int32_t val);
               masm.callWithABI<Fn, PrintI32>(
                   ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
             }
           });
}
    247 
// Emit code that prints the pointer currently held in |src|.
static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
                        const Register& src) {
  GenPrint(channel, masm, Some(src), SymbolicAddress::PrintPtr,
           [&](bool inWasm, Register _temp) {
             masm.passABIArg(src);
             if (inWasm) {
               masm.callDebugWithABI(SymbolicAddress::PrintPtr);
             } else {
               using Fn = void (*)(uint8_t* val);
               masm.callWithABI<Fn, PrintPtr>(
                   ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
             }
           });
}
    262 
// Emit code that prints the 64-bit integer in |src|.  On 32-bit targets the
// value lives in a register pair, so its two halves are printed separately.
static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
                        const Register64& src) {
#  if JS_BITS_PER_WORD == 64
  GenPrintf(channel, masm, "i64 ");
  GenPrintIsize(channel, masm, src.reg);
#  else
  GenPrintf(channel, masm, "i64(");
  GenPrintIsize(channel, masm, src.low);
  GenPrintIsize(channel, masm, src.high);
  GenPrintf(channel, masm, ") ");
#  endif
}
    275 
// Emit code that prints the float32 currently held in |src|.
static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {
  GenPrint(channel, masm, Nothing(), SymbolicAddress::PrintF32,
           [&](bool inWasm, Register temp) {
             masm.passABIArg(src, ABIType::Float32);
             if (inWasm) {
               masm.callDebugWithABI(SymbolicAddress::PrintF32);
             } else {
               using Fn = void (*)(float val);
               masm.callWithABI<Fn, PrintF32>(
                   ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
             }
           });
}
    290 
// Emit code that prints the double currently held in |src|.
static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {
  GenPrint(channel, masm, Nothing(), SymbolicAddress::PrintF64,
           [&](bool inWasm, Register temp) {
             masm.passABIArg(src, ABIType::Float64);
             if (inWasm) {
               masm.callDebugWithABI(SymbolicAddress::PrintF64);
             } else {
               using Fn = void (*)(double val);
               masm.callWithABI<Fn, PrintF64>(
                   ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
             }
           });
}
    305 
#  ifdef ENABLE_WASM_SIMD
// Emit code that marks the presence of a v128 value; the payload itself is
// not printed (see TODO below).
static void GenPrintV128(DebugChannel channel, MacroAssembler& masm,
                         const FloatRegister& src) {
  // TODO: We might try to do something meaningful here once SIMD data are
  // aligned and hence C++-ABI compliant.  For now, just make ourselves visible.
  GenPrintf(channel, masm, "v128");
}
#  endif
#else
// Non-WASM_CODEGEN_DEBUG builds: the printing helpers compile away to
// nothing, so call sites need no #ifdef guards of their own.
static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
                      const char* fmt, ...) {}
static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
                          const Register& src) {}
static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
                        const Register& src) {}
static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
                        const Register64& src) {}
static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {}
static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {}
#  ifdef ENABLE_WASM_SIMD
static void GenPrintV128(DebugChannel channel, MacroAssembler& masm,
                         const FloatRegister& src) {}
#  endif
#endif
    332 
    333 static bool FinishOffsets(MacroAssembler& masm, Offsets* offsets) {
    334  // On old ARM hardware, constant pools could be inserted and they need to
    335  // be flushed before considering the size of the masm.
    336  masm.flushBuffer();
    337  offsets->end = masm.size();
    338  return !masm.oom();
    339 }
    340 
    341 template <class VectorT>
    342 static unsigned StackArgBytesHelper(const VectorT& args, ABIKind kind) {
    343  ABIArgIter<VectorT> iter(args, kind);
    344  while (!iter.done()) {
    345    iter++;
    346  }
    347  return iter.stackBytesConsumedSoFar();
    348 }
    349 
// Stack-argument bytes needed when calling with the native (system) ABI.
template <class VectorT>
static unsigned StackArgBytesForNativeABI(const VectorT& args) {
  return StackArgBytesHelper<VectorT>(args, ABIKind::System);
}
    354 
// Stack-argument bytes needed when calling with the wasm ABI.
template <class VectorT>
static unsigned StackArgBytesForWasmABI(const VectorT& args) {
  return StackArgBytesHelper<VectorT>(args, ABIKind::Wasm);
}
    359 
    360 static unsigned StackArgBytesForWasmABI(const FuncType& funcType) {
    361  ArgTypeVector args(funcType);
    362  return StackArgBytesForWasmABI(args);
    363 }
    364 
// Copy parameters out of argv and into the registers/stack-slots specified by
// the wasm ABI.  Each parameter occupies one fixed-size ExportArg slot in
// argv, indexed by argument position; |scratch| is used to stage GPR-sized
// values on their way to outgoing stack slots.
//
// SetupABIArguments are only used for C++ -> wasm calls through callExport(),
// and V128 and Ref types (other than externref) are not currently allowed.
static void SetupABIArguments(MacroAssembler& masm, const FuncExport& fe,
                              const FuncType& funcType, Register argv,
                              Register scratch) {
  ArgTypeVector args(funcType);
  for (ABIArgIter iter(args, ABIKind::Wasm); !iter.done(); iter++) {
    unsigned argOffset = iter.index() * sizeof(ExportArg);
    Address src(argv, argOffset);
    MIRType type = iter.mirType();
    switch (iter->kind()) {
      // Argument assigned to a general-purpose register.
      case ABIArg::GPR:
        if (type == MIRType::Int32) {
          masm.load32(src, iter->gpr());
        } else if (type == MIRType::Int64) {
          masm.load64(src, iter->gpr64());
        } else if (type == MIRType::WasmAnyRef) {
          masm.loadPtr(src, iter->gpr());
        } else if (type == MIRType::StackResults) {
          // Synthetic pointer to the caller-provided results area.
          MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
          masm.loadPtr(src, iter->gpr());
        } else {
          MOZ_CRASH("unknown GPR type");
        }
        break;
#ifdef JS_CODEGEN_REGISTER_PAIR
      // 32-bit targets pass I64 arguments in a pair of GPRs.
      case ABIArg::GPR_PAIR:
        if (type == MIRType::Int64) {
          masm.load64(src, iter->gpr64());
        } else {
          MOZ_CRASH("wasm uses hardfp for function calls.");
        }
        break;
#endif
      // Argument assigned to a floating-point register.
      case ABIArg::FPU: {
        static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
                      "ExportArg must be big enough to store SIMD values");
        switch (type) {
          case MIRType::Double:
            masm.loadDouble(src, iter->fpu());
            break;
          case MIRType::Float32:
            masm.loadFloat32(src, iter->fpu());
            break;
          case MIRType::Simd128:
#ifdef ENABLE_WASM_SIMD
            // This is only used by the testing invoke path,
            // wasmLosslessInvoke, and is guarded against in normal JS-API
            // call paths.
            masm.loadUnalignedSimd128(src, iter->fpu());
            break;
#else
            MOZ_CRASH("V128 not supported in SetupABIArguments");
#endif
          default:
            MOZ_CRASH("unexpected FPU type");
            break;
        }
        break;
      }
      // Argument assigned to an outgoing stack slot; copy via scratch.
      case ABIArg::Stack:
        switch (type) {
          case MIRType::Int32:
            masm.load32(src, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(),
                                           iter->offsetFromArgBase()));
            break;
          case MIRType::Int64: {
            RegisterOrSP sp = masm.getStackPointer();
            masm.copy64(src, Address(sp, iter->offsetFromArgBase()), scratch);
            break;
          }
          case MIRType::WasmAnyRef:
            masm.loadPtr(src, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(),
                                           iter->offsetFromArgBase()));
            break;
          case MIRType::Double: {
            ScratchDoubleScope fpscratch(masm);
            masm.loadDouble(src, fpscratch);
            masm.storeDouble(fpscratch, Address(masm.getStackPointer(),
                                                iter->offsetFromArgBase()));
            break;
          }
          case MIRType::Float32: {
            ScratchFloat32Scope fpscratch(masm);
            masm.loadFloat32(src, fpscratch);
            masm.storeFloat32(fpscratch, Address(masm.getStackPointer(),
                                                 iter->offsetFromArgBase()));
            break;
          }
          case MIRType::Simd128: {
#ifdef ENABLE_WASM_SIMD
            // This is only used by the testing invoke path,
            // wasmLosslessInvoke, and is guarded against in normal JS-API
            // call paths.
            ScratchSimd128Scope fpscratch(masm);
            masm.loadUnalignedSimd128(src, fpscratch);
            masm.storeUnalignedSimd128(
                fpscratch,
                Address(masm.getStackPointer(), iter->offsetFromArgBase()));
            break;
#else
            MOZ_CRASH("V128 not supported in SetupABIArguments");
#endif
          }
          case MIRType::StackResults: {
            MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
            masm.loadPtr(src, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(),
                                           iter->offsetFromArgBase()));
            break;
          }
          default:
            MOZ_CRASH("unexpected stack arg type");
        }
        break;
      case ABIArg::Uninitialized:
        MOZ_CRASH("Uninitialized ABIArg kind");
    }
  }
}
    490 
// Store the (at most one) register-carried result of the function into the
// memory at |loc|.  Results passed via the stack-results area are not
// handled here; the loop below only acts on the register result.
static void StoreRegisterResult(MacroAssembler& masm, const FuncExport& fe,
                                const FuncType& funcType, Register loc) {
  ResultType results = ResultType::Vector(funcType.results());
  DebugOnly<bool> sawRegisterResult = false;
  for (ABIResultIter iter(results); !iter.done(); iter.next()) {
    const ABIResult& result = iter.cur();
    if (result.inRegister()) {
      // At most one result may live in a register (MaxRegisterResults == 1).
      MOZ_ASSERT(!sawRegisterResult);
      sawRegisterResult = true;
      switch (result.type().kind()) {
        case ValType::I32:
          masm.store32(result.gpr(), Address(loc, 0));
          break;
        case ValType::I64:
          masm.store64(result.gpr64(), Address(loc, 0));
          break;
        case ValType::V128:
#ifdef ENABLE_WASM_SIMD
          masm.storeUnalignedSimd128(result.fpr(), Address(loc, 0));
          break;
#else
          MOZ_CRASH("V128 not supported in StoreABIReturn");
#endif
        case ValType::F32:
          masm.storeFloat32(result.fpr(), Address(loc, 0));
          break;
        case ValType::F64:
          masm.storeDouble(result.fpr(), Address(loc, 0));
          break;
        case ValType::Ref:
          masm.storePtr(result.gpr(), Address(loc, 0));
          break;
      }
    }
  }
  // If there are any results at all, exactly one must have been in a register.
  MOZ_ASSERT(sawRegisterResult == (results.length() > 0));
}
    528 
// Per-platform set of registers that the entry stubs must save and restore
// around a call into wasm (wasm code does not preserve non-volatiles).
#if defined(JS_CODEGEN_ARM)
// The ARM system ABI also includes d15 & s31 in the non volatile float
// registers. Also exclude lr (a.k.a. r14) as we preserve it manually.
static const LiveRegisterSet NonVolatileRegs = LiveRegisterSet(
    GeneralRegisterSet(Registers::NonVolatileMask &
                       ~(Registers::SetType(1) << Registers::lr)),
    FloatRegisterSet(FloatRegisters::NonVolatileMask |
                     (FloatRegisters::SetType(1) << FloatRegisters::d15) |
                     (FloatRegisters::SetType(1) << FloatRegisters::s31)));
#elif defined(JS_CODEGEN_ARM64)
// Exclude the Link Register (x30) because it is preserved manually.
//
// Include x16 (scratch) to make a 16-byte aligned amount of integer registers.
// Include d31 (scratch) to make a 16-byte aligned amount of floating registers.
static const LiveRegisterSet NonVolatileRegs = LiveRegisterSet(
    GeneralRegisterSet((Registers::NonVolatileMask &
                        ~(Registers::SetType(1) << Registers::lr)) |
                       (Registers::SetType(1) << Registers::x16)),
    FloatRegisterSet(FloatRegisters::NonVolatileMask |
                     FloatRegisters::NonAllocatableMask));
#else
static const LiveRegisterSet NonVolatileRegs =
    LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask),
                    FloatRegisterSet(FloatRegisters::NonVolatileMask));
#endif

// Stack space consumed by one WasmPush: a full 16 bytes on ARM64 (one
// payload word plus padding, keeping SP 16-byte aligned), a single pointer
// elsewhere.  See WasmPush/WasmPop below.
#ifdef JS_CODEGEN_ARM64
static const unsigned WasmPushSize = 16;
#else
static const unsigned WasmPushSize = sizeof(void*);
#endif
    560 
// Debug aid: on ARM64, assert that the real SP (not the pseudo stack
// pointer) is the active stack pointer.  No-op on other platforms.
static void AssertExpectedSP(MacroAssembler& masm) {
#ifdef JS_CODEGEN_ARM64
  MOZ_ASSERT(sp.Is(masm.GetStackPointer64()));
#  ifdef DEBUG
  // Since we're asserting that SP is the currently active stack pointer,
  // let's also in effect assert that PSP is dead -- by setting it to 1, so
  // as to cause any attempts to use it to segfault in an easily
  // identifiable way.
  masm.asVIXL().Mov(PseudoStackPointer64, 1);
#  endif
#endif
}
    573 
// Push |op|, consuming exactly WasmPushSize bytes of stack so that ARM64's
// 16-byte SP alignment requirement is maintained.
template <class Operand>
static void WasmPush(MacroAssembler& masm, const Operand& op) {
#ifdef JS_CODEGEN_ARM64
  // Allocate a pad word so that SP can remain properly aligned.  |op| will be
  // written at the lower-addressed of the two words pushed here.
  masm.reserveStack(WasmPushSize);
  masm.storePtr(op, Address(masm.getStackPointer(), 0));
#else
  masm.Push(op);
#endif
}
    585 
// Pop a value previously pushed with WasmPush into |r|, releasing the same
// WasmPushSize bytes of stack.
static void WasmPop(MacroAssembler& masm, Register r) {
#ifdef JS_CODEGEN_ARM64
  // Also pop the pad word allocated by WasmPush.
  masm.loadPtr(Address(masm.getStackPointer(), 0), r);
  masm.freeStack(WasmPushSize);
#else
  masm.Pop(r);
#endif
}
    595 
// On ARM64, mirror the real SP into the pseudo stack pointer before a call
// that follows the JIT ABI (which, presumably, addresses stack slots via
// PSP -- see the ARM64 PSP handling elsewhere in the JIT).  No-op on other
// platforms.
static void MoveSPForJitABI(MacroAssembler& masm) {
#ifdef JS_CODEGEN_ARM64
  masm.moveStackPtrTo(PseudoStackPointer);
#endif
}
    601 
    602 static void CallFuncExport(MacroAssembler& masm, const FuncExport& fe,
    603                           const Maybe<ImmPtr>& funcPtr) {
    604  MOZ_ASSERT(fe.hasEagerStubs() == !funcPtr);
    605  MoveSPForJitABI(masm);
    606  if (funcPtr) {
    607    masm.call(*funcPtr);
    608  } else {
    609    masm.call(CallSiteDesc(CallSiteKind::Func), fe.funcIndex());
    610  }
    611 }
    612 
    613 // Generate a stub that enters wasm from a C++ caller via the native ABI. The
    614 // signature of the entry point is Module::ExportFuncPtr. The exported wasm
    615 // function has an ABI derived from its specific signature, so this function
    616 // must map from the ABI of ExportFuncPtr to the export's signature's ABI.
    617 static bool GenerateInterpEntry(MacroAssembler& masm, const FuncExport& fe,
    618                                const FuncType& funcType,
    619                                const Maybe<ImmPtr>& funcPtr,
    620                                Offsets* offsets) {
    621  AutoCreatedBy acb(masm, "GenerateInterpEntry");
    622 
    623  AssertExpectedSP(masm);
    624 
    625  // UBSAN expects that the word before a C++ function pointer is readable for
    626  // some sort of generated assertion.
    627  //
    628  // These interp entry points can sometimes be output at the beginning of a
    629  // code page allocation, which will cause access violations when called with
    630  // UBSAN enabled.
    631  //
    632  // Insert some padding in this case by inserting a breakpoint before we align
    633  // our code. This breakpoint will misalign the code buffer (which was aligned
    634  // due to being at the beginning of the buffer), which will then be aligned
    635  // and have at least one word of padding before this entry point.
    636  if (masm.currentOffset() == 0) {
    637    masm.breakpoint();
    638  }
    639 
    640  masm.haltingAlign(CodeAlignment);
    641 
    642  // Double check that the first word is available for UBSAN; see above.
    643  static_assert(CodeAlignment >= sizeof(uintptr_t));
    644  MOZ_ASSERT_IF(!masm.oom(), masm.currentOffset() >= sizeof(uintptr_t));
    645 
    646  offsets->begin = masm.currentOffset();
    647 
    648  // Save the return address if it wasn't already saved by the call insn.
    649 #ifdef JS_USE_LINK_REGISTER
    650 #  if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS64) || \
    651      defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
    652  masm.pushReturnAddress();
    653 #  elif defined(JS_CODEGEN_ARM64)
    654  // WasmPush updates framePushed() unlike pushReturnAddress(), but that's
    655  // cancelled by the setFramePushed() below.
    656  WasmPush(masm, lr);
    657 #  else
    658  MOZ_CRASH("Implement this");
    659 #  endif
    660 #endif
    661 
    662  // Save all caller non-volatile registers before we clobber them here and in
    663  // the wasm callee (which does not preserve non-volatile registers).
    664  masm.setFramePushed(0);
    665  masm.PushRegsInMask(NonVolatileRegs);
    666 
    667  const unsigned nonVolatileRegsPushSize =
    668      MacroAssembler::PushRegsInMaskSizeInBytes(NonVolatileRegs);
    669 
    670  MOZ_ASSERT(masm.framePushed() == nonVolatileRegsPushSize);
    671 
    672  // Put the 'argv' argument into a non-argument/return/instance register so
    673  // that we can use 'argv' while we fill in the arguments for the wasm callee.
    674  // Use a second non-argument/return register as temporary scratch.
    675  Register argv = ABINonArgReturnReg0;
    676  Register scratch = ABINonArgReturnReg1;
    677 
    678  // scratch := SP
    679  masm.moveStackPtrTo(scratch);
    680 
    681  // Dynamically align the stack since ABIStackAlignment is not necessarily
    682  // WasmStackAlignment. Preserve SP so it can be restored after the call.
    683 #ifdef JS_CODEGEN_ARM64
    684  static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
    685 #else
    686  masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));
    687 #endif
    688  masm.assertStackAlignment(WasmStackAlignment);
    689 
    690  // Create a fake frame: just previous RA and an FP.
    691  const size_t FakeFrameSize = 2 * sizeof(void*);
    692 #ifdef JS_CODEGEN_ARM64
    693  masm.Ldr(ARMRegister(ABINonArgReturnReg0, 64),
    694           MemOperand(ARMRegister(scratch, 64), nonVolatileRegsPushSize));
    695 #else
    696  masm.Push(Address(scratch, nonVolatileRegsPushSize));
    697 #endif
    698  // Store fake wasm register state. Ensure the frame pointer passed by the C++
    699  // caller doesn't have the ExitFPTag bit set to not confuse frame iterators.
    700  // This bit shouldn't be set if C++ code is using frame pointers, so this has
    701  // no effect on native stack unwinders.
    702  masm.andPtr(Imm32(int32_t(~ExitFPTag)), FramePointer);
    703 #ifdef JS_CODEGEN_ARM64
    704  masm.asVIXL().Push(ARMRegister(ABINonArgReturnReg0, 64),
    705                     ARMRegister(FramePointer, 64));
    706  masm.moveStackPtrTo(FramePointer);
    707 #else
    708  masm.Push(FramePointer);
    709 #endif
    710 
    711  masm.moveStackPtrTo(FramePointer);
    712  masm.setFramePushed(0);
    713 #ifdef JS_CODEGEN_ARM64
    714  DebugOnly<size_t> fakeFramePushed = 0;
    715 #else
    716  DebugOnly<size_t> fakeFramePushed = sizeof(void*);
    717  masm.Push(scratch);
    718 #endif
    719 
    720  // Read the arguments of wasm::ExportFuncPtr according to the native ABI.
    721  // The entry stub's frame is 1 word.
    722  const unsigned argBase = sizeof(void*) + nonVolatileRegsPushSize;
    723  ABIArgGenerator abi(ABIKind::System);
    724  ABIArg arg;
    725 
    726  // arg 1: ExportArg*
    727  arg = abi.next(MIRType::Pointer);
    728  if (arg.kind() == ABIArg::GPR) {
    729    masm.movePtr(arg.gpr(), argv);
    730  } else {
    731    masm.loadPtr(Address(scratch, argBase + arg.offsetFromArgBase()), argv);
    732  }
    733 
    734  // Arg 2: Instance*
    735  arg = abi.next(MIRType::Pointer);
    736  if (arg.kind() == ABIArg::GPR) {
    737    masm.movePtr(arg.gpr(), InstanceReg);
    738  } else {
    739    masm.loadPtr(Address(scratch, argBase + arg.offsetFromArgBase()),
    740                 InstanceReg);
    741  }
    742 
    743  WasmPush(masm, InstanceReg);
    744 
    745  // Save 'argv' on the stack so that we can recover it after the call.
    746  WasmPush(masm, argv);
    747 
    748  MOZ_ASSERT(masm.framePushed() == 2 * WasmPushSize + fakeFramePushed,
    749             "expected instance, argv, and fake frame");
    750  uint32_t frameSizeBeforeCall = masm.framePushed();
    751 
    752  // Align (missing) results area to WasmStackAlignment boudary. Return calls
    753  // expect arguments to not overlap with results or other slots.
    754  unsigned aligned =
    755      AlignBytes(masm.framePushed() + FakeFrameSize, WasmStackAlignment);
    756  masm.reserveStack(aligned - masm.framePushed() + FakeFrameSize);
    757 
    758  // Reserve stack space for the wasm call.
    759  unsigned argDecrement = StackDecrementForCall(
    760      WasmStackAlignment, aligned, StackArgBytesForWasmABI(funcType));
    761  masm.reserveStack(argDecrement);
    762 
    763  // Copy parameters out of argv and into the wasm ABI registers/stack-slots.
    764  SetupABIArguments(masm, fe, funcType, argv, scratch);
    765 
    766  masm.loadWasmPinnedRegsFromInstance(mozilla::Nothing());
    767 
    768  masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
    769                                     WasmCalleeInstanceOffsetBeforeCall));
    770 
    771  // Call into the real function. Note that, due to the throw stub, fp, instance
    772  // and pinned registers may be clobbered.
    773  masm.assertStackAlignment(WasmStackAlignment);
    774  CallFuncExport(masm, fe, funcPtr);
    775  masm.assertStackAlignment(WasmStackAlignment);
    776 
    777  // Set the return value based on whether InstanceReg is the
    778  // InterpFailInstanceReg magic value (set by the exception handler).
    779  Label success, join;
    780  masm.branchPtr(Assembler::NotEqual, InstanceReg, Imm32(InterpFailInstanceReg),
    781                 &success);
    782  masm.move32(Imm32(false), scratch);
    783  masm.jump(&join);
    784  masm.bind(&success);
    785  masm.move32(Imm32(true), scratch);
    786  masm.bind(&join);
    787 
    788  // Pop the arguments pushed after the dynamic alignment.
    789  masm.setFramePushed(frameSizeBeforeCall);
    790  masm.freeStackTo(frameSizeBeforeCall);
    791 
    792  // Recover the 'argv' pointer which was saved before aligning the stack.
    793  WasmPop(masm, argv);
    794 
    795  WasmPop(masm, InstanceReg);
    796 
    797  // Pop the stack pointer to its value right before dynamic alignment.
    798 #ifdef JS_CODEGEN_ARM64
    799  static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
    800  masm.setFramePushed(FakeFrameSize);
    801  masm.freeStack(FakeFrameSize);
    802 #else
    803  masm.PopStackPtr();
    804 #endif
    805 
    806  // Store the register result, if any, in argv[0].
    807  // No widening is required, as the value leaves ReturnReg.
    808  StoreRegisterResult(masm, fe, funcType, argv);
    809 
    810  masm.move32(scratch, ReturnReg);
    811 
    812  // Restore clobbered non-volatile registers of the caller.
    813  masm.setFramePushed(nonVolatileRegsPushSize);
    814  masm.PopRegsInMask(NonVolatileRegs);
    815  MOZ_ASSERT(masm.framePushed() == 0);
    816 
    817 #if defined(JS_CODEGEN_ARM64)
    818  masm.setFramePushed(WasmPushSize);
    819  WasmPop(masm, lr);
    820  masm.abiret();
    821 #else
    822  masm.ret();
    823 #endif
    824 
    825  return FinishOffsets(masm, offsets);
    826 }
    827 
// Scratch registers used by the JIT entry stubs below.  The Value scratch
// needs one GPR on 64-bit (punboxed) targets and two (payload + tag) on
// 32-bit targets; both are taken from the ABI non-argument register set so
// they never alias incoming argument registers.
#ifdef JS_PUNBOX64
static const ValueOperand ScratchValIonEntry = ValueOperand(ABINonArgReg0);
#else
static const ValueOperand ScratchValIonEntry =
    ValueOperand(ABINonArgReg0, ABINonArgReg1);
#endif
// General-purpose scratch register for the JIT entry stubs.
static const Register ScratchIonEntry = ABINonArgReg2;
    835 
    836 static void CallSymbolicAddress(MacroAssembler& masm, bool isAbsolute,
    837                                SymbolicAddress sym) {
    838  if (isAbsolute) {
    839    masm.call(ImmPtr(SymbolicAddressTarget(sym), ImmPtr::NoCheckToken()));
    840  } else {
    841    masm.call(sym);
    842  }
    843 }
    844 
    845 // Load instance's instance from the callee.
    846 static void GenerateJitEntryLoadInstance(MacroAssembler& masm) {
    847  // ScratchIonEntry := callee => JSFunction*
    848  unsigned offset = JitFrameLayout::offsetOfCalleeToken();
    849  masm.loadFunctionFromCalleeToken(Address(FramePointer, offset),
    850                                   ScratchIonEntry);
    851 
    852  // ScratchIonEntry := callee->getExtendedSlot(WASM_INSTANCE_SLOT)->toPrivate()
    853  //                 => Instance*
    854  offset = FunctionExtended::offsetOfExtendedSlot(
    855      FunctionExtended::WASM_INSTANCE_SLOT);
    856  masm.loadPrivate(Address(ScratchIonEntry, offset), InstanceReg);
    857 }
    858 
// Creates a JS fake exit frame for wasm, so the frame iterators just use
// JSJit frame iteration.
//
// Note: the caller must ensure InstanceReg is valid.
static void GenerateJitEntryThrow(MacroAssembler& masm) {
  AssertExpectedSP(masm);

  MOZ_ASSERT(masm.framePushed() == 0);
  MoveSPForJitABI(masm);

  // Load the JSContext* from the instance so the fake exit frame can be
  // linked into the context.
  masm.loadPtr(Address(InstanceReg, Instance::offsetOfCx()), ScratchIonEntry);
  masm.enterFakeExitFrameForWasm(ScratchIonEntry, ScratchIonEntry,
                                 ExitFrameType::WasmGenericJitEntry);

  // Tail-jump to the instance's JS JIT exception handler; control does not
  // come back here.
  masm.loadPtr(Address(InstanceReg, Instance::offsetOfJSJitExceptionHandler()),
               ScratchIonEntry);
  masm.jump(ScratchIonEntry);
}
    877 
// Helper function for allocating a BigInt and initializing it from an I64 in
// GenerateJitEntry.  The return result is written to scratch.
//
// `bytesPushedByPrologue` is the number of bytes the enclosing stub has
// already pushed beyond masm.framePushed()'s accounting; it is folded into
// the alignment computation below.  On allocation failure control transfers
// to `fail` with the volatile registers already restored.
//
// Note that this will create a new frame and must not - in its current form -
// be called from a context where there is already another stub frame on the
// stack, as that confuses unwinding during profiling.  This was a problem for
// its use from GenerateImportJitExit, see bug 1754258.  Therefore,
// FuncType::canHaveJitExit prevents the present function from being called for
// exits.
static void GenerateBigIntInitialization(MacroAssembler& masm,
                                         unsigned bytesPushedByPrologue,
                                         Register64 input, Register scratch,
                                         const FuncExport& fe, Label* fail) {
#if JS_BITS_PER_WORD == 32
  MOZ_ASSERT(input.low != scratch);
  MOZ_ASSERT(input.high != scratch);
#else
  MOZ_ASSERT(input.reg != scratch);
#endif

  MOZ_ASSERT(masm.framePushed() == 0);

  // We need to avoid clobbering other argument registers and the input.
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  masm.PushRegsInMask(save);

  // Align the stack for the C++ call, accounting for both the registers just
  // pushed and whatever the caller's prologue pushed.
  unsigned frameSize = StackDecrementForCall(
      ABIStackAlignment, masm.framePushed() + bytesPushedByPrologue, 0);
  masm.reserveStack(frameSize);
  masm.assertStackAlignment(ABIStackAlignment);

  CallSymbolicAddress(masm, !fe.hasEagerStubs(),
                      SymbolicAddress::AllocateBigInt);
  masm.storeCallPointerResult(scratch);

  masm.assertStackAlignment(ABIStackAlignment);
  masm.freeStack(frameSize);

  // Restore the saved volatiles, but keep the allocation result in scratch
  // by excluding it from the pop.
  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  // A null result means the allocation failed (e.g. OOM).
  masm.branchTestPtr(Assembler::Zero, scratch, scratch, fail);
  masm.initializeBigInt64(Scalar::BigInt64, scratch, input);
}
    924 
// Generate a stub that enters wasm from a jit code caller via the jit ABI.
//
// The stub coerces the JS Values in the jit caller's frame to the wasm
// argument types in place (falling back to a C++ out-of-line path when an
// inline coercion is not possible), moves them into wasm ABI locations,
// calls the wasm function, and boxes the result back into JSReturnOperand.
//
// NOTE(review): `funcExportIndex` is not referenced in this body — presumably
// kept for signature symmetry with sibling generators; confirm before
// removing.
//
// ARM64 note: This does not save the PseudoStackPointer so we must be sure to
// recompute it on every return path, be it normal return or exception return.
// The JIT code we return to assumes it is correct.

static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex,
                             const FuncExport& fe, const FuncType& funcType,
                             const Maybe<ImmPtr>& funcPtr,
                             CallableOffsets* offsets) {
  AutoCreatedBy acb(masm, "GenerateJitEntry");

  AssertExpectedSP(masm);

  RegisterOrSP sp = masm.getStackPointer();

  GenerateJitEntryPrologue(masm, offsets);

  // The jit caller has set up the following stack layout (sp grows to the
  // left):
  // <-- retAddr | descriptor | callee | argc | this | arg1..N
  //
  // GenerateJitEntryPrologue has additionally pushed the caller's frame
  // pointer. The stack pointer is now JitStackAlignment-aligned.
  //
  // We initialize an ExitFooterFrame (with ExitFrameType::WasmGenericJitEntry)
  // immediately below the frame pointer to ensure FP is a valid JS JIT exit
  // frame.

  MOZ_ASSERT(masm.framePushed() == 0);

  // v128 arguments/results cannot cross the JS boundary at all: report the
  // error and throw without even building the argument area.
  if (funcType.hasUnexposableArgOrRet()) {
    GenerateJitEntryLoadInstance(masm);
    CallSymbolicAddress(masm, !fe.hasEagerStubs(),
                        SymbolicAddress::ReportV128JSCall);
    GenerateJitEntryThrow(masm);
    return FinishOffsets(masm, offsets);
  }

  // Avoid overlapping aligned stack arguments area with ExitFooterFrame.
  const unsigned AlignedExitFooterFrameSize =
      AlignBytes(ExitFooterFrame::Size(), WasmStackAlignment);
  unsigned normalBytesNeeded =
      AlignedExitFooterFrameSize + StackArgBytesForWasmABI(funcType);

  // The OOL coercion path calls CoerceInPlace_JitEntry(funcIndex, instance,
  // argv), so the frame must also be able to hold those three arguments.
  MIRTypeVector coerceArgTypes;
  MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Int32));
  MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
  MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
  unsigned oolBytesNeeded =
      AlignedExitFooterFrameSize + StackArgBytesForWasmABI(coerceArgTypes);

  unsigned bytesNeeded = std::max(normalBytesNeeded, oolBytesNeeded);

  // Note the jit caller ensures the stack is aligned *after* the call
  // instruction.
  unsigned frameSize = StackDecrementForCall(WasmStackAlignment,
                                             masm.framePushed(), bytesNeeded);

  // Reserve stack space for wasm ABI arguments, set up like this:
  // <-- ABI args | padding
  masm.reserveStack(frameSize);

  MOZ_ASSERT(masm.framePushed() == frameSize);

  // Initialize the ExitFooterFrame.
  static_assert(ExitFooterFrame::Size() == sizeof(uintptr_t));
  masm.storePtr(ImmWord(uint32_t(ExitFrameType::WasmGenericJitEntry)),
                Address(FramePointer, -int32_t(ExitFooterFrame::Size())));

  GenerateJitEntryLoadInstance(masm);

  FloatRegister scratchF = ABINonArgDoubleReg;
  Register scratchG = ScratchIonEntry;
  ValueOperand scratchV = ScratchValIonEntry;

  GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
            fe.funcIndex());

  // We do two loops:
  // - one loop up-front will make sure that all the Value tags fit the
  // expected signature argument types. If at least one inline conversion
  // fails, we just jump to the OOL path which will call into C++. Inline
  // conversions are ordered in the way we expect them to happen the most.
  // - the second loop will unbox the arguments into the right registers.
  Label oolCall;
  for (size_t i = 0; i < funcType.args().length(); i++) {
    Address jitArgAddr(FramePointer, JitFrameLayout::offsetOfActualArg(i));
    masm.loadValue(jitArgAddr, scratchV);

    Label next;
    switch (funcType.args()[i].kind()) {
      case ValType::I32: {
        Label isDouble, isUndefinedOrNull, isBoolean;
        {
          ScratchTagScope tag(masm, scratchV);
          masm.splitTagForTest(scratchV, tag);

          // For int32 inputs, just skip.
          masm.branchTestInt32(Assembler::Equal, tag, &next);

          masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
          masm.branchTestUndefined(Assembler::Equal, tag, &isUndefinedOrNull);
          masm.branchTestNull(Assembler::Equal, tag, &isUndefinedOrNull);
          masm.branchTestBoolean(Assembler::Equal, tag, &isBoolean);

          // Other types (symbol, object, strings) go to the C++ call.
          masm.jump(&oolCall);
        }

        Label storeBack;

        // For double inputs, unbox, truncate and store back.  Doubles that
        // do not fit an int32 go to the OOL path.
        masm.bind(&isDouble);
        {
          masm.unboxDouble(scratchV, scratchF);
          masm.branchTruncateDoubleMaybeModUint32(scratchF, scratchG, &oolCall);
          masm.jump(&storeBack);
        }

        // For null or undefined, store 0.
        masm.bind(&isUndefinedOrNull);
        {
          masm.storeValue(Int32Value(0), jitArgAddr);
          masm.jump(&next);
        }

        // For booleans, store the number value back.
        masm.bind(&isBoolean);
        masm.unboxBoolean(scratchV, scratchG);
        // fallthrough:

        masm.bind(&storeBack);
        masm.storeValue(JSVAL_TYPE_INT32, scratchG, jitArgAddr);
        break;
      }
      case ValType::I64: {
        // For BigInt inputs, just skip. Otherwise go to C++ for other
        // types that require creating a new BigInt or erroring.
        masm.branchTestBigInt(Assembler::NotEqual, scratchV, &oolCall);
        break;
      }
      case ValType::F32:
      case ValType::F64: {
        // Note we can reuse the same code for f32/f64 here, since for the
        // case of f32, the conversion of f64 to f32 will happen in the
        // second loop.

        Label isInt32OrBoolean, isUndefined, isNull;
        {
          ScratchTagScope tag(masm, scratchV);
          masm.splitTagForTest(scratchV, tag);

          // For double inputs, just skip.
          masm.branchTestDouble(Assembler::Equal, tag, &next);

          masm.branchTestInt32(Assembler::Equal, tag, &isInt32OrBoolean);
          masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
          masm.branchTestNull(Assembler::Equal, tag, &isNull);
          masm.branchTestBoolean(Assembler::Equal, tag, &isInt32OrBoolean);

          // Other types (symbol, object, strings) go to the C++ call.
          masm.jump(&oolCall);
        }

        // For int32 and boolean inputs, convert and rebox.
        masm.bind(&isInt32OrBoolean);
        {
          masm.convertInt32ToDouble(scratchV.payloadOrValueReg(), scratchF);
          masm.boxDouble(scratchF, jitArgAddr);
          masm.jump(&next);
        }

        // For undefined (missing argument), store NaN.
        masm.bind(&isUndefined);
        {
          masm.storeValue(DoubleValue(JS::GenericNaN()), jitArgAddr);
          masm.jump(&next);
        }

        // +null is 0.
        masm.bind(&isNull);
        {
          masm.storeValue(DoubleValue(0.), jitArgAddr);
        }
        break;
      }
      case ValType::Ref: {
        // Guarded against by temporarilyUnsupportedReftypeForEntry()
        MOZ_RELEASE_ASSERT(funcType.args()[i].refType().isExtern());
        masm.branchValueConvertsToWasmAnyRefInline(scratchV, scratchG, scratchF,
                                                   &next);
        masm.jump(&oolCall);
        break;
      }
      case ValType::V128: {
        // Guarded against by hasUnexposableArgOrRet()
        MOZ_CRASH("unexpected argument type when calling from the jit");
      }
      default: {
        MOZ_CRASH("unexpected argument type when calling from the jit");
      }
    }
    masm.nopAlign(CodeAlignment);
    masm.bind(&next);
  }

  // The OOL coercion path re-enters here after CoerceInPlace_JitEntry has
  // rewritten the jit frame's Values into coercible form.
  Label rejoinBeforeCall;
  masm.bind(&rejoinBeforeCall);

  // Convert all the expected values to unboxed values on the stack.
  ArgTypeVector args(funcType);
  for (ABIArgIter iter(args, ABIKind::Wasm); !iter.done(); iter++) {
    Address argv(FramePointer, JitFrameLayout::offsetOfActualArg(iter.index()));
    bool isStackArg = iter->kind() == ABIArg::Stack;
    switch (iter.mirType()) {
      case MIRType::Int32: {
        Register target = isStackArg ? ScratchIonEntry : iter->gpr();
        masm.unboxInt32(argv, target);
        GenPrintIsize(DebugChannel::Function, masm, target);
        if (isStackArg) {
          masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
        }
        break;
      }
      case MIRType::Int64: {
        // The coercion has provided a BigInt value by this point, which
        // we need to convert to an I64 here.
        if (isStackArg) {
          Address dst(sp, iter->offsetFromArgBase());
          Register src = scratchV.payloadOrValueReg();
#if JS_BITS_PER_WORD == 64
          Register64 scratch64(scratchG);
#else
          Register64 scratch64(scratchG, ABINonArgReg3);
#endif
          masm.unboxBigInt(argv, src);
          masm.loadBigInt64(src, scratch64);
          GenPrintI64(DebugChannel::Function, masm, scratch64);
          masm.store64(scratch64, dst);
        } else {
          Register src = scratchG;
          Register64 target = iter->gpr64();
          masm.unboxBigInt(argv, src);
          masm.loadBigInt64(src, target);
          GenPrintI64(DebugChannel::Function, masm, target);
        }
        break;
      }
      case MIRType::Float32: {
        // The first loop only guaranteed a boxed double; narrow it to f32
        // here.
        FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
        masm.unboxDouble(argv, ABINonArgDoubleReg);
        masm.convertDoubleToFloat32(ABINonArgDoubleReg, target);
        GenPrintF32(DebugChannel::Function, masm, target.asSingle());
        if (isStackArg) {
          masm.storeFloat32(target, Address(sp, iter->offsetFromArgBase()));
        }
        break;
      }
      case MIRType::Double: {
        FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
        masm.unboxDouble(argv, target);
        GenPrintF64(DebugChannel::Function, masm, target);
        if (isStackArg) {
          masm.storeDouble(target, Address(sp, iter->offsetFromArgBase()));
        }
        break;
      }
      case MIRType::WasmAnyRef: {
        ValueOperand src = ScratchValIonEntry;
        Register target = isStackArg ? ScratchIonEntry : iter->gpr();
        masm.loadValue(argv, src);
        // The loop before should ensure that all values that require boxing
        // have been taken care of.
        Label join;
        Label fail;
        masm.convertValueToWasmAnyRef(src, target, scratchF, &fail);
        masm.jump(&join);
        masm.bind(&fail);
        masm.breakpoint();
        masm.bind(&join);
        GenPrintPtr(DebugChannel::Function, masm, target);
        if (isStackArg) {
          masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
        }
        break;
      }
      default: {
        MOZ_CRASH("unexpected input argument when calling from jit");
      }
    }
  }

  GenPrintf(DebugChannel::Function, masm, "\n");

  // Setup wasm register state.
  masm.loadWasmPinnedRegsFromInstance(mozilla::Nothing());

  masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
                                     WasmCalleeInstanceOffsetBeforeCall));

  // Call into the real function.
  masm.assertStackAlignment(WasmStackAlignment);
  CallFuncExport(masm, fe, funcPtr);
  masm.assertStackAlignment(WasmStackAlignment);

  GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
            fe.funcIndex());

  // Pop frame. We set the stack pointer immediately after calling Wasm code
  // because the current stack pointer might not match the one before the call
  // if the callee performed a tail call.
  masm.moveToStackPtr(FramePointer);
  masm.setFramePushed(0);

  // Store the return value in the JSReturnOperand.
  Label exception;
  const ValTypeVector& results = funcType.results();
  if (results.length() == 0) {
    GenPrintf(DebugChannel::Function, masm, "void");
    masm.moveValue(UndefinedValue(), JSReturnOperand);
  } else {
    MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented");
    switch (results[0].kind()) {
      case ValType::I32:
        GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
#ifdef JS_64BIT
        // boxNonDouble requires a widened int32 value.
        masm.widenInt32(ReturnReg);
#endif
        masm.boxNonDouble(JSVAL_TYPE_INT32, ReturnReg, JSReturnOperand);
        break;
      case ValType::F32: {
        // Canonicalize NaNs and widen to double, since JS Values only box
        // doubles.
        masm.canonicalizeFloat(ReturnFloat32Reg);
        masm.convertFloat32ToDouble(ReturnFloat32Reg, ReturnDoubleReg);
        GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
        ScratchDoubleScope fpscratch(masm);
        masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
        break;
      }
      case ValType::F64: {
        masm.canonicalizeDouble(ReturnDoubleReg);
        GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
        ScratchDoubleScope fpscratch(masm);
        masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
        break;
      }
      case ValType::I64: {
        GenPrintI64(DebugChannel::Function, masm, ReturnReg64);
        MOZ_ASSERT(masm.framePushed() == 0);
        // Allocate a BigInt for the i64 result; jumps to `exception` on OOM.
        GenerateBigIntInitialization(masm, 0, ReturnReg64, scratchG, fe,
                                     &exception);
        masm.boxNonDouble(JSVAL_TYPE_BIGINT, scratchG, JSReturnOperand);
        break;
      }
      case ValType::V128: {
        MOZ_CRASH("unexpected return type when calling from ion to wasm");
      }
      case ValType::Ref: {
        GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
        masm.convertWasmAnyRefToValue(InstanceReg, ReturnReg, JSReturnOperand,
                                      WasmJitEntryReturnScratch);
        break;
      }
    }
  }

  GenPrintf(DebugChannel::Function, masm, "\n");

  AssertExpectedSP(masm);
  GenerateJitEntryEpilogue(masm, offsets);
  MOZ_ASSERT(masm.framePushed() == 0);

  // Generate an OOL call to the C++ conversion path.
  bool hasFallThroughForException = false;
  if (oolCall.used()) {
    masm.bind(&oolCall);
    masm.setFramePushed(frameSize);

    // Baseline and Ion call C++ runtime via BuiltinThunk with wasm abi, so to
    // unify the BuiltinThunk's interface we call it here with wasm abi.
    jit::ABIArgIter<MIRTypeVector> argsIter(
        coerceArgTypes, ABIForBuiltin(SymbolicAddress::CoerceInPlace_JitEntry));

    // argument 0: function index.
    if (argsIter->kind() == ABIArg::GPR) {
      masm.movePtr(ImmWord(fe.funcIndex()), argsIter->gpr());
    } else {
      masm.storePtr(ImmWord(fe.funcIndex()),
                    Address(sp, argsIter->offsetFromArgBase()));
    }
    argsIter++;

    // argument 1: instance
    if (argsIter->kind() == ABIArg::GPR) {
      masm.movePtr(InstanceReg, argsIter->gpr());
    } else {
      masm.storePtr(InstanceReg, Address(sp, argsIter->offsetFromArgBase()));
    }
    argsIter++;

    // argument 2: effective address of start of argv
    Address argv(FramePointer, JitFrameLayout::offsetOfActualArgs());
    if (argsIter->kind() == ABIArg::GPR) {
      masm.computeEffectiveAddress(argv, argsIter->gpr());
    } else {
      masm.computeEffectiveAddress(argv, ScratchIonEntry);
      masm.storePtr(ScratchIonEntry,
                    Address(sp, argsIter->offsetFromArgBase()));
    }
    argsIter++;
    MOZ_ASSERT(argsIter.done());

    masm.assertStackAlignment(ABIStackAlignment);
    CallSymbolicAddress(masm, !fe.hasEagerStubs(),
                        SymbolicAddress::CoerceInPlace_JitEntry);
    masm.assertStackAlignment(ABIStackAlignment);

    // No widening is required, as the return value is used as a bool.
    // Nonzero means the coercion succeeded: retry the unboxing loop.
    masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg,
                      &rejoinBeforeCall);

    // Coercion failed: fall through into the exception path below.
    MOZ_ASSERT(masm.framePushed() == frameSize);
    masm.freeStack(frameSize);
    hasFallThroughForException = true;
  }

  if (exception.used() || hasFallThroughForException) {
    masm.bind(&exception);
    MOZ_ASSERT(masm.framePushed() == 0);
    GenerateJitEntryThrow(masm);
  }

  return FinishOffsets(masm, offsets);
}
   1360 
   1361 void wasm::GenerateDirectCallFromJit(MacroAssembler& masm, const FuncExport& fe,
   1362                                     const Instance& inst,
   1363                                     const JitCallStackArgVector& stackArgs,
   1364                                     Register scratch, uint32_t* callOffset) {
   1365  MOZ_ASSERT(!IsCompilingWasm());
   1366 
   1367  const FuncType& funcType = inst.codeMeta().getFuncType(fe.funcIndex());
   1368 
   1369  size_t framePushedAtStart = masm.framePushed();
   1370 
   1371  // Note, if code here pushes a reference value into the frame for its own
   1372  // purposes (and not just as an argument to the callee) then the frame must be
   1373  // traced in TraceJitExitFrame, see the case there for DirectWasmJitCall.  The
   1374  // callee will trace values that are pushed as arguments, however.
   1375 
   1376  // Push a special frame descriptor that indicates the frame size so we can
   1377  // directly iterate from the current JIT frame without an extra call.
   1378  // Note: buildFakeExitFrame pushes an ExitFrameLayout containing the current
   1379  // frame pointer. We also use this to restore the frame pointer after the
   1380  // call.
   1381  *callOffset = masm.buildFakeExitFrame(scratch);
   1382  // FP := ExitFrameLayout*
   1383  masm.moveStackPtrTo(FramePointer);
   1384  size_t framePushedAtFakeFrame = masm.framePushed();
   1385  masm.setFramePushed(0);
   1386  masm.loadJSContext(scratch);
   1387  masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::DirectWasmJitCall);
   1388 
   1389  static_assert(ExitFrameLayout::SizeWithFooter() % WasmStackAlignment == 0);
   1390  MOZ_ASSERT(
   1391      (masm.framePushed() + framePushedAtFakeFrame) % WasmStackAlignment == 0);
   1392 
   1393  // Move stack arguments to their final locations.
   1394  unsigned bytesNeeded = StackArgBytesForWasmABI(funcType);
   1395  bytesNeeded = StackDecrementForCall(
   1396      WasmStackAlignment, framePushedAtFakeFrame + masm.framePushed(),
   1397      bytesNeeded);
   1398  if (bytesNeeded) {
   1399    masm.reserveStack(bytesNeeded);
   1400  }
   1401  size_t fakeFramePushed = masm.framePushed();
   1402 
   1403  GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
   1404            fe.funcIndex());
   1405 
   1406  ArgTypeVector args(funcType);
   1407  for (ABIArgIter iter(args, ABIKind::Wasm); !iter.done(); iter++) {
   1408    MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != scratch);
   1409    MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != FramePointer);
   1410    if (iter->kind() != ABIArg::Stack) {
   1411      switch (iter.mirType()) {
   1412        case MIRType::Int32:
   1413          GenPrintIsize(DebugChannel::Function, masm, iter->gpr());
   1414          break;
   1415        case MIRType::Int64:
   1416          GenPrintI64(DebugChannel::Function, masm, iter->gpr64());
   1417          break;
   1418        case MIRType::Float32:
   1419          GenPrintF32(DebugChannel::Function, masm, iter->fpu());
   1420          break;
   1421        case MIRType::Double:
   1422          GenPrintF64(DebugChannel::Function, masm, iter->fpu());
   1423          break;
   1424        case MIRType::WasmAnyRef:
   1425          GenPrintPtr(DebugChannel::Function, masm, iter->gpr());
   1426          break;
   1427        case MIRType::StackResults:
   1428          MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
   1429          GenPrintPtr(DebugChannel::Function, masm, iter->gpr());
   1430          break;
   1431        default:
   1432          MOZ_CRASH("ion to wasm fast path can only handle i32/f32/f64");
   1433      }
   1434      continue;
   1435    }
   1436 
   1437    Address dst(masm.getStackPointer(), iter->offsetFromArgBase());
   1438 
   1439    const JitCallStackArg& stackArg = stackArgs[iter.index()];
   1440    switch (stackArg.tag()) {
   1441      case JitCallStackArg::Tag::Imm32:
   1442        GenPrintf(DebugChannel::Function, masm, "%d ", stackArg.imm32());
   1443        masm.storePtr(ImmWord(stackArg.imm32()), dst);
   1444        break;
   1445      case JitCallStackArg::Tag::GPR:
   1446        MOZ_ASSERT(stackArg.gpr() != scratch);
   1447        MOZ_ASSERT(stackArg.gpr() != FramePointer);
   1448        GenPrintIsize(DebugChannel::Function, masm, stackArg.gpr());
   1449        masm.storePtr(stackArg.gpr(), dst);
   1450        break;
   1451      case JitCallStackArg::Tag::FPU:
   1452        switch (iter.mirType()) {
   1453          case MIRType::Double:
   1454            GenPrintF64(DebugChannel::Function, masm, stackArg.fpu());
   1455            masm.storeDouble(stackArg.fpu(), dst);
   1456            break;
   1457          case MIRType::Float32:
   1458            GenPrintF32(DebugChannel::Function, masm, stackArg.fpu());
   1459            masm.storeFloat32(stackArg.fpu(), dst);
   1460            break;
   1461          default:
   1462            MOZ_CRASH(
   1463                "unexpected MIR type for a float register in wasm fast call");
   1464        }
   1465        break;
   1466      case JitCallStackArg::Tag::Address: {
   1467        // The address offsets were valid *before* we pushed our frame.
   1468        Address src = stackArg.addr();
   1469        MOZ_ASSERT(src.base == masm.getStackPointer());
   1470        src.offset += int32_t(framePushedAtFakeFrame + fakeFramePushed -
   1471                              framePushedAtStart);
   1472        switch (iter.mirType()) {
   1473          case MIRType::Double: {
   1474            ScratchDoubleScope fpscratch(masm);
   1475            GenPrintF64(DebugChannel::Function, masm, fpscratch);
   1476            masm.loadDouble(src, fpscratch);
   1477            masm.storeDouble(fpscratch, dst);
   1478            break;
   1479          }
   1480          case MIRType::Float32: {
   1481            ScratchFloat32Scope fpscratch(masm);
   1482            masm.loadFloat32(src, fpscratch);
   1483            GenPrintF32(DebugChannel::Function, masm, fpscratch);
   1484            masm.storeFloat32(fpscratch, dst);
   1485            break;
   1486          }
   1487          case MIRType::Int32: {
   1488            masm.loadPtr(src, scratch);
   1489            GenPrintIsize(DebugChannel::Function, masm, scratch);
   1490            masm.storePtr(scratch, dst);
   1491            break;
   1492          }
   1493          case MIRType::WasmAnyRef: {
   1494            masm.loadPtr(src, scratch);
   1495            GenPrintPtr(DebugChannel::Function, masm, scratch);
   1496            masm.storePtr(scratch, dst);
   1497            break;
   1498          }
   1499          case MIRType::StackResults: {
   1500            MOZ_CRASH("multi-value in ion to wasm fast path unimplemented");
   1501          }
   1502          default: {
   1503            MOZ_CRASH("unexpected MIR type for a stack slot in wasm fast call");
   1504          }
   1505        }
   1506        break;
   1507      }
   1508      case JitCallStackArg::Tag::Undefined: {
   1509        MOZ_CRASH("can't happen because of arg.kind() check");
   1510      }
   1511    }
   1512  }
   1513 
   1514  GenPrintf(DebugChannel::Function, masm, "\n");
   1515 
   1516  // Load instance; from now on, InstanceReg is live.
   1517  masm.movePtr(ImmPtr(&inst), InstanceReg);
   1518  masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
   1519                                     WasmCalleeInstanceOffsetBeforeCall));
   1520  masm.loadWasmPinnedRegsFromInstance(mozilla::Nothing());
   1521 
   1522  // Actual call.
   1523  const CodeBlock& codeBlock = inst.code().funcCodeBlock(fe.funcIndex());
   1524  const CodeRange& codeRange = codeBlock.codeRange(fe);
   1525  void* callee = const_cast<uint8_t*>(codeBlock.base()) +
   1526                 codeRange.funcUncheckedCallEntry();
   1527 
   1528  masm.assertStackAlignment(WasmStackAlignment);
   1529  MoveSPForJitABI(masm);
   1530  masm.callJit(ImmPtr(callee));
   1531 #ifdef JS_CODEGEN_ARM64
   1532  // WASM does not always keep PSP in sync with SP.  So reinitialize it as it
   1533  // might be clobbered either by WASM or by any C++ calls within.
   1534  masm.initPseudoStackPtr();
   1535 #endif
   1536  masm.freeStackTo(fakeFramePushed);
   1537  masm.assertStackAlignment(WasmStackAlignment);
   1538 
   1539  // Store the return value in the appropriate place.
   1540  GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
   1541            fe.funcIndex());
   1542  const ValTypeVector& results = funcType.results();
   1543  if (results.length() == 0) {
   1544    masm.moveValue(UndefinedValue(), JSReturnOperand);
   1545    GenPrintf(DebugChannel::Function, masm, "void");
   1546  } else {
   1547    MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented");
   1548    switch (results[0].kind()) {
   1549      case wasm::ValType::I32:
   1550        // The return value is in ReturnReg, which is what Ion expects.
   1551        GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
   1552 #ifdef JS_64BIT
   1553        masm.widenInt32(ReturnReg);
   1554 #endif
   1555        break;
   1556      case wasm::ValType::I64:
   1557        // The return value is in ReturnReg64, which is what Ion expects.
   1558        GenPrintI64(DebugChannel::Function, masm, ReturnReg64);
   1559        break;
   1560      case wasm::ValType::F32:
   1561        masm.canonicalizeFloat(ReturnFloat32Reg);
   1562        GenPrintF32(DebugChannel::Function, masm, ReturnFloat32Reg);
   1563        break;
   1564      case wasm::ValType::F64:
   1565        masm.canonicalizeDouble(ReturnDoubleReg);
   1566        GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
   1567        break;
   1568      case wasm::ValType::Ref:
   1569        GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
   1570        // The call to wasm above preserves the InstanceReg, we don't
   1571        // need to reload it here.
   1572        masm.convertWasmAnyRefToValue(InstanceReg, ReturnReg, JSReturnOperand,
   1573                                      WasmJitEntryReturnScratch);
   1574        break;
   1575      case wasm::ValType::V128:
   1576        MOZ_CRASH("unexpected return type when calling from ion to wasm");
   1577    }
   1578  }
   1579 
   1580  GenPrintf(DebugChannel::Function, masm, "\n");
   1581 
   1582  // Restore the frame pointer.
   1583  masm.loadPtr(Address(FramePointer, 0), FramePointer);
   1584  masm.setFramePushed(fakeFramePushed + framePushedAtFakeFrame);
   1585 
   1586  // Free args + frame descriptor.
   1587  masm.leaveExitFrame(bytesNeeded + ExitFrameLayout::Size());
   1588 
   1589  MOZ_ASSERT(framePushedAtStart == masm.framePushed());
   1590 }
   1591 
   1592 static void StackCopy(MacroAssembler& masm, MIRType type, Register scratch,
   1593                      Address src, Address dst) {
   1594  if (type == MIRType::Int32) {
   1595    masm.load32(src, scratch);
   1596    GenPrintIsize(DebugChannel::Import, masm, scratch);
   1597    masm.store32(scratch, dst);
   1598  } else if (type == MIRType::Int64) {
   1599 #if JS_BITS_PER_WORD == 32
   1600    MOZ_RELEASE_ASSERT(src.base != scratch && dst.base != scratch);
   1601    GenPrintf(DebugChannel::Import, masm, "i64(");
   1602    masm.load32(LowWord(src), scratch);
   1603    GenPrintIsize(DebugChannel::Import, masm, scratch);
   1604    masm.store32(scratch, LowWord(dst));
   1605    masm.load32(HighWord(src), scratch);
   1606    GenPrintIsize(DebugChannel::Import, masm, scratch);
   1607    masm.store32(scratch, HighWord(dst));
   1608    GenPrintf(DebugChannel::Import, masm, ") ");
   1609 #else
   1610    Register64 scratch64(scratch);
   1611    masm.load64(src, scratch64);
   1612    GenPrintIsize(DebugChannel::Import, masm, scratch);
   1613    masm.store64(scratch64, dst);
   1614 #endif
   1615  } else if (type == MIRType::WasmAnyRef || type == MIRType::Pointer ||
   1616             type == MIRType::StackResults) {
   1617    masm.loadPtr(src, scratch);
   1618    GenPrintPtr(DebugChannel::Import, masm, scratch);
   1619    masm.storePtr(scratch, dst);
   1620  } else if (type == MIRType::Float32) {
   1621    ScratchFloat32Scope fpscratch(masm);
   1622    masm.loadFloat32(src, fpscratch);
   1623    GenPrintF32(DebugChannel::Import, masm, fpscratch);
   1624    masm.storeFloat32(fpscratch, dst);
   1625  } else if (type == MIRType::Double) {
   1626    ScratchDoubleScope fpscratch(masm);
   1627    masm.loadDouble(src, fpscratch);
   1628    GenPrintF64(DebugChannel::Import, masm, fpscratch);
   1629    masm.storeDouble(fpscratch, dst);
   1630 #ifdef ENABLE_WASM_SIMD
   1631  } else if (type == MIRType::Simd128) {
   1632    ScratchSimd128Scope fpscratch(masm);
   1633    masm.loadUnalignedSimd128(src, fpscratch);
   1634    GenPrintV128(DebugChannel::Import, masm, fpscratch);
   1635    masm.storeUnalignedSimd128(fpscratch, dst);
   1636 #endif
   1637  } else {
   1638    MOZ_CRASH("StackCopy: unexpected type");
   1639  }
   1640 }
   1641 
// Store every wasm ABI argument of an import call into the argv[] array at
// sp+argOffset — one Value-sized slot per argument, in argument order — so it
// can be handed to the C++ import-call machinery.  Values are stored raw
// (unboxed: store32/store64/storePtr/storeDouble), unlike the JIT exit which
// builds tagged JS::Values.  `scratch` is clobbered while copying stack
// arguments.
static void FillArgumentArrayForInterpExit(MacroAssembler& masm,
                                           unsigned funcImportIndex,
                                           const FuncType& funcType,
                                           unsigned argOffset,
                                           Register scratch) {
  // This is `sizeof(Frame)` because the wasm ABIArgIter handles adding the
  // offsets of the shadow stack area and the instance slots.
  const unsigned offsetFromFPToCallerStackArgs = sizeof(Frame);

  GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; arguments ",
            funcImportIndex);

  ArgTypeVector args(funcType);
  for (ABIArgIter i(args, ABIKind::Wasm); !i.done(); i++) {
    // Each argument occupies one Value-sized slot in the destination array.
    Address dst(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));

    MIRType type = i.mirType();
    MOZ_ASSERT(args.isSyntheticStackResultPointerArg(i.index()) ==
               (type == MIRType::StackResults));
    switch (i->kind()) {
      case ABIArg::GPR:
        if (type == MIRType::Int32) {
          GenPrintIsize(DebugChannel::Import, masm, i->gpr());
          masm.store32(i->gpr(), dst);
        } else if (type == MIRType::Int64) {
          GenPrintI64(DebugChannel::Import, masm, i->gpr64());
          masm.store64(i->gpr64(), dst);
        } else if (type == MIRType::WasmAnyRef) {
          GenPrintPtr(DebugChannel::Import, masm, i->gpr());
          masm.storePtr(i->gpr(), dst);
        } else if (type == MIRType::StackResults) {
          // The synthetic stack-result-area pointer is stored like any other
          // pointer-sized value.
          GenPrintPtr(DebugChannel::Import, masm, i->gpr());
          masm.storePtr(i->gpr(), dst);
        } else {
          MOZ_CRASH(
              "FillArgumentArrayForInterpExit, ABIArg::GPR: unexpected type");
        }
        break;
#ifdef JS_CODEGEN_REGISTER_PAIR
      case ABIArg::GPR_PAIR:
        if (type == MIRType::Int64) {
          GenPrintI64(DebugChannel::Import, masm, i->gpr64());
          masm.store64(i->gpr64(), dst);
        } else {
          MOZ_CRASH("wasm uses hardfp for function calls.");
        }
        break;
#endif
      case ABIArg::FPU: {
        FloatRegister srcReg = i->fpu();
        if (type == MIRType::Double) {
          GenPrintF64(DebugChannel::Import, masm, srcReg);
          masm.storeDouble(srcReg, dst);
        } else if (type == MIRType::Float32) {
          // Preserve the NaN pattern in the input.
          GenPrintF32(DebugChannel::Import, masm, srcReg);
          masm.storeFloat32(srcReg, dst);
        } else if (type == MIRType::Simd128) {
          // The value should never escape; the call will be stopped later as
          // the import is being called.  But we should generate something sane
          // here for the boxed case since a debugger or the stack walker may
          // observe something.
          ScratchDoubleScope dscratch(masm);
          masm.loadConstantDouble(0, dscratch);
          GenPrintF64(DebugChannel::Import, masm, dscratch);
          masm.storeDouble(dscratch, dst);
        } else {
          MOZ_CRASH("Unknown MIRType in wasm exit stub");
        }
        break;
      }
      case ABIArg::Stack: {
        // Stack arguments live in the caller's frame, above our Frame.
        Address src(FramePointer,
                    offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
        if (type == MIRType::Simd128) {
          // As above.  StackCopy does not know this trick.
          ScratchDoubleScope dscratch(masm);
          masm.loadConstantDouble(0, dscratch);
          GenPrintF64(DebugChannel::Import, masm, dscratch);
          masm.storeDouble(dscratch, dst);
        } else {
          StackCopy(masm, type, scratch, src, dst);
        }
        break;
      }
      case ABIArg::Uninitialized:
        MOZ_CRASH("Uninitialized ABIArg kind");
    }
  }
  GenPrintf(DebugChannel::Import, masm, "\n");
}
   1733 
// Box the wasm ABI arguments of an import call into tagged JS::Values in the
// argument array at sp+argOffset — one Value slot per argument, in argument
// order — for a call into JIT code.  Unlike the interp exit, every slot here
// holds a fully tagged Value (storeValue / boxDouble / convertWasmAnyRefToValue).
//
// Note, this may destroy the values in incoming argument registers as a result
// of Spectre mitigation.
static void FillArgumentArrayForJitExit(MacroAssembler& masm, Register instance,
                                        unsigned funcImportIndex,
                                        const FuncType& funcType,
                                        unsigned argOffset, Register scratch,
                                        Register scratch2, Label* throwLabel) {
  MOZ_ASSERT(scratch != scratch2);

  // NOTE(review): throwLabel is not referenced anywhere in this function.
  // I64 arguments — which presumably once needed it for BigInt allocation
  // failure — are excluded by FuncType::canHaveJitExit (see the MOZ_CRASHes
  // below); confirm before removing the parameter.

  // This is `sizeof(Frame)` because the wasm ABIArgIter handles adding the
  // offsets of the shadow stack area and the instance slots.
  const unsigned offsetFromFPToCallerStackArgs = sizeof(Frame);

  // This loop does not root the values that are being constructed in
  // for the arguments. Allocations that are generated by code either
  // in the loop or called from it should be NoGC allocations.
  GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; arguments ",
            funcImportIndex);

  ArgTypeVector args(funcType);
  for (ABIArgIter i(args, ABIKind::Wasm); !i.done(); i++) {
    // Each argument occupies one Value-sized slot in the destination array.
    Address dst(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));

    MIRType type = i.mirType();
    MOZ_ASSERT(args.isSyntheticStackResultPointerArg(i.index()) ==
               (type == MIRType::StackResults));
    switch (i->kind()) {
      case ABIArg::GPR:
        if (type == MIRType::Int32) {
          GenPrintIsize(DebugChannel::Import, masm, i->gpr());
          masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dst);
        } else if (type == MIRType::Int64) {
          // FuncType::canHaveJitExit should prevent this.  Also see comments
          // at GenerateBigIntInitialization.
          MOZ_CRASH("Should not happen");
        } else if (type == MIRType::WasmAnyRef) {
          // This works also for FuncRef because it is distinguishable from
          // a boxed AnyRef.
          masm.movePtr(i->gpr(), scratch2);
          GenPrintPtr(DebugChannel::Import, masm, scratch2);
          masm.convertWasmAnyRefToValue(instance, scratch2, dst, scratch);
        } else if (type == MIRType::StackResults) {
          MOZ_CRASH("Multi-result exit to JIT unimplemented");
        } else {
          MOZ_CRASH(
              "FillArgumentArrayForJitExit, ABIArg::GPR: unexpected type");
        }
        break;
#ifdef JS_CODEGEN_REGISTER_PAIR
      case ABIArg::GPR_PAIR:
        if (type == MIRType::Int64) {
          // FuncType::canHaveJitExit should prevent this.  Also see comments
          // at GenerateBigIntInitialization.
          MOZ_CRASH("Should not happen");
        } else {
          MOZ_CRASH("wasm uses hardfp for function calls.");
        }
        break;
#endif
      case ABIArg::FPU: {
        FloatRegister srcReg = i->fpu();
        if (type == MIRType::Double) {
          // Preserve the NaN pattern in the input.
          ScratchDoubleScope fpscratch(masm);
          masm.moveDouble(srcReg, fpscratch);
          masm.canonicalizeDouble(fpscratch);
          GenPrintF64(DebugChannel::Import, masm, fpscratch);
          masm.boxDouble(fpscratch, dst);
        } else if (type == MIRType::Float32) {
          // JS::Values can't store Float32, so convert to a Double.
          ScratchDoubleScope fpscratch(masm);
          masm.convertFloat32ToDouble(srcReg, fpscratch);
          masm.canonicalizeDouble(fpscratch);
          GenPrintF64(DebugChannel::Import, masm, fpscratch);
          masm.boxDouble(fpscratch, dst);
        } else if (type == MIRType::Simd128) {
          // The value should never escape; the call will be stopped later as
          // the import is being called.  But we should generate something sane
          // here for the boxed case since a debugger or the stack walker may
          // observe something.
          ScratchDoubleScope dscratch(masm);
          masm.loadConstantDouble(0, dscratch);
          GenPrintF64(DebugChannel::Import, masm, dscratch);
          masm.boxDouble(dscratch, dst);
        } else {
          MOZ_CRASH("Unknown MIRType in wasm exit stub");
        }
        break;
      }
      case ABIArg::Stack: {
        // Stack arguments live in the caller's frame, above our Frame.
        Address src(FramePointer,
                    offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
        if (type == MIRType::Int32) {
          masm.load32(src, scratch);
          GenPrintIsize(DebugChannel::Import, masm, scratch);
          masm.storeValue(JSVAL_TYPE_INT32, scratch, dst);
        } else if (type == MIRType::Int64) {
          // FuncType::canHaveJitExit should prevent this.  Also see comments
          // at GenerateBigIntInitialization.
          MOZ_CRASH("Should not happen");
        } else if (type == MIRType::WasmAnyRef) {
          // This works also for FuncRef because it is distinguishable from a
          // boxed AnyRef.
          masm.loadPtr(src, scratch);
          GenPrintPtr(DebugChannel::Import, masm, scratch);
          masm.convertWasmAnyRefToValue(instance, scratch, dst, scratch2);
        } else if (IsFloatingPointType(type)) {
          // Both f32 and f64 end up boxed as a canonicalized double.
          ScratchDoubleScope dscratch(masm);
          FloatRegister fscratch = dscratch.asSingle();
          if (type == MIRType::Float32) {
            masm.loadFloat32(src, fscratch);
            masm.convertFloat32ToDouble(fscratch, dscratch);
          } else {
            masm.loadDouble(src, dscratch);
          }
          masm.canonicalizeDouble(dscratch);
          GenPrintF64(DebugChannel::Import, masm, dscratch);
          masm.boxDouble(dscratch, dst);
        } else if (type == MIRType::Simd128) {
          // The value should never escape; the call will be stopped later as
          // the import is being called.  But we should generate something
          // sane here for the boxed case since a debugger or the stack walker
          // may observe something.
          ScratchDoubleScope dscratch(masm);
          masm.loadConstantDouble(0, dscratch);
          GenPrintF64(DebugChannel::Import, masm, dscratch);
          masm.boxDouble(dscratch, dst);
        } else {
          MOZ_CRASH(
              "FillArgumentArrayForJitExit, ABIArg::Stack: unexpected type");
        }
        break;
      }
      case ABIArg::Uninitialized:
        MOZ_CRASH("Uninitialized ABIArg kind");
    }
  }
  GenPrintf(DebugChannel::Import, masm, "\n");
}
   1873 
   1874 // Generate a wrapper function with the standard intra-wasm call ABI which
   1875 // simply calls an import. This wrapper function allows any import to be treated
   1876 // like a normal wasm function for the purposes of exports and table calls. In
   1877 // particular, the wrapper function provides:
   1878 //  - a table entry, so JS imports can be put into tables
   1879 //  - normal entries, so that, if the import is re-exported, an entry stub can
   1880 //    be generated and called without any special cases
   1881 static bool GenerateImportFunction(jit::MacroAssembler& masm,
   1882                                   uint32_t funcImportInstanceOffset,
   1883                                   const FuncType& funcType,
   1884                                   CallIndirectId callIndirectId,
   1885                                   FuncOffsets* offsets, StackMaps* stackMaps) {
   1886  AutoCreatedBy acb(masm, "wasm::GenerateImportFunction");
   1887 
   1888  AssertExpectedSP(masm);
   1889 
   1890  GenerateFunctionPrologue(masm, callIndirectId, Nothing(), offsets);
   1891 
   1892  MOZ_ASSERT(masm.framePushed() == 0);
   1893  const unsigned sizeOfInstanceSlot = sizeof(void*);
   1894  unsigned framePushed = StackDecrementForCall(
   1895      WasmStackAlignment,
   1896      sizeof(Frame),  // pushed by prologue
   1897      StackArgBytesForWasmABI(funcType) + sizeOfInstanceSlot);
   1898 
   1899  Label stackOverflowTrap;
   1900  masm.wasmReserveStackChecked(framePushed, &stackOverflowTrap);
   1901 
   1902  MOZ_ASSERT(masm.framePushed() == framePushed);
   1903 
   1904  masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
   1905                                     framePushed - sizeOfInstanceSlot));
   1906 
   1907  // The argument register state is already setup by our caller. We just need
   1908  // to be sure not to clobber it before the call.
   1909  Register scratch = ABINonArgReg0;
   1910 
   1911  // Copy our frame's stack arguments to the callee frame's stack argument.
   1912  //
   1913  // Note offsetFromFPToCallerStackArgs is sizeof(Frame) because the
   1914  // WasmABIArgIter accounts for both the ShadowStackSpace and the instance
   1915  // fields of FrameWithInstances.
   1916  unsigned offsetFromFPToCallerStackArgs = sizeof(Frame);
   1917  ArgTypeVector args(funcType);
   1918  for (ABIArgIter i(args, ABIKind::Wasm); !i.done(); i++) {
   1919    if (i->kind() != ABIArg::Stack) {
   1920      continue;
   1921    }
   1922 
   1923    Address src(FramePointer,
   1924                offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
   1925    Address dst(masm.getStackPointer(), i->offsetFromArgBase());
   1926    GenPrintf(DebugChannel::Import, masm,
   1927              "calling exotic import function with arguments: ");
   1928    StackCopy(masm, i.mirType(), scratch, src, dst);
   1929    GenPrintf(DebugChannel::Import, masm, "\n");
   1930  }
   1931 
   1932  // Call the import exit stub.
   1933  CallSiteDesc desc(CallSiteKind::Import);
   1934  MoveSPForJitABI(masm);
   1935  masm.wasmCallImport(desc, CalleeDesc::import(funcImportInstanceOffset));
   1936 
   1937  // Restore the instance register and pinned regs, per wasm function ABI.
   1938  masm.loadPtr(
   1939      Address(masm.getStackPointer(), framePushed - sizeOfInstanceSlot),
   1940      InstanceReg);
   1941  masm.loadWasmPinnedRegsFromInstance(mozilla::Nothing());
   1942 
   1943  // Restore cx->realm.
   1944  masm.switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
   1945 
   1946  GenerateFunctionEpilogue(masm, framePushed, offsets);
   1947 
   1948  // Emit the stack overflow trap as OOL code.
   1949  masm.bind(&stackOverflowTrap);
   1950  masm.wasmTrap(wasm::Trap::StackOverflow, wasm::TrapSiteDesc());
   1951 
   1952  return FinishOffsets(masm, offsets);
   1953 }
   1954 
// Default chunk size (4 KiB) for the stub-generation LifoAlloc, per the
// constant's name; the allocator usage itself is outside this chunk.
static const unsigned STUBS_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
   1956 
// Generate a stub that is called via the internal ABI derived from the
// signature of the import and calls into an appropriate callImport C++
// function, having boxed all the ABI arguments into a homogeneous Value array.
//
// On success, the callee writes the results back into that same array, and
// the register result (if any) is reloaded from argv[0] below.
static bool GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi,
                                     const FuncType& funcType,
                                     uint32_t funcImportIndex,
                                     Label* throwLabel,
                                     CallableOffsets* offsets) {
  AutoCreatedBy acb(masm, "GenerateImportInterpExit");

  // NOTE(review): `fi` is not referenced in this function's body.

  AssertExpectedSP(masm);
  masm.setFramePushed(0);

  // Argument types for Instance::callImport_*:
  static const MIRType typeArray[] = {MIRType::Pointer,   // Instance*
                                      MIRType::Pointer,   // funcImportIndex
                                      MIRType::Int32,     // argc
                                      MIRType::Pointer};  // argv
  MIRTypeVector invokeArgTypes;
  MOZ_ALWAYS_TRUE(invokeArgTypes.append(typeArray, std::size(typeArray)));

  // At the point of the call, the stack layout is:
  //
  //  | stack args | padding | argv[] | padding | retaddr | caller stack | ...
  //  ^
  //  +-- sp
  //
  // The padding between stack args and argv ensures that argv is aligned on a
  // Value boundary. The padding between argv and retaddr ensures that sp is
  // aligned.  The caller stack includes a ShadowStackArea and the instance
  // fields before the args, see WasmFrame.h.
  //
  // The 'double' alignment is correct since the argv[] is a Value array.
  unsigned argOffset =
      AlignBytes(StackArgBytesForNativeABI(invokeArgTypes), sizeof(double));
  // The abiArgCount includes a stack result pointer argument if needed.
  unsigned abiArgCount = ArgTypeVector(funcType).lengthWithStackResults();
  // Reserve at least one Value slot even for a nullary signature.
  unsigned argBytes = std::max<size_t>(1, abiArgCount) * sizeof(Value);
  unsigned frameAlignment =
      ComputeByteAlignment(sizeof(Frame), ABIStackAlignment);
  unsigned framePushed = AlignBytes(argOffset + argBytes, ABIStackAlignment);
  GenerateExitPrologue(masm, ExitReason::Fixed::ImportInterp,
                       /*switchToMainStack*/ true,
                       /*framePushedPreSwitch*/ frameAlignment,
                       /*framePushedPostSwitch*/ framePushed, offsets);

  // Fill the argument array.
  Register scratch = ABINonArgReturnReg0;
  FillArgumentArrayForInterpExit(masm, funcImportIndex, funcType, argOffset,
                                 scratch);

  // Prepare the arguments for the call to Instance::callImport_*, stepping
  // through the native (System) ABI one argument at a time.
  ABIArgMIRTypeIter i(invokeArgTypes, ABIKind::System);

  // argument 0: Instance*
  if (i->kind() == ABIArg::GPR) {
    masm.movePtr(InstanceReg, i->gpr());
  } else {
    masm.storePtr(InstanceReg,
                  Address(masm.getStackPointer(), i->offsetFromArgBase()));
  }
  i++;

  // argument 1: funcImportIndex
  if (i->kind() == ABIArg::GPR) {
    masm.mov(ImmWord(funcImportIndex), i->gpr());
  } else {
    masm.store32(Imm32(funcImportIndex),
                 Address(masm.getStackPointer(), i->offsetFromArgBase()));
  }
  i++;

  // argument 2: argc
  unsigned argc = abiArgCount;
  if (i->kind() == ABIArg::GPR) {
    masm.mov(ImmWord(argc), i->gpr());
  } else {
    masm.store32(Imm32(argc),
                 Address(masm.getStackPointer(), i->offsetFromArgBase()));
  }
  i++;

  // argument 3: argv (pointer to the Value array filled above)
  Address argv(masm.getStackPointer(), argOffset);
  if (i->kind() == ABIArg::GPR) {
    masm.computeEffectiveAddress(argv, i->gpr());
  } else {
    masm.computeEffectiveAddress(argv, scratch);
    masm.storePtr(scratch,
                  Address(masm.getStackPointer(), i->offsetFromArgBase()));
  }
  i++;
  MOZ_ASSERT(i.done());

  // Make the call, test whether it succeeded, and extract the return value.
  // A zero return from the C++ callee signals failure; jump to the throw
  // stub in that case.
  masm.assertStackAlignment(ABIStackAlignment);
  masm.call(SymbolicAddress::CallImport_General);
  masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);

  // Find the single result (if any) that is returned in a register; multiple
  // register results would trip the assert below.
  ResultType resultType = ResultType::Vector(funcType.results());
  ValType registerResultType;
  for (ABIResultIter iter(resultType); !iter.done(); iter.next()) {
    if (iter.cur().inRegister()) {
      MOZ_ASSERT(!registerResultType.isValid());
      registerResultType = iter.cur().type();
    }
  }
  if (!registerResultType.isValid()) {
    GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
              funcImportIndex);
    GenPrintf(DebugChannel::Import, masm, "void");
  } else {
    // Reload the register result from argv[0], where the callee stored it.
    switch (registerResultType.kind()) {
      case ValType::I32:
        masm.load32(argv, ReturnReg);
        // No widening is required, as we know the value comes from an i32 load.
        GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
                  funcImportIndex);
        GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
        break;
      case ValType::I64:
        masm.load64(argv, ReturnReg64);
        GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
                  funcImportIndex);
        GenPrintI64(DebugChannel::Import, masm, ReturnReg64);
        break;
      case ValType::V128:
        // Note, CallImport_Rtt/V128 currently always throws, so we should never
        // reach this point.
        masm.breakpoint();
        break;
      case ValType::F32:
        masm.loadFloat32(argv, ReturnFloat32Reg);
        GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
                  funcImportIndex);
        GenPrintF32(DebugChannel::Import, masm, ReturnFloat32Reg);
        break;
      case ValType::F64:
        masm.loadDouble(argv, ReturnDoubleReg);
        GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
                  funcImportIndex);
        GenPrintF64(DebugChannel::Import, masm, ReturnDoubleReg);
        break;
      case ValType::Ref:
        masm.loadPtr(argv, ReturnReg);
        GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
                  funcImportIndex);
        GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
        break;
    }
  }

  GenPrintf(DebugChannel::Import, masm, "\n");

  // The native ABI preserves the instance, heap and global registers since they
  // are non-volatile.
  MOZ_ASSERT(NonVolatileRegs.has(InstanceReg));
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) ||      \
    defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS64) || \
    defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
  MOZ_ASSERT(NonVolatileRegs.has(HeapReg));
#endif

  GenerateExitEpilogue(masm, ExitReason::Fixed::ImportInterp,
                       /*switchToMainStack*/ true, offsets);

  return FinishOffsets(masm, offsets);
}
   2125 
// Generate a stub that is called via the internal ABI derived from the
// signature of the import and calls into a compatible JIT function,
// having boxed all the ABI arguments into the JIT stack frame layout.
//
// On entry InstanceReg holds the caller's instance (the callee JSFunction is
// loaded from the import's FuncImportInstanceData slot).  On a conversion
// failure the stub jumps to `throwLabel`; code ranges are reported through
// `offsets`.
static bool GenerateImportJitExit(MacroAssembler& masm,
                                  uint32_t funcImportInstanceOffset,
                                  const FuncType& funcType,
                                  unsigned funcImportIndex,
                                  uint32_t fallbackOffset, Label* throwLabel,
                                  ImportOffsets* offsets) {
  AutoCreatedBy acb(masm, "GenerateImportJitExit");

  AssertExpectedSP(masm);

  // The JS ABI uses the following layout:
  //
  //   | caller's frame | <-  aligned to WasmStackAlignment
  //   +----------------+
  //   | return address | <- RetAddrAndFP
  //   | caller fp      | <---- fp/current sp
  //   +................+
  //   | saved instance | <-- Stored relative to fp, not sp
  //   | padding?       |
  //   | undefined args?|
  //   | ...            |
  //   | defined args   |
  //   | ...            |
  //   | this           | <- If this is JitStack-aligned, the layout will be too
  //   +................+
  //   | callee token   | <- PreFrame
  //   | descriptor     | <---- sp after allocating stack frame
  //   +----------------+
  //   | return address |
  //   | frame pointer  | <- Must be JitStack-aligned
  //
  // Note: WasmStackAlignment requires that sp be WasmStackAlignment-aligned
  // when calling, *before* pushing the return address and frame pointer. The JS
  // ABI requires that sp be JitStackAlignment-aligned *after* pushing the
  // return address and frame pointer.
  static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes");

  // We allocate a full 8 bytes for the instance register, even on 32-bit,
  // because alignment padding will round it up anyway. Treating it as 8 bytes
  // is easier in the OOL underflow path.
  const unsigned sizeOfInstanceSlot = sizeof(Value);
  const unsigned sizeOfRetAddrAndFP = 2 * sizeof(void*);
  const unsigned sizeOfPreFrame =
      WasmToJSJitFrameLayout::Size() - sizeOfRetAddrAndFP;

  // These values are used if there is no arguments underflow.
  // If we need to push extra undefined arguments, we calculate them
  // dynamically in an out-of-line path.
  const unsigned sizeOfThisAndArgs =
      (1 + funcType.args().length()) * sizeof(Value);
  const unsigned totalJitFrameBytes = sizeOfRetAddrAndFP + sizeOfPreFrame +
                                      sizeOfThisAndArgs + sizeOfInstanceSlot;
  const unsigned jitFramePushed =
      StackDecrementForCall(JitStackAlignment,
                            sizeof(Frame),  // pushed by prologue
                            totalJitFrameBytes) -
      sizeOfRetAddrAndFP;

  // Generate a minimal prologue. Don't allocate a stack frame until we know
  // how big it should be.
  GenerateJitExitPrologue(masm, fallbackOffset, offsets);

  // 1. Allocate the stack frame.
  // 1.1. Get the callee. This must be a JSFunction if we're using this JIT
  // exit.
  Register callee = ABINonArgReturnReg0;
  Register scratch = ABINonArgReturnReg1;
  Register scratch2 = ABINonVolatileReg;
  masm.loadPtr(
      Address(InstanceReg, Instance::offsetInData(
                               funcImportInstanceOffset +
                               offsetof(FuncImportInstanceData, callable))),
      callee);

  // 1.2 Check to see if we are passing enough arguments. If not, we have to
  // allocate a larger stack frame and push `undefined` for the extra args.
  // (Passing too many arguments is not a problem; the JS ABI expects at *least*
  // numFormals arguments.)
  Label argUnderflow, argUnderflowRejoin;
  Register numFormals = scratch2;
  unsigned argc = funcType.args().length();
  masm.loadFunctionArgCount(callee, numFormals);
  masm.branch32(Assembler::GreaterThan, numFormals, Imm32(argc), &argUnderflow);

  // Otherwise, we can compute the stack frame size statically.
  masm.subFromStackPtr(Imm32(jitFramePushed));
  masm.bind(&argUnderflowRejoin);

  // 2. Descriptor.
  size_t argOffset = 0;
  uint32_t descriptor =
      MakeFrameDescriptorForJitCall(FrameType::WasmToJSJit, argc);
  masm.storePtr(ImmWord(uintptr_t(descriptor)),
                Address(masm.getStackPointer(), argOffset));
  argOffset += sizeof(size_t);

  // 3. Callee.
  masm.storePtr(callee, Address(masm.getStackPointer(), argOffset));
  argOffset += sizeof(size_t);
  MOZ_ASSERT(argOffset == sizeOfPreFrame);

  // 4. |this| value.
  masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), argOffset));
  argOffset += sizeof(Value);

  // 5. Fill the arguments.
  FillArgumentArrayForJitExit(masm, InstanceReg, funcImportIndex, funcType,
                              argOffset, scratch, scratch2, throwLabel);

  // 6. Preserve the instance register. We store it at a fixed negative offset
  // to the frame pointer so that we can recover it after the call without
  // needing to know how many arguments were passed.
  Address savedInstanceReg(FramePointer, -int32_t(sizeof(size_t)));
  masm.storePtr(InstanceReg, savedInstanceReg);

  // 7. Load callee executable entry point.
  masm.loadJitCodeRaw(callee, callee);

  masm.assertStackAlignment(JitStackAlignment, sizeOfRetAddrAndFP);
#ifdef JS_CODEGEN_ARM64
  AssertExpectedSP(masm);
  // Manually resync PSP.  Omitting this causes eg tests/wasm/import-export.js
  // to segfault.
  masm.moveStackPtrTo(PseudoStackPointer);
#endif
  masm.callJitNoProfiler(callee);

  // Note that there might be a GC thing in the JSReturnOperand now.
  // In all the code paths from here:
  // - either the value is unboxed because it was a primitive and we don't
  //   need to worry about rooting anymore.
  // - or the value needs to be rooted, but nothing can cause a GC between
  //   here and CoerceInPlace, which roots before coercing to a primitive.

  // The JIT callee clobbers all registers other than the frame pointer, so
  // restore InstanceReg here.
  masm.assertStackAlignment(JitStackAlignment, sizeOfRetAddrAndFP);
  masm.loadPtr(savedInstanceReg, InstanceReg);

  // The frame was aligned for the JIT ABI such that
  //   (sp - 2 * sizeof(void*)) % JitStackAlignment == 0
  // But now we possibly want to call one of several different C++ functions,
  // so subtract 2 * sizeof(void*) so that sp is aligned for an ABI call.
  static_assert(ABIStackAlignment <= JitStackAlignment, "subsumes");
  masm.subFromStackPtr(Imm32(sizeOfRetAddrAndFP));
  masm.assertStackAlignment(ABIStackAlignment);

#ifdef DEBUG
  {
    // Debug-only sanity check: a JS call should never return a magic Value.
    Label ok;
    masm.branchTestMagic(Assembler::NotEqual, JSReturnOperand, &ok);
    masm.breakpoint();
    masm.bind(&ok);
  }
#endif

  GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
            funcImportIndex);

  // Convert the JS return value to the wasm result type.  Fast paths are
  // emitted inline; anything that can fail falls through to `oolConvert`.
  Label oolConvert;
  const ValTypeVector& results = funcType.results();
  if (results.length() == 0) {
    GenPrintf(DebugChannel::Import, masm, "void");
  } else {
    MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
    switch (results[0].kind()) {
      case ValType::I32:
        // No widening is required, as the return value does not come to us in
        // ReturnReg.
        masm.truncateValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg,
                                  &oolConvert);
        GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
        break;
      case ValType::I64:
        // No fastpath for now, go immediately to ool case
        masm.jump(&oolConvert);
        break;
      case ValType::V128:
        // Unreachable as callImport should not call the stub.
        masm.breakpoint();
        break;
      case ValType::F32:
        masm.convertValueToFloat32(JSReturnOperand, ReturnFloat32Reg,
                                   &oolConvert);
        GenPrintF32(DebugChannel::Import, masm, ReturnFloat32Reg);
        break;
      case ValType::F64:
        masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg,
                                  &oolConvert);
        GenPrintF64(DebugChannel::Import, masm, ReturnDoubleReg);
        break;
      case ValType::Ref:
        // Guarded by temporarilyUnsupportedReftypeForExit()
        MOZ_RELEASE_ASSERT(results[0].refType().isExtern());
        masm.convertValueToWasmAnyRef(JSReturnOperand, ReturnReg,
                                      ABINonArgDoubleReg, &oolConvert);
        GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
        break;
    }
  }

  GenPrintf(DebugChannel::Import, masm, "\n");

  Label done;
  masm.bind(&done);

  masm.moveToStackPtr(FramePointer);
  GenerateJitExitEpilogue(masm, offsets);

  // Out-of-line path: the callee declares more formals than the wasm import
  // passes, so a larger frame must be allocated and the missing arguments
  // filled with `undefined` before rejoining at argUnderflowRejoin.
  masm.bind(&argUnderflow);
  // We aren't passing enough arguments.
  //
  // Compute the size of the stack frame (in Value-sized slots). On 32-bit, the
  // instance reg slot is 4 bytes of data and 4 bytes of alignment padding.
  Register numSlots = scratch;
  static_assert(sizeof(WasmToJSJitFrameLayout) % JitStackAlignment == 0);
  MOZ_ASSERT(sizeOfPreFrame % sizeof(Value) == 0);
  const uint32_t numSlotsForPreFrame = sizeOfPreFrame / sizeof(Value);
  const uint32_t extraSlots = numSlotsForPreFrame + 2;  // this + instance
  if (JitStackValueAlignment == 1) {
    // If we only need 8-byte alignment, no padding is necessary.
    masm.add32(Imm32(extraSlots), numFormals, numSlots);
  } else {
    MOZ_ASSERT(JitStackValueAlignment == 2);
    MOZ_ASSERT(sizeOfRetAddrAndFP == sizeOfPreFrame);
    // We have to allocate space for the preframe, `this`, the arguments, and
    // the saved instance. While doing so, we have to ensure that `this` is
    // aligned to JitStackAlignment, which will in turn guarantee the correct
    // alignment of the frame layout in the callee. To ensure alignment, we can
    // add padding between the arguments and the saved instance. sp was aligned
    // to WasmStackAlignment before pushing the return address / frame pointer
    // for this stub.
    //
    //                           (this)  (args)           (instance)
    // numSlots:        PreFrame + 1 + numFormals + padding + 1
    // aligned if even:            1 + numFormals + padding + 1 + RetAddrAndFP
    //
    // Conveniently, since numSlotsForPreFrame and numSlotsForRetAddrAndFP are
    // the same, these calculations give the same value. So we can ensure
    // alignment by rounding numSlots up to the next even number.
    masm.add32(Imm32(extraSlots + 1), numFormals, numSlots);
    masm.and32(Imm32(~1), numSlots);
  }

  // Adjust the stack pointer.  Note `scratch` aliases `numSlots`; the shift
  // by 3 converts Value-sized (8-byte) slots to bytes.
  masm.lshift32(Imm32(3), scratch);
  masm.subFromStackPtr(scratch);

  // Fill the undefined arguments.  numFormals counts down; each slot from
  // argc up to the callee's formal count gets UndefinedValue().
  Label loop;
  masm.bind(&loop);
  masm.sub32(Imm32(1), numFormals);
  // NOTE(review): `argAddr` appears to be unused — the storeValue below
  // recomputes the address from `argOffset` instead. Confirm the two offsets
  // agree (or remove the dead local).
  BaseValueIndex argAddr(masm.getStackPointer(), numFormals,
                         2 * sizeof(uintptr_t) +  // descriptor + callee
                             sizeof(Value));      // this
  masm.storeValue(UndefinedValue(), BaseValueIndex(masm.getStackPointer(),
                                                   numFormals, argOffset));
  masm.branch32(Assembler::Above, numFormals, Imm32(argc), &loop);
  masm.jump(&argUnderflowRejoin);

  // Out-of-line conversion path: the inline coercion of the JS return value
  // failed (or was skipped, e.g. for I64), so call a C++ CoerceInPlace_*
  // helper on a boxed copy of the value.
  if (oolConvert.used()) {
    masm.bind(&oolConvert);

    // Coercion calls use the following stack layout (sp grows to the left):
    //   | args | padding | Value argv[1] | padding | exit Frame |
    MIRTypeVector coerceArgTypes;
    MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
    unsigned offsetToCoerceArgv =
        AlignBytes(StackArgBytesForNativeABI(coerceArgTypes), sizeof(Value));
    masm.assertStackAlignment(ABIStackAlignment);

    // Store return value into argv[0].
    masm.storeValue(JSReturnOperand,
                    Address(masm.getStackPointer(), offsetToCoerceArgv));

    // From this point, it's safe to reuse the scratch register (which
    // might be part of the JSReturnOperand).

    // The JIT might have clobbered exitFP at this point. Since there's
    // going to be a CoerceInPlace call, pretend we're still doing the JIT
    // call by restoring our tagged exitFP.
    LoadActivation(masm, InstanceReg, scratch);
    SetExitFP(masm, ExitReason::Fixed::ImportJit, scratch, scratch2);

    // argument 0: argv
    ABIArgMIRTypeIter i(coerceArgTypes, ABIKind::System);
    Address argv(masm.getStackPointer(), offsetToCoerceArgv);
    if (i->kind() == ABIArg::GPR) {
      masm.computeEffectiveAddress(argv, i->gpr());
    } else {
      masm.computeEffectiveAddress(argv, scratch);
      masm.storePtr(scratch,
                    Address(masm.getStackPointer(), i->offsetFromArgBase()));
    }
    i++;
    MOZ_ASSERT(i.done());

    // Call coercion function. Note that right after the call, the value of
    // FP is correct because FP is non-volatile in the native ABI.
    masm.assertStackAlignment(ABIStackAlignment);
    const ValTypeVector& results = funcType.results();
    if (results.length() > 0) {
      // NOTE that once there can be more than one result and we can box some of
      // the results (as we must for AnyRef), pointer and already-boxed results
      // must be rooted while subsequent results are boxed.
      MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
      switch (results[0].kind()) {
        case ValType::I32:
          masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
          masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
          masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv),
                          ReturnReg);
          // No widening is required, as we generate a known-good value in a
          // safe way here.
          break;
        case ValType::I64: {
          masm.call(SymbolicAddress::CoerceInPlace_ToBigInt);
          masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
          Address argv(masm.getStackPointer(), offsetToCoerceArgv);
          masm.unboxBigInt(argv, scratch);
          masm.loadBigInt64(scratch, ReturnReg64);
          break;
        }
        case ValType::F64:
        case ValType::F32:
          masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
          masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
          masm.unboxDouble(Address(masm.getStackPointer(), offsetToCoerceArgv),
                           ReturnDoubleReg);
          if (results[0].kind() == ValType::F32) {
            masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
          }
          break;
        case ValType::Ref:
          // Guarded by temporarilyUnsupportedReftypeForExit()
          MOZ_RELEASE_ASSERT(results[0].refType().isExtern());
          masm.call(SymbolicAddress::BoxValue_Anyref);
          masm.branchWasmAnyRefIsNull(true, ReturnReg, throwLabel);
          break;
        default:
          MOZ_CRASH("Unsupported convert type");
      }
    }

    // Maintain the invariant that exitFP is either unset or not set to a
    // wasm tagged exitFP, per the jit exit contract.
    LoadActivation(masm, InstanceReg, scratch);
    ClearExitFP(masm, scratch);

    masm.jump(&done);
  }

  return FinishOffsets(masm, offsets);
}
   2483 
   2484 struct ABIFunctionArgs {
   2485  ABIFunctionType abiType;
   2486  size_t len;
   2487 
   2488  explicit ABIFunctionArgs(ABIFunctionType sig)
   2489      : abiType(ABIFunctionType(sig >> ABITypeArgShift)) {
   2490    len = 0;
   2491    uint64_t i = uint64_t(abiType);
   2492    while (i) {
   2493      i = i >> ABITypeArgShift;
   2494      len++;
   2495    }
   2496  }
   2497 
   2498  size_t length() const { return len; }
   2499 
   2500  MIRType operator[](size_t i) const {
   2501    MOZ_ASSERT(i < len);
   2502    uint64_t abi = uint64_t(abiType);
   2503    size_t argAtLSB = len - 1;
   2504    while (argAtLSB != i) {
   2505      abi = abi >> ABITypeArgShift;
   2506      argAtLSB--;
   2507    }
   2508    return ToMIRType(ABIType(abi & ABITypeArgMask));
   2509  }
   2510 };
   2511 
// Generate a thunk that adapts a call made under the wasm ABI to the native
// (system) ABI and calls the C++ builtin at `funcPtr`.  Stack arguments are
// copied from the caller's wasm frame into a freshly allocated
// outgoing-argument area; on x86 and soft-FP ARM the native floating-point
// return value is additionally converted into the register the wasm ABI
// expects.  Code ranges are reported through `offsets`.
bool wasm::GenerateBuiltinThunk(MacroAssembler& masm, ABIFunctionType abiType,
                                bool switchToMainStack, ExitReason exitReason,
                                void* funcPtr, CallableOffsets* offsets) {
  AssertExpectedSP(masm);
  masm.setFramePushed(0);

  ABIFunctionArgs args(abiType);
  unsigned frameAlignment =
      ComputeByteAlignment(sizeof(Frame), ABIStackAlignment);
  unsigned framePushed =
      AlignBytes(StackArgBytesForNativeABI(args), ABIStackAlignment);
  GenerateExitPrologue(masm, exitReason, switchToMainStack,
                       /*framePushedPreSwitch*/ frameAlignment,
                       /*framePushedPostSwitch*/ framePushed, offsets);

  // Copy out and convert caller arguments, if needed. We are translating from
  // the wasm ABI to the system ABI.
  Register scratch = ABINonArgReturnReg0;

  // Use two arg iterators to track the different offsets that arguments must
  // go. We are translating from the wasm ABI to the system ABI.
  ABIArgIter selfArgs(args, ABIKind::Wasm);
  ABIArgIter callArgs(args, ABIKind::System);

  // `selfArgs` gives us offsets from 'arg base' which is the SP immediately
  // before our frame is added. We must add `sizeof(Frame)` now that the
  // prologue has executed to access our stack args.
  unsigned offsetFromFPToCallerStackArgs = sizeof(wasm::Frame);

  for (; !selfArgs.done(); selfArgs++, callArgs++) {
    // This loop doesn't handle all the possible cases of differing ABI's and
    // relies on the wasm argument ABI being very close to the system ABI.
    MOZ_ASSERT(!callArgs.done());
    MOZ_ASSERT(selfArgs->argInRegister() == callArgs->argInRegister());
    MOZ_ASSERT(selfArgs.mirType() == callArgs.mirType());

    if (selfArgs->argInRegister()) {
#ifdef JS_CODEGEN_ARM
      // The system ABI may use soft-FP, while the wasm ABI will always use
      // hard-FP. We must adapt FP args in this case.
      if (!ARMFlags::UseHardFpABI() &&
          IsFloatingPointType(selfArgs.mirType())) {
        FloatRegister input = selfArgs->fpu();
        if (selfArgs.mirType() == MIRType::Float32) {
          masm.ma_vxfer(input, Register::FromCode(input.id()));
        } else if (selfArgs.mirType() == MIRType::Double) {
          // A double is transferred into the consecutive GPR pair
          // (regId, regId + 1).
          uint32_t regId = input.singleOverlay().id();
          masm.ma_vxfer(input, Register::FromCode(regId),
                        Register::FromCode(regId + 1));
        }
      }
#endif
      // Register arguments otherwise coincide between the two ABIs.
      continue;
    }

    // Stack argument: copy it from the caller's frame into our outgoing area.
    Address src(FramePointer,
                offsetFromFPToCallerStackArgs + selfArgs->offsetFromArgBase());
    Address dst(masm.getStackPointer(), callArgs->offsetFromArgBase());
    StackCopy(masm, selfArgs.mirType(), scratch, src, dst);
  }
  // If selfArgs is done, callArgs must be done.
  MOZ_ASSERT(callArgs.done());

  // Call into the native builtin function
  masm.assertStackAlignment(ABIStackAlignment);
  MoveSPForJitABI(masm);
  masm.call(ImmPtr(funcPtr, ImmPtr::NoCheckToken()));

#if defined(JS_CODEGEN_X64)
  // No widening is required, as the caller will widen.
#elif defined(JS_CODEGEN_X86)
  // The wasm ABI always uses SSE for floating point returns, and so we must
  // convert the x87 FP stack result over.
  Operand op(esp, 0);
  MIRType retType = ToMIRType(ABIType(
      std::underlying_type_t<ABIFunctionType>(abiType) & ABITypeArgMask));
  if (retType == MIRType::Float32) {
    masm.fstp32(op);
    masm.loadFloat32(op, ReturnFloat32Reg);
  } else if (retType == MIRType::Double) {
    masm.fstp(op);
    masm.loadDouble(op, ReturnDoubleReg);
  }
#elif defined(JS_CODEGEN_ARM)
  // We must adapt the system soft-fp return value from a GPR to a FPR.
  MIRType retType = ToMIRType(ABIType(
      std::underlying_type_t<ABIFunctionType>(abiType) & ABITypeArgMask));
  if (!ARMFlags::UseHardFpABI() && IsFloatingPointType(retType)) {
    masm.ma_vxfer(r0, r1, d0);
  }
#endif

  GenerateExitEpilogue(masm, exitReason, switchToMainStack, offsets);
  return FinishOffsets(masm, offsets);
}
   2607 
// Per-architecture set of registers saved and restored around the trap exit
// (see GenerateTrapExit and GenerateTrapExitRegisterOffsets): all
// general-purpose registers except those that must not or cannot be
// clobbered/restored (sp, pc, zero, k0/k1, tp, ...), plus the
// floating-point registers noted per case below.
#if defined(JS_CODEGEN_ARM)
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~((Registers::SetType(1) << Registers::sp) |
                         (Registers::SetType(1) << Registers::pc))),
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
#  ifdef ENABLE_WASM_SIMD
#    error "high lanes of SIMD registers need to be saved too."
#  endif
#elif defined(JS_CODEGEN_MIPS64)
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~((Registers::SetType(1) << Registers::k0) |
                         (Registers::SetType(1) << Registers::k1) |
                         (Registers::SetType(1) << Registers::sp) |
                         (Registers::SetType(1) << Registers::zero))),
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
#  ifdef ENABLE_WASM_SIMD
#    error "high lanes of SIMD registers need to be saved too."
#  endif
#elif defined(JS_CODEGEN_LOONG64)
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~((uint32_t(1) << Registers::tp) |
                         (uint32_t(1) << Registers::fp) |
                         (uint32_t(1) << Registers::sp) |
                         (uint32_t(1) << Registers::zero))),
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
#  ifdef ENABLE_WASM_SIMD
#    error "high lanes of SIMD registers need to be saved too."
#  endif
#elif defined(JS_CODEGEN_RISCV64)
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~((uint32_t(1) << Registers::tp) |
                         (uint32_t(1) << Registers::fp) |
                         (uint32_t(1) << Registers::sp) |
                         (uint32_t(1) << Registers::zero))),
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
#  ifdef ENABLE_WASM_SIMD
#    error "high lanes of SIMD registers need to be saved too."
#  endif
#elif defined(JS_CODEGEN_ARM64)
// We assume that traps do not happen while lr is live. This both ensures that
// the size of RegsToPreserve is a multiple of 2 (preserving WasmStackAlignment)
// and gives us a register to clobber in the return path.
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~((Registers::SetType(1) << RealStackPointer.code()) |
                         (Registers::SetType(1) << Registers::lr))),
#  ifdef ENABLE_WASM_SIMD
    FloatRegisterSet(FloatRegisters::AllSimd128Mask));
#  else
    // If SIMD is not enabled, it's pointless to save/restore the upper 64
    // bits of each vector register.
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
#  endif
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
// It's correct to use FloatRegisters::AllMask even when SIMD is not enabled;
// PushRegsInMask strips out the high lanes of the XMM registers in this case,
// while the singles will be stripped as they are aliased by the larger doubles.
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~(Registers::SetType(1) << Registers::StackPointer)),
    FloatRegisterSet(FloatRegisters::AllMask));
#else
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllDoubleMask));
#  ifdef ENABLE_WASM_SIMD
#    error "no SIMD support"
#  endif
#endif
   2680 
   2681 // Generate a RegisterOffsets which describes the locations of the GPRs as saved
   2682 // by GenerateTrapExit.  FP registers are ignored.  Note that the values
   2683 // stored in the RegisterOffsets are offsets in words downwards from the top of
   2684 // the save area.  That is, a higher value implies a lower address.
   2685 void wasm::GenerateTrapExitRegisterOffsets(RegisterOffsets* offsets,
   2686                                           size_t* numWords) {
   2687  // This is the number of words pushed by the initial WasmPush().
   2688  *numWords = WasmPushSize / sizeof(void*);
   2689  MOZ_ASSERT(*numWords == TrapExitDummyValueOffsetFromTop + 1);
   2690 
   2691  // And these correspond to the PushRegsInMask() that immediately follows.
   2692  for (GeneralRegisterBackwardIterator iter(RegsToPreserve.gprs()); iter.more();
   2693       ++iter) {
   2694    offsets->setOffset(*iter, *numWords);
   2695    (*numWords)++;
   2696  }
   2697 }
   2698 
// Generate a stub which calls WasmReportTrap() and can be executed by having
// the signal handler redirect PC from any trapping instruction.
//
// The stub preserves the full register state (RegsToPreserve), calls the C++
// HandleTrap helper on an ABI-aligned stack, and either jumps to `throwLabel`
// (when the helper returns null) or resumes at the PC the helper returns.
static bool GenerateTrapExit(MacroAssembler& masm, Label* throwLabel,
                             Offsets* offsets) {
  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);

  masm.setFramePushed(0);

  offsets->begin = masm.currentOffset();

  // Traps can only happen at well-defined program points. However, since
  // traps may resume and the optimal assumption for the surrounding code is
  // that registers are not clobbered, we need to preserve all registers in
  // the trap exit. One simplifying assumption is that flags may be clobbered.
  // Push a dummy word to use as return address below.
  WasmPush(masm, ImmWord(TrapExitDummyValue));
  unsigned framePushedBeforePreserve = masm.framePushed();
  masm.PushRegsInMask(RegsToPreserve);
  unsigned offsetOfReturnWord = masm.framePushed() - framePushedBeforePreserve;

  // Load the instance register from the wasm::FrameWithInstances. Normally we
  // are only guaranteed to have a valid instance there if the frame was a
  // cross-instance call, however wasm::HandleTrap in the signal handler is
  // kind enough to store the active instance into that slot for us.
  masm.loadPtr(
      Address(FramePointer, wasm::FrameWithInstances::calleeInstanceOffset()),
      InstanceReg);

  // Grab the stack pointer before we do any stack switches or dynamic
  // alignment. Store it in a register that won't be used in the stack switch
  // operation.
  Register originalStackPointer = ABINonArgReg3;
  masm.moveStackPtrTo(originalStackPointer);

#ifdef ENABLE_WASM_JSPI
  GenerateExitPrologueMainStackSwitch(masm, InstanceReg, ABINonArgReg0,
                                      ABINonArgReg1, ABINonArgReg2);
#endif

  // We know that StackPointer is word-aligned, but not necessarily
  // stack-aligned, so we need to align it dynamically. After we've aligned the
  // stack, we store the original stack pointer in a slot on the stack.
  // We're careful to not break stack alignment with that slot.
  masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  masm.reserveStack(ABIStackAlignment);
  masm.storePtr(originalStackPointer, Address(masm.getStackPointer(), 0));

  // Push the shadow stack space for the call if we need to. This won't break
  // stack alignment.
  if (ShadowStackSpace) {
    masm.subFromStackPtr(Imm32(ShadowStackSpace));
  }

  // Call the WasmHandleTrap function.
  masm.assertStackAlignment(ABIStackAlignment);
  masm.call(SymbolicAddress::HandleTrap);

  // WasmHandleTrap returns null if control should transfer to the throw stub.
  // That will unwind the stack, and so we don't need to pop anything from the
  // stack ourselves.
  masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);

  // Remove the shadow stack space that we added.
  if (ShadowStackSpace) {
    masm.addToStackPtr(Imm32(ShadowStackSpace));
  }

#ifdef ENABLE_WASM_JSPI
  // We don't need to reload the InstanceReg because it is non-volatile in the
  // system ABI.
  MOZ_ASSERT(NonVolatileRegs.has(InstanceReg));
  LoadActivation(masm, InstanceReg, ABINonArgReturnReg0);
  GenerateExitEpilogueMainStackReturn(masm, InstanceReg, ABINonArgReturnReg0,
                                      ABINonArgReturnReg1);
#endif

  // Get the original stack pointer back for before we dynamically aligned it.
  // This will switch the SP back to the original stack we were on. Be careful
  // not to use the return register for this, which is live.
  masm.loadPtr(Address(masm.getStackPointer(), 0), ABINonArgReturnReg0);
  masm.moveToStackPtr(ABINonArgReturnReg0);

  // Otherwise, the return value is the TrapData::resumePC we must jump to.
  // We must restore register state before jumping, which will clobber
  // ReturnReg, so store ReturnReg in the above-reserved stack slot which we
  // use to jump to via ret.
  masm.storePtr(ReturnReg, Address(masm.getStackPointer(), offsetOfReturnWord));
  masm.PopRegsInMask(RegsToPreserve);
#ifdef JS_CODEGEN_ARM64
  // On ARM64, pop the resume PC (stored above) into lr and return through it.
  WasmPop(masm, lr);
  masm.abiret();
#else
  masm.ret();
#endif

  return FinishOffsets(masm, offsets);
}
   2797 
   2798 static void ClobberWasmRegsForLongJmp(MacroAssembler& masm, Register jumpReg) {
   2799  // Get the set of all registers that are allocatable in wasm functions
   2800  AllocatableGeneralRegisterSet gprs(GeneralRegisterSet::All());
   2801  RegisterAllocator::takeWasmRegisters(gprs);
   2802  // Remove the instance register from this set as landing pads require it to be
   2803  // valid
   2804  gprs.take(InstanceReg);
   2805  // Remove a specified register that will be used for the longjmp
   2806  gprs.take(jumpReg);
   2807  // Set all of these registers to zero
   2808  for (GeneralRegisterIterator iter(gprs.asLiveSet()); iter.more(); ++iter) {
   2809    Register reg = *iter;
   2810    masm.xorPtr(reg, reg);
   2811  }
   2812 
   2813  // Get the set of all floating point registers that are allocatable in wasm
   2814  // functions
   2815  AllocatableFloatRegisterSet fprs(FloatRegisterSet::All());
   2816  // Set all of these registers to NaN. We attempt for this to be a signalling
   2817  // NaN, but the bit format for signalling NaNs are implementation defined
   2818  // and so this is just best effort.
   2819  Maybe<FloatRegister> regNaN;
   2820  for (FloatRegisterIterator iter(fprs.asLiveSet()); iter.more(); ++iter) {
   2821    FloatRegister reg = *iter;
   2822    if (!reg.isDouble()) {
   2823      continue;
   2824    }
   2825    if (regNaN) {
   2826      masm.moveDouble(*regNaN, reg);
   2827      continue;
   2828    }
   2829    masm.loadConstantDouble(std::numeric_limits<double>::signaling_NaN(), reg);
   2830    regNaN = Some(reg);
   2831  }
   2832 }
   2833 
// Generates code to jump to a Wasm catch handler after unwinding the stack.
// The |rfe| register stores a pointer to the ResumeFromException struct
// allocated on the stack.
void wasm::GenerateJumpToCatchHandler(MacroAssembler& masm, Register rfe,
                                      Register scratch1, Register scratch2) {
  // Restore the instance pointer and the pinned registers wasm code relies
  // on, then switch back to the instance's realm.
  masm.loadPtr(Address(rfe, ResumeFromException::offsetOfInstance()),
               InstanceReg);
  masm.loadWasmPinnedRegsFromInstance(mozilla::Nothing());
  masm.switchToWasmInstanceRealm(scratch1, scratch2);
  // Load the catch handler's code address into scratch1 before restoring
  // FP/SP from the ResumeFromException struct.
  masm.loadPtr(Address(rfe, ResumeFromException::offsetOfTarget()), scratch1);
  masm.loadPtr(Address(rfe, ResumeFromException::offsetOfFramePointer()),
               FramePointer);
  masm.loadStackPtr(Address(rfe, ResumeFromException::offsetOfStackPointer()));
  MoveSPForJitABI(masm);
  // Poison all other wasm-allocatable registers (scratch1 survives since it
  // holds the jump target) so stale values cannot reach the catch handler.
  ClobberWasmRegsForLongJmp(masm, scratch1);
  masm.jump(scratch1);
}
   2851 
// Generate a stub that calls the C++ exception handler (HandleThrow) with a
// stack-allocated ResumeFromException struct, then jumps to the JIT's
// exception handler trampoline.  This stub is the target of |throwLabel|
// used throughout the other stubs.
static bool GenerateThrowStub(MacroAssembler& masm, Label* throwLabel,
                              Offsets* offsets) {
  Register scratch1 = ABINonArgReturnReg0;

  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);
  masm.setFramePushed(0);

  masm.bind(throwLabel);

  offsets->begin = masm.currentOffset();

  // Conservatively, the stack pointer can be unaligned and we must align it
  // dynamically.
  masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  if (ShadowStackSpace) {
    masm.subFromStackPtr(Imm32(ShadowStackSpace));
  }

  // Allocate space for exception or regular resume information.
  masm.reserveStack(sizeof(jit::ResumeFromException));
  masm.moveStackPtrTo(scratch1);

  // HandleThrow takes a single pointer argument: the ResumeFromException*
  // just captured in scratch1.
  MIRTypeVector handleThrowTypes;
  MOZ_ALWAYS_TRUE(handleThrowTypes.append(MIRType::Pointer));

  unsigned frameSize =
      StackDecrementForCall(ABIStackAlignment, masm.framePushed(),
                            StackArgBytesForNativeABI(handleThrowTypes));
  masm.reserveStack(frameSize);
  masm.assertStackAlignment(ABIStackAlignment);

  // Place the argument in a register or on the stack per the system ABI.
  ABIArgMIRTypeIter i(handleThrowTypes, ABIKind::System);
  if (i->kind() == ABIArg::GPR) {
    masm.movePtr(scratch1, i->gpr());
  } else {
    masm.storePtr(scratch1,
                  Address(masm.getStackPointer(), i->offsetFromArgBase()));
  }
  i++;
  MOZ_ASSERT(i.done());

  // WasmHandleThrow unwinds JitActivation::wasmExitFP() and initializes the
  // ResumeFromException struct we allocated on the stack.
  //
  // It returns the address of the JIT's exception handler trampoline that we
  // should jump to. This trampoline will return to the interpreter entry or
  // jump to a catch handler.
  masm.call(SymbolicAddress::HandleThrow);

  // Ensure the ResumeFromException struct is on top of the stack.
  masm.freeStack(frameSize);

  // Jump to the "return value check" code of the JIT's exception handler
  // trampoline. On ARM64 ensure PSP matches SP.
#ifdef JS_CODEGEN_ARM64
  masm.Mov(PseudoStackPointer64, sp);
#endif
  masm.jump(ReturnReg);

  return FinishOffsets(masm, offsets);
}
   2915 
// Generate a stub that handles toggleable enter/leave frame traps or
// breakpoints.  The stub records the frame pointer (via GenerateExitPrologue)
// and saves most of registers, so as to not affect the code generated by
// WasmBaselineCompile.
static bool GenerateDebugStub(MacroAssembler& masm, Label* throwLabel,
                              CallableOffsets* offsets) {
  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);
  masm.setFramePushed(0);

  GenerateExitPrologue(masm, ExitReason::Fixed::DebugStub,
                       /*switchToMainStack*/ true, 0, 0, offsets);

  uint32_t framePushed = masm.framePushed();

  // This method might be called with unaligned stack -- aligning and
  // saving old stack pointer at the top.
#ifdef JS_CODEGEN_ARM64
  // On ARM64 however the stack is always aligned.
  static_assert(ABIStackAlignment == 16, "ARM64 SP alignment");
#else
  Register scratch = ABINonArgReturnReg0;
  masm.moveStackPtrTo(scratch);
  masm.subFromStackPtr(Imm32(sizeof(intptr_t)));
  masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  masm.storePtr(scratch, Address(masm.getStackPointer(), 0));
#endif

  if (ShadowStackSpace) {
    masm.subFromStackPtr(Imm32(ShadowStackSpace));
  }
  masm.assertStackAlignment(ABIStackAlignment);
  masm.call(SymbolicAddress::HandleDebugTrap);

  // HandleDebugTrap returns false when we must unwind; transfer to the
  // throw stub in that case (it handles all stack cleanup).
  masm.branchIfFalseBool(ReturnReg, throwLabel);

  if (ShadowStackSpace) {
    masm.addToStackPtr(Imm32(ShadowStackSpace));
  }
#ifndef JS_CODEGEN_ARM64
  // Restore the original (possibly unaligned) stack pointer saved above.
  masm.pop(scratch);
  masm.moveToStackPtr(scratch);
#endif

  masm.setFramePushed(framePushed);

  GenerateExitEpilogue(masm, ExitReason::Fixed::DebugStub,
                       /*switchToMainStack*/ true, offsets);

  return FinishOffsets(masm, offsets);
}
   2967 
// Generate a stub that calls into C++ (HandleRequestTierUp) to request
// tier-up compilation for the instance currently in InstanceReg.
static bool GenerateRequestTierUpStub(MacroAssembler& masm,
                                      CallableOffsets* offsets) {
  // This is similar to GenerateDebugStub.  As with that routine, all registers
  // are saved, we call out to a C++ helper, then restore the registers.  The
  // helper can't fail, though.
  //
  // On entry to (the code generated by) this routine, we expect the requesting
  // instance pointer to be in InstanceReg, regardless of the platform.

  AutoCreatedBy acb(masm, "GenerateRequestTierUpStub");
  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);
  masm.setFramePushed(0);

  GenerateExitPrologue(masm, ExitReason::Fixed::RequestTierUp,
                       /*switchToMainStack*/ false, 0, 0, offsets);

  uint32_t framePushed = masm.framePushed();

  // This method might be called with unaligned stack -- aligning and
  // saving old stack pointer at the top.
#ifdef JS_CODEGEN_ARM64
  // On ARM64 however the stack is always aligned.
  static_assert(ABIStackAlignment == 16, "ARM64 SP alignment");
#else
  Register scratch = ABINonArgReturnReg0;
  masm.moveStackPtrTo(scratch);
  masm.subFromStackPtr(Imm32(sizeof(intptr_t)));
  masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  masm.storePtr(scratch, Address(masm.getStackPointer(), 0));
#endif

  if (ShadowStackSpace > 0) {
    masm.subFromStackPtr(Imm32(ShadowStackSpace));
  }
  masm.assertStackAlignment(ABIStackAlignment);

  // Pass InstanceReg as the first (and only) arg to the C++ routine.  We
  // expect that the only target to pass the first integer arg in memory is
  // x86_32, and handle that specially.
  ABIArgGenerator abi(ABIKind::System);
  ABIArg arg = abi.next(MIRType::Pointer);
#ifndef JS_CODEGEN_X86
  // The arg rides in a reg.
  MOZ_RELEASE_ASSERT(arg.kind() == ABIArg::GPR);
  masm.movePtr(InstanceReg, arg.gpr());
#else
  // Ensure we don't need to consider ShadowStackSpace.
  static_assert(ShadowStackSpace == 0);
  // Ensure the ABIArgGenerator is consistent with the code generation
  // assumptions we make here.
  MOZ_RELEASE_ASSERT(arg.kind() == ABIArg::Stack &&
                     arg.offsetFromArgBase() == 0);
  // Get the arg on the stack without messing up the stack alignment.
  // 12 bytes of padding plus the 4-byte push keeps 16-byte alignment.
  masm.subFromStackPtr(Imm32(12));
  masm.push(InstanceReg);
#endif

  masm.call(SymbolicAddress::HandleRequestTierUp);
  // The call can't fail (meaning, if it does fail, we ignore that)

#ifdef JS_CODEGEN_X86
  // Remove the arg and padding we just pushed.
  masm.addToStackPtr(Imm32(16));
#endif

  if (ShadowStackSpace > 0) {
    masm.addToStackPtr(Imm32(ShadowStackSpace));
  }
#ifndef JS_CODEGEN_ARM64
  // Restore the original (possibly unaligned) stack pointer saved above.
  masm.pop(scratch);
  masm.moveToStackPtr(scratch);
#endif

  masm.setFramePushed(framePushed);

  GenerateExitEpilogue(masm, ExitReason::Fixed::RequestTierUp,
                       /*switchToMainStack*/ false, offsets);

  return FinishOffsets(masm, offsets);
}
   3049 
// Generate the out-of-line stub that records a call_ref target in a
// CallRefMetrics structure, keeping the slots sorted by descending count.
static bool GenerateUpdateCallRefMetricsStub(MacroAssembler& masm,
                                             CallableOffsets* offsets) {
  // This is a stub which is entirely self-contained -- it calls no other
  // functions, cannot fail, and creates a minimal stack frame.  It can only
  // use three registers, `regMetrics`, `regFuncRef` and `regScratch`, as set
  // up below, and as described in BaseCompiler::updateCallRefMetrics.  All
  // other registers must remain unchanged.  Also, we may read InstanceReg.
  //
  // `regMetrics` (the CallRefMetrics*) should satisfy
  // CallRefMetrics::invariantsOK() both on entry to and exit from the code
  // generated here.

  // `regMetrics` and `regFuncRef` are live at entry, but not `regScratch`.
  const Register regMetrics = WasmCallRefCallScratchReg0;  // CallRefMetrics*
  const Register regFuncRef = WasmCallRefCallScratchReg1;  // FuncExtended*
  const Register regScratch = WasmCallRefCallScratchReg2;  // scratch

  // At entry to the stub, `regMetrics` points at the CallRefMetrics,
  // `regFuncRef` points at the FunctionExtended, `regScratch` is available as
  // scratch, `regFuncRef` is known to be non-null, and, if the target0/count0
  // slot is in use, it is known not to match that slot.  The call may or may
  // not be cross-instance.

  // Briefly, what we generate here is:
  //
  //   assert(regFuncRef is non-null)
  //
  //   if (regFuncRef is a cross instance call) {
  //     regMetrics->countOther++;
  //     return;
  //   }
  //
  //   assert(regFuncRef != regMetrics->targets[0]);
  //
  //   for (i = 1; i < NUM_SLOTS; i++) {
  //     if (regFuncRef == regMetrics->targets[i]) {
  //       regMetrics->counts[i]++;
  //       if (regMetrics->counts[i-1] <u regMetrics->counts[i]) {
  //         // swap regMetrics->counts[i-1]/[i] and
  //         regMetrics->targets[i-1]/[i]
  //       }
  //       return;
  //     }
  //   }
  //
  //   for (i = 0; i < NUM_SLOTS; i++) {
  //     if (regMetrics->targets[i] is nullptr) {
  //       regMetrics->targets[i] = regFuncRef;
  //       regMetrics->counts[i] = 1;
  //       return;
  //     }
  //   }
  //
  //   regMetrics->countsOther++;
  //   return;
  //
  // And the loops are unrolled.

  // Frame setup and unwinding: we generate the absolute minimal frame setup
  // (`push FP; FP := SP` / `pop FP; ret`).  There is no register save/restore
  // in the frame.  The routine created here is a leaf and will neither trap
  // nor invoke GC, so the unwindability requirements are minimal -- only the
  // profiler will need to be able to unwind through it.

  // See declaration of CallRefMetrics for comments about assignments of
  // funcrefs to `CallRefMetrics::targets[]` fields.

  AutoCreatedBy acb(masm, "GenerateUpdateCallRefMetricsStub");
  Label ret;

  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);
  masm.setFramePushed(0);

  GenerateMinimalPrologue(masm, &offsets->begin);

#ifdef DEBUG
  // Assertion: we know the target is non-null at entry, because the in-line
  // code created by BaseCompiler::callRef handles that case.
  // if (regFuncRef == nullptr) {
  //   crash;
  // }
  Label after1;
  masm.branchWasmAnyRefIsNull(/*isNull=*/false, regFuncRef, &after1);

  masm.breakpoint();

  masm.bind(&after1);
#endif

  // If it is a cross-instance call, add it to the `countOther` bin.
  // regScratch = regFuncRef->instance;
  // if (regScratch != thisInstance) {
  //   regScratch = regMetrics->countOther;
  //   regScratch++;
  //   regMetrics->countOther = regScratch;
  //   return;
  // }
  Label after2;
  const size_t offsetOfInstanceSlot = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_INSTANCE_SLOT);
  masm.loadPtr(Address(regFuncRef, offsetOfInstanceSlot), regScratch);
  masm.branchPtr(Assembler::Equal, InstanceReg, regScratch, &after2);
  //
  const size_t offsetOfCountOther = CallRefMetrics::offsetOfCountOther();
  masm.load32(Address(regMetrics, offsetOfCountOther), regScratch);
  masm.add32(Imm32(1), regScratch);
  masm.store32(regScratch, Address(regMetrics, offsetOfCountOther));
  masm.jump(&ret);
  //
  masm.bind(&after2);

#ifdef DEBUG
  // Assertion: we know it can't be a hit at slot zero, because the inline code
  // also handles that case.
  // regScratch = regMetrics->targets[0];
  // if (regScratch == regFuncRef) {
  //   crash;
  // }
  Label after3;
  const size_t offsetOfTarget0 = CallRefMetrics::offsetOfTarget(0);
  masm.loadPtr(Address(regMetrics, offsetOfTarget0), regScratch);
  masm.branchPtr(Assembler::NotEqual, regScratch, regFuncRef, &after3);

  masm.breakpoint();

  masm.bind(&after3);
#endif

  // If it matches slot one, increment count, swap with slot zero if needed
  // regScratch = regMetrics->targets[1];
  // if (regFuncRef == regScratch) {
  //   // We need a second temp register (regScratch being the first).
  //   // We no longer need regFuncRef so use that as the second temp.
  //   regScratch = regMetrics->counts[0];
  //   regFuncRef = regMetrics->counts[1];
  //   regFuncRef++;
  //   regMetrics->counts[1] = regFuncRef;
  //   if (regScratch <u regFuncRef) {
  //     // regScratch and regFuncRef
  //     // are regMetrics->counts[0] and [1] respectively
  //     regMetrics->counts[0] = regFuncRef;
  //     regMetrics->counts[1] = regScratch;
  //     regScratch = regMetrics->targets[0];
  //     regFuncRef = regMetrics->targets[1];
  //     regMetrics->targets[0] = regFuncRef;
  //     regMetrics->targets[1] = regScratch;
  //   }
  //   return;
  // }
  // and the same for slots 2, 3, 4, etc
  for (size_t i = 1; i < CallRefMetrics::NUM_SLOTS; i++) {
    Label after4;
    masm.loadPtr(Address(regMetrics, CallRefMetrics::offsetOfTarget(i)),
                 regScratch);
    masm.branchPtr(Assembler::NotEqual, regFuncRef, regScratch, &after4);

    // Hit at slot i: bump counts[i] (regFuncRef is reused as a temp from
    // here on; the funcref value is no longer needed).
    masm.load32(Address(regMetrics, CallRefMetrics::offsetOfCount(i - 1)),
                regScratch);
    masm.load32(Address(regMetrics, CallRefMetrics::offsetOfCount(i)),
                regFuncRef);
    masm.add32(Imm32(1), regFuncRef);
    masm.store32(regFuncRef,
                 Address(regMetrics, CallRefMetrics::offsetOfCount(i)));
    // If counts[i-1] >=u counts[i] the descending order still holds; done.
    masm.branch32(Assembler::AboveOrEqual, regScratch, regFuncRef, &ret);

    // Otherwise swap counts[i-1]/[i] and targets[i-1]/[i] to restore the
    // descending-count ordering.
    masm.store32(regFuncRef,
                 Address(regMetrics, CallRefMetrics::offsetOfCount(i - 1)));
    masm.store32(regScratch,
                 Address(regMetrics, CallRefMetrics::offsetOfCount(i)));
    masm.loadPtr(Address(regMetrics, CallRefMetrics::offsetOfTarget(i - 1)),
                 regScratch);
    masm.loadPtr(Address(regMetrics, CallRefMetrics::offsetOfTarget(i)),
                 regFuncRef);
    masm.storePtr(regFuncRef,
                  Address(regMetrics, CallRefMetrics::offsetOfTarget(i - 1)));
    masm.storePtr(regScratch,
                  Address(regMetrics, CallRefMetrics::offsetOfTarget(i)));
    masm.jump(&ret);

    masm.bind(&after4);
  }

  // Not found.  Use the first unused slot, if available.  This assumes that T
  // is non-null; but that is assured us on entry (and asserted above).  See
  // CallRefMetrics::invariantsOK.
  // if (regMetrics->targets[0] == nullptr) {
  //   regMetrics->targets[0] = regFuncRef;
  //   regMetrics->counts[0] = 1;
  //   return;
  // }
  // and the same for slots 1, 2, 3, 4, etc
  for (size_t i = 0; i < CallRefMetrics::NUM_SLOTS; i++) {
    Label after5;
    masm.loadPtr(Address(regMetrics, CallRefMetrics::offsetOfTarget(i)),
                 regScratch);
    masm.branchWasmAnyRefIsNull(/*isNull=*/false, regScratch, &after5);

    masm.storePtr(regFuncRef,
                  Address(regMetrics, CallRefMetrics::offsetOfTarget(i)));
    masm.store32(Imm32(1),
                 Address(regMetrics, CallRefMetrics::offsetOfCount(i)));
    masm.jump(&ret);

    masm.bind(&after5);
  }

  // Not found, and we don't have a slot with which to track this new target
  // individually.  Instead just increment the "all others" bin.
  // regScratch = regMetrics->countOther;
  // regScratch++;
  // regMetrics->countOther = regScratch;
  // return;
  masm.load32(Address(regMetrics, CallRefMetrics::offsetOfCountOther()),
              regScratch);
  masm.add32(Imm32(1), regScratch);
  masm.store32(regScratch,
               Address(regMetrics, CallRefMetrics::offsetOfCountOther()));

  masm.bind(&ret);

  MOZ_ASSERT(masm.framePushed() == 0);
  GenerateMinimalEpilogue(masm, &offsets->ret);

  return FinishOffsets(masm, offsets);
}
   3276 
   3277 bool wasm::GenerateEntryStubs(const CodeMetadata& codeMeta,
   3278                              const FuncExportVector& exports,
   3279                              CompiledCode* code) {
   3280  LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE, js::MallocArena);
   3281  TempAllocator alloc(&lifo);
   3282  JitContext jcx;
   3283  WasmMacroAssembler masm(alloc);
   3284  AutoCreatedBy acb(masm, "wasm::GenerateEntryStubs");
   3285 
   3286  // Swap in already-allocated empty vectors to avoid malloc/free.
   3287  if (!code->swap(masm)) {
   3288    return false;
   3289  }
   3290 
   3291  JitSpew(JitSpew_Codegen, "# Emitting wasm export stubs");
   3292 
   3293  Maybe<ImmPtr> noAbsolute;
   3294  for (size_t i = 0; i < exports.length(); i++) {
   3295    const FuncExport& fe = exports[i];
   3296    const FuncType& funcType = codeMeta.getFuncType(fe.funcIndex());
   3297    if (!fe.hasEagerStubs()) {
   3298      continue;
   3299    }
   3300    if (!GenerateEntryStubs(masm, i, fe, funcType, noAbsolute,
   3301                            codeMeta.isAsmJS(), &code->codeRanges)) {
   3302      return false;
   3303    }
   3304  }
   3305 
   3306  masm.finish();
   3307  if (masm.oom()) {
   3308    return false;
   3309  }
   3310 
   3311  return code->swap(masm);
   3312 }
   3313 
   3314 bool wasm::GenerateEntryStubs(MacroAssembler& masm, size_t funcExportIndex,
   3315                              const FuncExport& fe, const FuncType& funcType,
   3316                              const Maybe<ImmPtr>& callee, bool isAsmJS,
   3317                              CodeRangeVector* codeRanges) {
   3318  MOZ_ASSERT(!callee == fe.hasEagerStubs());
   3319  MOZ_ASSERT_IF(isAsmJS, fe.hasEagerStubs());
   3320 
   3321  Offsets offsets;
   3322  if (!GenerateInterpEntry(masm, fe, funcType, callee, &offsets)) {
   3323    return false;
   3324  }
   3325  if (!codeRanges->emplaceBack(CodeRange::InterpEntry, fe.funcIndex(),
   3326                               offsets)) {
   3327    return false;
   3328  }
   3329 
   3330  if (isAsmJS || !funcType.canHaveJitEntry()) {
   3331    return true;
   3332  }
   3333 
   3334  CallableOffsets jitOffsets;
   3335  if (!GenerateJitEntry(masm, funcExportIndex, fe, funcType, callee,
   3336                        &jitOffsets)) {
   3337    return false;
   3338  }
   3339  return codeRanges->emplaceBack(CodeRange::JitEntry, fe.funcIndex(),
   3340                                 jitOffsets);
   3341 }
   3342 
// Generate a stub that calls GetContextSensitiveInterpreterStub via the
// system ABI and then tail-jumps to the address it returns.  Presumably this
// serves as a placeholder JIT entry until a lazy one is compiled -- confirm
// against callers.
bool wasm::GenerateProvisionalLazyJitEntryStub(MacroAssembler& masm,
                                               Offsets* offsets) {
  AssertExpectedSP(masm);
  masm.setFramePushed(0);
  offsets->begin = masm.currentOffset();

#ifdef JS_CODEGEN_ARM64
  // Unaligned ABI calls require SP+PSP, but our mode here is SP-only
  masm.SetStackPointer64(PseudoStackPointer64);
  masm.Mov(PseudoStackPointer64, sp);
#endif

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif

  // Any volatile GPR will do as the scratch for setupUnalignedABICall.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  Register temp = regs.takeAny();

  using Fn = void* (*)();
  masm.setupUnalignedABICall(temp);
  masm.callWithABI<Fn, GetContextSensitiveInterpreterStub>(
      ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

#ifdef JS_USE_LINK_REGISTER
  masm.popReturnAddress();
#endif

  // Tail-jump to the stub address the ABI call returned.
  masm.jump(ReturnReg);

#ifdef JS_CODEGEN_ARM64
  // Undo the SP+PSP mode
  masm.SetStackPointer64(sp);
#endif

  return FinishOffsets(masm, offsets);
}
   3380 
// Generates every stub a module needs: per-import wrappers and exits,
// eager entry stubs for exports, and the shared trap/debug/tier-up/
// call_ref-metrics/throw stubs.  All code and code ranges accumulate into
// |code|.  Returns false on OOM.
bool wasm::GenerateStubs(const CodeMetadata& codeMeta,
                         const FuncImportVector& imports,
                         const FuncExportVector& exports, CompiledCode* code) {
  LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE, js::MallocArena);
  TempAllocator alloc(&lifo);
  JitContext jcx;
  WasmMacroAssembler masm(alloc);
  AutoCreatedBy acb(masm, "wasm::GenerateStubs");

  // Swap in already-allocated empty vectors to avoid malloc/free.
  if (!code->swap(masm)) {
    return false;
  }

  // Shared unwind target bound inside GenerateThrowStub below; the other
  // stubs branch here when an exception must propagate.
  Label throwLabel;

  JitSpew(JitSpew_Codegen, "# Emitting wasm import stubs");

  for (uint32_t funcIndex = 0; funcIndex < imports.length(); funcIndex++) {
    const FuncImport& fi = imports[funcIndex];
    const FuncType& funcType = codeMeta.getFuncType(funcIndex);

    CallIndirectId callIndirectId =
        CallIndirectId::forFunc(codeMeta, funcIndex);

    // Wasm-callable wrapper function for the import.
    FuncOffsets wrapperOffsets;
    if (!GenerateImportFunction(
            masm, codeMeta.offsetOfFuncImportInstanceData(funcIndex), funcType,
            callIndirectId, &wrapperOffsets, &code->stackMaps)) {
      return false;
    }
    if (!code->codeRanges.emplaceBack(funcIndex, wrapperOffsets,
                                      /* hasUnwindInfo = */ false)) {
      return false;
    }

    // Exit for calling the import through the interpreter.
    CallableOffsets interpOffsets;
    if (!GenerateImportInterpExit(masm, fi, funcType, funcIndex, &throwLabel,
                                  &interpOffsets)) {
      return false;
    }
    if (!code->codeRanges.emplaceBack(CodeRange::ImportInterpExit, funcIndex,
                                      interpOffsets)) {
      return false;
    }

    // Skip if the function does not have a signature that allows for a JIT
    // exit.
    if (!funcType.canHaveJitExit()) {
      continue;
    }

    ImportOffsets jitOffsets;
    if (!GenerateImportJitExit(
            masm, codeMeta.offsetOfFuncImportInstanceData(funcIndex), funcType,
            funcIndex, interpOffsets.begin, &throwLabel, &jitOffsets)) {
      return false;
    }
    if (!code->codeRanges.emplaceBack(CodeRange::ImportJitExit, funcIndex,
                                      jitOffsets)) {
      return false;
    }
  }

  JitSpew(JitSpew_Codegen, "# Emitting wasm entry stubs");

  Maybe<ImmPtr> noAbsolute;
  for (size_t i = 0; i < exports.length(); i++) {
    const FuncExport& fe = exports[i];
    const FuncType& funcType = codeMeta.getFuncType(fe.funcIndex());
    if (!fe.hasEagerStubs()) {
      continue;
    }
    if (!GenerateEntryStubs(masm, i, fe, funcType, noAbsolute,
                            codeMeta.isAsmJS(), &code->codeRanges)) {
      return false;
    }
  }

  JitSpew(JitSpew_Codegen, "# Emitting wasm trap, debug and throw stubs");

  Offsets offsets;

  if (!GenerateTrapExit(masm, &throwLabel, &offsets)) {
    return false;
  }
  if (!code->codeRanges.emplaceBack(CodeRange::TrapExit, offsets)) {
    return false;
  }

  CallableOffsets callableOffsets;
  if (!GenerateDebugStub(masm, &throwLabel, &callableOffsets)) {
    return false;
  }
  if (!code->codeRanges.emplaceBack(CodeRange::DebugStub, callableOffsets)) {
    return false;
  }

  if (!GenerateRequestTierUpStub(masm, &callableOffsets)) {
    return false;
  }
  if (!code->codeRanges.emplaceBack(CodeRange::RequestTierUpStub,
                                    callableOffsets)) {
    return false;
  }

  if (!GenerateUpdateCallRefMetricsStub(masm, &callableOffsets)) {
    return false;
  }
  if (!code->codeRanges.emplaceBack(CodeRange::UpdateCallRefMetricsStub,
                                    callableOffsets)) {
    return false;
  }

  // The throw stub must come last of the stubs that use throwLabel, since it
  // binds that label.
  if (!GenerateThrowStub(masm, &throwLabel, &offsets)) {
    return false;
  }
  if (!code->codeRanges.emplaceBack(CodeRange::Throw, offsets)) {
    return false;
  }

  masm.finish();
  if (masm.oom()) {
    return false;
  }

  return code->swap(masm);
}