tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

TypedArrayObject-inl.h (24477B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #ifndef vm_TypedArrayObject_inl_h
      8 #define vm_TypedArrayObject_inl_h
      9 
     10 /* Utilities and common inline code for TypedArray */
     11 
     12 #include "vm/TypedArrayObject.h"
     13 
     14 #include "mozilla/Assertions.h"
     15 #include "mozilla/Compiler.h"
     16 
     17 #include <algorithm>
     18 #include <type_traits>
     19 
     20 #include "jsnum.h"
     21 
     22 #include "gc/Zone.h"
     23 #include "jit/AtomicOperations.h"
     24 #include "js/Conversions.h"
     25 #include "js/ScalarType.h"  // js::Scalar::Type
     26 #include "js/Value.h"
     27 #include "util/DifferentialTesting.h"
     28 #include "util/Memory.h"
     29 #include "vm/ArrayObject.h"
     30 #include "vm/BigIntType.h"
     31 #include "vm/Float16.h"
     32 #include "vm/NativeObject.h"
     33 #include "vm/Uint8Clamped.h"
     34 
     35 #include "gc/ObjectKind-inl.h"
     36 #include "vm/NativeObject-inl.h"
     37 #include "vm/ObjectOperations-inl.h"
     38 
     39 namespace js {
     40 
     41 // Use static_assert in compilers which support CWG2518. In all other cases
     42 // fall back to MOZ_CRASH.
     43 //
     44 // https://cplusplus.github.io/CWG/issues/2518.html
     45 #if defined(__clang__)
     46 #  define STATIC_ASSERT_IN_UNEVALUATED_CONTEXT 1
     47 #elif MOZ_IS_GCC
     48 #  if MOZ_GCC_VERSION_AT_LEAST(13, 1, 0)
     49 #    define STATIC_ASSERT_IN_UNEVALUATED_CONTEXT 1
     50 #  else
     51 #    define STATIC_ASSERT_IN_UNEVALUATED_CONTEXT 0
     52 #  endif
     53 #else
     54 #  define STATIC_ASSERT_IN_UNEVALUATED_CONTEXT 0
     55 #endif
     56 
     57 template <typename T>
     58 inline auto ToFloatingPoint(T value) {
     59  static_assert(!std::numeric_limits<T>::is_integer);
     60 
     61  if constexpr (std::is_floating_point_v<T>) {
     62    return value;
     63  } else {
     64    return static_cast<double>(value);
     65  }
     66 }
     67 
// Convert a typed-array element |src| of type |From| to element type |To|.
//
// Floating point -> integer narrowing uses the ECMAScript conversion
// operators (JS::ToInt8 etc.), not static_cast; integer -> anything uses a
// plain static_cast. The unreachable trailing branch is diagnosed at compile
// time where CWG2518 is supported, at runtime otherwise (see the
// STATIC_ASSERT_IN_UNEVALUATED_CONTEXT definition above).
template <typename To, typename From>
inline To ConvertNumber(From src) {
 if constexpr (!std::numeric_limits<From>::is_integer) {
   // Floating point source.
   if constexpr (std::is_same_v<From, To>) {
     return src;
   } else if constexpr (!std::numeric_limits<To>::is_integer) {
     // Float-to-float conversions route through the widest hardware type,
     // see ToFloatingPoint.
     return static_cast<To>(ToFloatingPoint(src));
   } else if constexpr (std::is_same_v<int8_t, To>) {
     return JS::ToInt8(ToFloatingPoint(src));
   } else if constexpr (std::is_same_v<uint8_t, To>) {
     return JS::ToUint8(ToFloatingPoint(src));
   } else if constexpr (std::is_same_v<uint8_clamped, To>) {
     // uint8_clamped's constructor performs the (clamping) conversion.
     return uint8_clamped(ToFloatingPoint(src));
   } else if constexpr (std::is_same_v<int16_t, To>) {
     return JS::ToInt16(ToFloatingPoint(src));
   } else if constexpr (std::is_same_v<uint16_t, To>) {
     return JS::ToUint16(ToFloatingPoint(src));
   } else if constexpr (std::is_same_v<int32_t, To>) {
     return JS::ToInt32(ToFloatingPoint(src));
   } else if constexpr (std::is_same_v<uint32_t, To>) {
     return JS::ToUint32(ToFloatingPoint(src));
   } else {
#if STATIC_ASSERT_IN_UNEVALUATED_CONTEXT
     static_assert(false,
                   "conversion from floating point to int should have been "
                   "handled by specializations above");
#else
     MOZ_CRASH(
         "conversion from floating point to int should have been "
         "handled by specializations above");
#endif
   }
 } else {
   // Integer source: ordinary C++ conversion matches the JS semantics.
   return static_cast<To>(src);
 }
}
    104 
    105 #undef STATIC_ASSERT_IN_UNEVALUATED_CONTEXT
    106 
    107 template <typename NativeType>
    108 struct TypeIDOfType;
    109 template <>
    110 struct TypeIDOfType<int8_t> {
    111  static const Scalar::Type id = Scalar::Int8;
    112  static const JSProtoKey protoKey = JSProto_Int8Array;
    113 };
    114 template <>
    115 struct TypeIDOfType<uint8_t> {
    116  static const Scalar::Type id = Scalar::Uint8;
    117  static const JSProtoKey protoKey = JSProto_Uint8Array;
    118 };
    119 template <>
    120 struct TypeIDOfType<int16_t> {
    121  static const Scalar::Type id = Scalar::Int16;
    122  static const JSProtoKey protoKey = JSProto_Int16Array;
    123 };
    124 template <>
    125 struct TypeIDOfType<uint16_t> {
    126  static const Scalar::Type id = Scalar::Uint16;
    127  static const JSProtoKey protoKey = JSProto_Uint16Array;
    128 };
    129 template <>
    130 struct TypeIDOfType<int32_t> {
    131  static const Scalar::Type id = Scalar::Int32;
    132  static const JSProtoKey protoKey = JSProto_Int32Array;
    133 };
    134 template <>
    135 struct TypeIDOfType<uint32_t> {
    136  static const Scalar::Type id = Scalar::Uint32;
    137  static const JSProtoKey protoKey = JSProto_Uint32Array;
    138 };
    139 template <>
    140 struct TypeIDOfType<int64_t> {
    141  static const Scalar::Type id = Scalar::BigInt64;
    142  static const JSProtoKey protoKey = JSProto_BigInt64Array;
    143 };
    144 template <>
    145 struct TypeIDOfType<uint64_t> {
    146  static const Scalar::Type id = Scalar::BigUint64;
    147  static const JSProtoKey protoKey = JSProto_BigUint64Array;
    148 };
    149 template <>
    150 struct TypeIDOfType<float16> {
    151  static const Scalar::Type id = Scalar::Float16;
    152  static const JSProtoKey protoKey = JSProto_Float16Array;
    153 };
    154 template <>
    155 struct TypeIDOfType<float> {
    156  static const Scalar::Type id = Scalar::Float32;
    157  static const JSProtoKey protoKey = JSProto_Float32Array;
    158 };
    159 template <>
    160 struct TypeIDOfType<double> {
    161  static const Scalar::Type id = Scalar::Float64;
    162  static const JSProtoKey protoKey = JSProto_Float64Array;
    163 };
    164 template <>
    165 struct TypeIDOfType<uint8_clamped> {
    166  static const Scalar::Type id = Scalar::Uint8Clamped;
    167  static const JSProtoKey protoKey = JSProto_Uint8ClampedArray;
    168 };
    169 
// Memory operations for element data that may live in a SharedArrayBuffer.
// Every access is routed through jit::AtomicOperations' *SafeWhenRacy
// primitives, which are specified to be safe under concurrent unsynchronized
// access from other threads (see jit/AtomicOperations.h for the contract).
class SharedOps {
public:
 // Read one element; safe even if another thread races on |addr|.
 template <typename T>
 static T load(SharedMem<T*> addr) {
   return js::jit::AtomicOperations::loadSafeWhenRacy(addr);
 }

 // Write one element; safe even if another thread races on |addr|.
 template <typename T>
 static void store(SharedMem<T*> addr, T value) {
   js::jit::AtomicOperations::storeSafeWhenRacy(addr, value);
 }

 // memcpy |size| bytes between non-overlapping regions.
 template <typename T>
 static void memcpy(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
   js::jit::AtomicOperations::memcpySafeWhenRacy(dest, src, size);
 }

 // memmove |size| bytes; regions may overlap.
 template <typename T>
 static void memmove(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
   js::jit::AtomicOperations::memmoveSafeWhenRacy(dest, src, size);
 }

 // Copy |nelem| elements between non-overlapping regions.
 template <typename T>
 static void podCopy(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
   js::jit::AtomicOperations::podCopySafeWhenRacy(dest, src, nelem);
 }

 // Copy |nelem| elements; regions may overlap.
 template <typename T>
 static void podMove(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
   js::jit::AtomicOperations::podMoveSafeWhenRacy(dest, src, nelem);
 }

 // Fetch the (shared or unshared) data pointer of |obj|.
 static SharedMem<void*> extract(TypedArrayObject* obj) {
   return obj->dataPointerEither();
 }
};
    206 
    207 class UnsharedOps {
    208 public:
    209  template <typename T>
    210  static T load(SharedMem<T*> addr) {
    211    return *addr.unwrapUnshared();
    212  }
    213 
    214  template <typename T>
    215  static void store(SharedMem<T*> addr, T value) {
    216    *addr.unwrapUnshared() = value;
    217  }
    218 
    219  template <typename T>
    220  static void memcpy(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
    221    ::memcpy(dest.unwrapUnshared(), src.unwrapUnshared(), size);
    222  }
    223 
    224  template <typename T>
    225  static void memmove(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
    226    ::memmove(dest.unwrapUnshared(), src.unwrapUnshared(), size);
    227  }
    228 
    229  template <typename T>
    230  static void podCopy(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
    231    // std::copy_n better matches the argument values/types of this
    232    // function, but as noted below it allows the input/output ranges to
    233    // overlap.  std::copy does not, so use it so the compiler has extra
    234    // ability to optimize.
    235    const auto* first = src.unwrapUnshared();
    236    const auto* last = first + nelem;
    237    auto* result = dest.unwrapUnshared();
    238    std::copy(first, last, result);
    239  }
    240 
    241  template <typename T>
    242  static void podMove(SharedMem<T*> dest, SharedMem<T*> src, size_t n) {
    243    // std::copy_n copies from |src| to |dest| starting from |src|, so
    244    // input/output ranges *may* permissibly overlap, as this function
    245    // allows.
    246    const auto* start = src.unwrapUnshared();
    247    auto* result = dest.unwrapUnshared();
    248    std::copy_n(start, n, result);
    249  }
    250 
    251  static SharedMem<void*> extract(TypedArrayObject* obj) {
    252    return SharedMem<void*>::unshared(obj->dataPointerUnshared());
    253  }
    254 };
    255 
// Element-type-specific copy/convert machinery for typed arrays.
//
// |T| is the native element type of the *target* array (callers must have
// TypeIDOfType<T>::id == target->type(), asserted below), and |Ops| is
// SharedOps or UnsharedOps, selecting racy-safe vs. plain accesses for the
// target's (possibly shared) buffer.
template <typename T, typename Ops>
class ElementSpecific {
 // True when elements of |sourceType| can be copied into T storage by a
 // plain memory copy, with no per-element conversion (see CanUseBitwiseCopy).
 static constexpr bool canUseBitwiseCopy(Scalar::Type sourceType) {
   return CanUseBitwiseCopy(TypeIDOfType<T>::id, sourceType);
 }

 // Compile-time form of canUseBitwiseCopy for a statically known source
 // element type |From|.
 template <typename From>
 static inline constexpr bool canCopyBitwise =
     canUseBitwiseCopy(TypeIDOfType<From>::id);

 // Converting store loop: load |count| elements of type |From| (starting at
 // |data + offset|) via LoadOps, convert each to T, and store via Ops.
 template <typename From, typename LoadOps = Ops>
 static typename std::enable_if_t<!canCopyBitwise<From>> store(
     SharedMem<T*> dest, SharedMem<void*> data, size_t count, size_t offset) {
   SharedMem<From*> src = data.cast<From*>() + offset;
   for (size_t i = 0; i < count; ++i) {
     Ops::store(dest++, ConvertNumber<T>(LoadOps::load(src++)));
   }
 }

 // Overload selected when a bitwise copy suffices; callers perform that copy
 // themselves (podCopy/podMove), so this must never be reached.
 template <typename From, typename LoadOps = Ops>
 static typename std::enable_if_t<canCopyBitwise<From>> store(
     SharedMem<T*> dest, SharedMem<void*> data, size_t count, size_t offset) {
   MOZ_ASSERT_UNREACHABLE("caller handles bitwise copies");
 }

 // Dispatch on the source's runtime scalar |type| to the matching converting
 // store<From>() above. Not instantiated for int64/uint64 targets (those only
 // ever take bitwise copies; see the other overload).
 template <typename LoadOps = Ops, typename U = T>
 static typename std::enable_if_t<!std::is_same_v<U, int64_t> &&
                                  !std::is_same_v<U, uint64_t>>
 storeTo(SharedMem<T*> dest, Scalar::Type type, SharedMem<void*> data,
         size_t count, size_t offset) {
   static_assert(std::is_same_v<T, U>,
                 "template parameter U only used to disable this declaration");
   switch (type) {
     case Scalar::Int8: {
       store<int8_t, LoadOps>(dest, data, count, offset);
       break;
     }
     case Scalar::Uint8:
     case Scalar::Uint8Clamped: {
       store<uint8_t, LoadOps>(dest, data, count, offset);
       break;
     }
     case Scalar::Int16: {
       store<int16_t, LoadOps>(dest, data, count, offset);
       break;
     }
     case Scalar::Uint16: {
       store<uint16_t, LoadOps>(dest, data, count, offset);
       break;
     }
     case Scalar::Int32: {
       store<int32_t, LoadOps>(dest, data, count, offset);
       break;
     }
     case Scalar::Uint32: {
       store<uint32_t, LoadOps>(dest, data, count, offset);
       break;
     }
     case Scalar::Float16: {
       store<float16, LoadOps>(dest, data, count, offset);
       break;
     }
     case Scalar::Float32: {
       store<float, LoadOps>(dest, data, count, offset);
       break;
     }
     case Scalar::Float64: {
       store<double, LoadOps>(dest, data, count, offset);
       break;
     }
     case Scalar::BigInt64:
     case Scalar::BigUint64:
       MOZ_FALLTHROUGH_ASSERT("unexpected int64/uint64 typed array");
     default:
       MOZ_CRASH("setFromTypedArray with a typed array with bogus type");
   }
 }

 // int64/uint64 targets: BigInt64<->BigUint64 copies are always bitwise and
 // handled directly by the callers, so this is unreachable.
 template <typename LoadOps = Ops, typename U = T>
 static typename std::enable_if_t<std::is_same_v<U, int64_t> ||
                                  std::is_same_v<U, uint64_t>>
 storeTo(SharedMem<T*> dest, Scalar::Type type, SharedMem<void*> data,
         size_t count, size_t offset) {
   static_assert(std::is_same_v<T, U>,
                 "template parameter U only used to disable this declaration");
   MOZ_ASSERT_UNREACHABLE("caller handles int64<>uint64 bitwise copies");
 }

public:
 /*
  * Copy |source|'s elements into |target|, starting at |target[offset]| from
  * |source[sourceOffset]|.
  *
  * Act as if the assignments occurred from a fresh copy of |source|, in
  * case the two memory ranges overlap.
  */
 static bool setFromTypedArray(TypedArrayObject* target, size_t targetLength,
                               TypedArrayObject* source, size_t sourceLength,
                               size_t offset, size_t sourceOffset = 0) {
   // WARNING: |source| may be an unwrapped typed array from a different
   // compartment. Proceed with caution!

   MOZ_ASSERT(TypeIDOfType<T>::id == target->type(),
              "calling wrong setFromTypedArray specialization");
   MOZ_ASSERT(Scalar::isBigIntType(target->type()) ==
                  Scalar::isBigIntType(source->type()),
              "can't convert between BigInt and Number");
   MOZ_ASSERT(!target->is<ImmutableTypedArrayObject>(),
              "target is not an immutable typed array");
   MOZ_ASSERT(!target->hasDetachedBuffer(), "target isn't detached");
   MOZ_ASSERT(!source->hasDetachedBuffer(), "source isn't detached");
   MOZ_ASSERT(*target->length() >= targetLength, "target isn't shrunk");
   MOZ_ASSERT(*source->length() >= sourceOffset + sourceLength,
              "source isn't shrunk");

   MOZ_ASSERT(offset <= targetLength);
   MOZ_ASSERT(sourceLength <= targetLength - offset);

   // Return early when copying no elements.
   //
   // Note: `SharedMem::cast` asserts the memory is properly aligned. Non-zero
   // memory is correctly aligned, this is statically asserted below. Zero
   // memory can have a different alignment, so we have to return early.
   if (sourceLength == 0) {
     return true;
   }

   // Overlapping ranges need the copy-through-a-temporary path.
   if (TypedArrayObject::sameBuffer(target, source)) {
     return setFromOverlappingTypedArray(target, targetLength, source,
                                         sourceLength, offset, sourceOffset);
   }

   // `malloc` returns memory at least as strictly aligned as for max_align_t
   // and the alignment of max_align_t is a multiple of the size of `T`,
   // so `SharedMem::cast` will be called with properly aligned memory.
   static_assert(alignof(std::max_align_t) % sizeof(T) == 0);

   SharedMem<T*> dest = Ops::extract(target).template cast<T*>() + offset;
   SharedMem<void*> data = Ops::extract(source);

   if (canUseBitwiseCopy(source->type())) {
     Ops::podCopy(dest, data.template cast<T*>() + sourceOffset, sourceLength);
   } else {
     storeTo(dest, source->type(), data, sourceLength, sourceOffset);
   }

   return true;
 }

 /*
  * Copy |source[0]| to |source[len]| (exclusive) elements into the typed
  * array |target|, starting at index |offset|.  |source| must not be a
  * typed array.
  */
 static bool setFromNonTypedArray(JSContext* cx,
                                  Handle<TypedArrayObject*> target,
                                  HandleObject source, size_t len,
                                  size_t offset = 0) {
   MOZ_ASSERT(target->type() == TypeIDOfType<T>::id,
              "target type and NativeType must match");
   MOZ_ASSERT(!target->is<ImmutableTypedArrayObject>(),
              "target is not an immutable typed array");
   MOZ_ASSERT(!source->is<TypedArrayObject>(),
              "use setFromTypedArray instead of this method");
   MOZ_ASSERT_IF(target->hasDetachedBuffer(), target->length().isNothing());

   size_t i = 0;
   if (source->is<NativeObject>()) {
     size_t targetLength = target->length().valueOr(0);
     if (offset <= targetLength && len <= targetLength - offset) {
       // Attempt fast-path infallible conversion of dense elements up to
       // the first potentially side-effectful lookup or conversion.
       size_t bound = std::min<size_t>(
           source->as<NativeObject>().getDenseInitializedLength(), len);

       SharedMem<T*> dest = Ops::extract(target).template cast<T*>() + offset;

       MOZ_ASSERT(!canConvertInfallibly(MagicValue(JS_ELEMENTS_HOLE)),
                  "the following loop must abort on holes");

       const Value* srcValues = source->as<NativeObject>().getDenseElements();
       for (; i < bound; i++) {
         if (!canConvertInfallibly(srcValues[i])) {
           break;
         }
         Ops::store(dest + i, infallibleValueToNative(srcValues[i]));
       }
       if (i == len) {
         return true;
       }
     }
   }

   // Convert and copy any remaining elements generically.
   RootedValue v(cx);
   for (; i < len; i++) {
     // When size_t is 32 bits every index fits in uint32_t, so the ordinary
     // GetElement path suffices; otherwise large indices need the
     // large-index variant.
     if constexpr (sizeof(i) == sizeof(uint32_t)) {
       if (!GetElement(cx, source, source, uint32_t(i), &v)) {
         return false;
       }
     } else {
       if (!GetElementLargeIndex(cx, source, source, i, &v)) {
         return false;
       }
     }

     T n;
     if (!valueToNative(cx, v, &n)) {
       return false;
     }

     // Ignore out-of-bounds writes, but still execute getElement/valueToNative
     // because of observable side-effects.
     if (offset + i >= target->length().valueOr(0)) {
       continue;
     }

     MOZ_ASSERT(!target->hasDetachedBuffer());

     // Compute every iteration in case getElement/valueToNative
     // detaches the underlying array buffer or GC moves the data.
     SharedMem<T*> dest =
         Ops::extract(target).template cast<T*>() + offset + i;
     Ops::store(dest, n);
   }

   return true;
 }

 /*
  * Copy |source| into the typed array |target|.
  */
 static bool initFromIterablePackedArray(
     JSContext* cx, Handle<FixedLengthTypedArrayObject*> target,
     Handle<ArrayObject*> source) {
   MOZ_ASSERT(target->type() == TypeIDOfType<T>::id,
              "target type and NativeType must match");
   MOZ_ASSERT(!target->hasDetachedBuffer(), "target isn't detached");
   MOZ_ASSERT(IsPackedArray(source), "source array must be packed");
   MOZ_ASSERT(source->getDenseInitializedLength() <= target->length());

   size_t len = source->getDenseInitializedLength();
   size_t i = 0;

   // Attempt fast-path infallible conversion of dense elements up to the
   // first potentially side-effectful conversion.

   SharedMem<T*> dest = Ops::extract(target).template cast<T*>();

   const Value* srcValues = source->getDenseElements();
   for (; i < len; i++) {
     if (!canConvertInfallibly(srcValues[i])) {
       break;
     }
     Ops::store(dest + i, infallibleValueToNative(srcValues[i]));
   }
   if (i == len) {
     return true;
   }

   // Convert any remaining elements by first collecting them into a
   // temporary list, and then copying them into the typed array.
   RootedValueVector values(cx);
   if (!values.append(srcValues + i, len - i)) {
     return false;
   }

   RootedValue v(cx);
   for (size_t j = 0; j < values.length(); i++, j++) {
     v = values[j];

     T n;
     if (!valueToNative(cx, v, &n)) {
       return false;
     }

     // |target| is a newly allocated typed array and not yet visible to
     // content script, so valueToNative can't detach the underlying
     // buffer.
     MOZ_ASSERT(i < target->length());

     // Compute every iteration in case GC moves the data.
     SharedMem<T*> newDest = Ops::extract(target).template cast<T*>();
     Ops::store(newDest + i, n);
   }

   return true;
 }

private:
 /*
  * As setFromTypedArray, for the case where |target| and |source| share the
  * same underlying buffer: either move elements bitwise, or convert them out
  * of a temporary copy of the source range.
  */
 static bool setFromOverlappingTypedArray(TypedArrayObject* target,
                                          size_t targetLength,
                                          TypedArrayObject* source,
                                          size_t sourceLength, size_t offset,
                                          size_t sourceOffset) {
   // WARNING: |source| may be an unwrapped typed array from a different
   // compartment. Proceed with caution!

   MOZ_ASSERT(TypeIDOfType<T>::id == target->type(),
              "calling wrong setFromTypedArray specialization");
   MOZ_ASSERT(Scalar::isBigIntType(target->type()) ==
                  Scalar::isBigIntType(source->type()),
              "can't convert between BigInt and Number");
   MOZ_ASSERT(!target->hasDetachedBuffer(), "target isn't detached");
   MOZ_ASSERT(!source->hasDetachedBuffer(), "source isn't detached");
   MOZ_ASSERT(*target->length() >= targetLength, "target isn't shrunk");
   MOZ_ASSERT(*source->length() >= sourceOffset + sourceLength,
              "source isn't shrunk");
   MOZ_ASSERT(TypedArrayObject::sameBuffer(target, source),
              "the provided arrays don't actually overlap, so it's "
              "undesirable to use this method");

   MOZ_ASSERT(offset <= targetLength);
   MOZ_ASSERT(sourceLength <= targetLength - offset);

   SharedMem<T*> dest = Ops::extract(target).template cast<T*>() + offset;
   size_t len = sourceLength;

   // Same element representation: a memmove-style element copy handles the
   // overlap directly.
   if (canUseBitwiseCopy(source->type())) {
     SharedMem<T*> src =
         Ops::extract(source).template cast<T*>() + sourceOffset;
     Ops::podMove(dest, src, len);
     return true;
   }

   // Copy |source| because it overlaps the target elements being set.
   size_t bytesPerElement = source->bytesPerElement();
   size_t sourceByteLen = len * bytesPerElement;
   auto temp = target->zone()->template make_pod_array<uint8_t>(sourceByteLen);
   if (!temp) {
     return false;
   }

   size_t sourceByteOffset = sourceOffset * bytesPerElement;
   auto data = SharedMem<void*>::unshared(temp.get());
   Ops::memcpy(data, Ops::extract(source).addBytes(sourceByteOffset),
               sourceByteLen);

   // The temporary is thread-local, so read it with UnsharedOps regardless
   // of what Ops the target uses.
   storeTo<UnsharedOps>(dest, source->type(), data, len, 0);

   return true;
 }

 // Whether |v| can be converted to T with no possibility of failure and no
 // user-observable side effects.
 static bool canConvertInfallibly(const Value& v) {
   if (std::is_same_v<T, int64_t> || std::is_same_v<T, uint64_t>) {
     // Numbers, Null, Undefined, and Symbols throw a TypeError. Strings may
     // OOM and Objects may have side-effects.
     return v.isBigInt() || v.isBoolean();
   }
   // BigInts and Symbols throw a TypeError. Strings may OOM and Objects may
   // have side-effects.
   return v.isNumber() || v.isBoolean() || v.isNull() || v.isUndefined();
 }

 // Convert |v| to T; |v| must satisfy canConvertInfallibly().
 static T infallibleValueToNative(const Value& v) {
   if constexpr (std::is_same_v<T, int64_t>) {
     if (v.isBigInt()) {
       return T(BigInt::toInt64(v.toBigInt()));
     }
     return T(v.toBoolean());
   } else if constexpr (std::is_same_v<T, uint64_t>) {
     if (v.isBigInt()) {
       return T(BigInt::toUint64(v.toBigInt()));
     }
     return T(v.toBoolean());
   } else {
     if (v.isInt32()) {
       return T(v.toInt32());
     }
     if (v.isDouble()) {
       return doubleToNative(v.toDouble());
     }
     if (v.isBoolean()) {
       return T(v.toBoolean());
     }
     if (v.isNull()) {
       return T(0);
     }

     // undefined becomes NaN for floating point targets, 0 for integers.
     MOZ_ASSERT(v.isUndefined());
     return !std::numeric_limits<T>::is_integer ? T(JS::GenericNaN()) : T(0);
   }
 }

 // Convert |v| to T, taking the fallible/effectful coercion path when the
 // fast infallible path doesn't apply. Returns false on error.
 static bool valueToNative(JSContext* cx, HandleValue v, T* result) {
   MOZ_ASSERT(!v.isMagic());

   if (MOZ_LIKELY(canConvertInfallibly(v))) {
     *result = infallibleValueToNative(v);
     return true;
   }

   if constexpr (std::is_same_v<T, int64_t>) {
     JS_TRY_VAR_OR_RETURN_FALSE(cx, *result, ToBigInt64(cx, v));
   } else if constexpr (std::is_same_v<T, uint64_t>) {
     JS_TRY_VAR_OR_RETURN_FALSE(cx, *result, ToBigUint64(cx, v));
   } else {
     MOZ_ASSERT(v.isString() || v.isObject() || v.isSymbol() || v.isBigInt());

     double d;
     if (!(v.isString() ? StringToNumber(cx, v.toString(), &d)
                        : ToNumber(cx, v, &d))) {
       return false;
     }
     *result = doubleToNative(d);
   }
   return true;
 }

 // Convert a double to T, canonicalizing NaNs in differential-testing mode.
 static T doubleToNative(double d) {
   if constexpr (!std::numeric_limits<T>::is_integer) {
     // The JS spec doesn't distinguish among different NaN values, and
     // it deliberately doesn't specify the bit pattern written to a
     // typed array when NaN is written into it.  This bit-pattern
     // inconsistency could confuse differential testing, so always
     // canonicalize NaN values in differential testing.
     if (js::SupportDifferentialTesting()) {
       d = JS::CanonicalizeNaN(d);
     }
   }
   return ConvertNumber<T>(d);
 }
};
    679 
    680 inline gc::AllocKind js::FixedLengthTypedArrayObject::allocKindForTenure()
    681    const {
    682  // Fixed length typed arrays in the nursery may have a lazily allocated
    683  // buffer. Make sure there is room for the array's fixed data when moving the
    684  // array.
    685 
    686  using namespace js::gc;
    687 
    688  if (hasBuffer()) {
    689    return NativeObject::allocKindForTenure();
    690  }
    691 
    692  AllocKind allocKind;
    693  if (hasInlineElements()) {
    694    allocKind = AllocKindForLazyBuffer(byteLength());
    695  } else {
    696    allocKind = GetGCObjectKind(getClass());
    697  }
    698 
    699  MOZ_ASSERT(GetObjectFinalizeKind(getClass()) == gc::FinalizeKind::Background);
    700  return GetFinalizedAllocKind(allocKind, gc::FinalizeKind::Background);
    701 }
    702 
    703 /* static */ gc::AllocKind
    704 js::FixedLengthTypedArrayObject::AllocKindForLazyBuffer(size_t nbytes) {
    705  MOZ_ASSERT(nbytes <= INLINE_BUFFER_LIMIT);
    706  if (nbytes == 0) {
    707    nbytes += sizeof(uint8_t);
    708  }
    709  size_t dataSlots = AlignBytes(nbytes, sizeof(Value)) / sizeof(Value);
    710  MOZ_ASSERT(nbytes <= dataSlots * sizeof(Value));
    711  return gc::GetGCObjectKind(FIXED_DATA_START + dataSlots);
    712 }
    713 
    714 }  // namespace js
    715 
    716 #endif  // vm_TypedArrayObject_inl_h