tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Tensor.cpp (18446B)


      1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* vim:set ts=2 sw=2 sts=2 et cindent: */
      3 /* This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with
      5 * this file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "mozilla/dom/Tensor.h"
      8 
      9 #include "js/ArrayBuffer.h"
     10 #include "js/BigInt.h"
     11 #include "js/Value.h"
     12 #include "mozilla/Assertions.h"
     13 #include "mozilla/Logging.h"
     14 #include "mozilla/PodOperations.h"
     15 #include "mozilla/RefPtr.h"
     16 #include "mozilla/dom/BindingUtils.h"
     17 #include "mozilla/dom/ONNXBinding.h"
     18 #include "mozilla/dom/Promise.h"
     19 #include "mozilla/dom/ScriptSettings.h"
     20 #include "mozilla/dom/ToJSValue.h"
     21 #include "mozilla/dom/TypedArray.h"
     22 #include "nsContentUtils.h"
     23 #include "nsStringFwd.h"
     24 #include "nsTArray.h"
     25 
     26 extern mozilla::LazyLogModule gONNXLog;
     27 #define LOGD(fmt, ...) \
     28  MOZ_LOG_FMT(gONNXLog, LogLevel::Debug, fmt, ##__VA_ARGS__)
     29 
     30 namespace mozilla::dom {
     31 
// XPCOM/cycle-collection boilerplate: Tensor is refcounted, participates in
// the cycle collector (traversing mGlobal), and keeps a wrapper cache for
// its JS reflector.
NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(Tensor, mGlobal)
NS_IMPL_CYCLE_COLLECTING_ADDREF(Tensor)
NS_IMPL_CYCLE_COLLECTING_RELEASE(Tensor)
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(Tensor)
  NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY
  NS_INTERFACE_MAP_ENTRY(nsISupports)
NS_INTERFACE_MAP_END
     39 
// Builds a Tensor whose raw bytes are copied out of a JS ArrayBufferView.
// The element type is carried as a string in mType; mData holds untyped
// bytes. Constructors cannot report errors here, so an allocation failure
// is only logged and the tensor is left with an empty payload.
// NOTE(review): aDims is stored as-is and is not validated against the byte
// length — confirm callers validate upstream.
Tensor::Tensor(const GlobalObject& aGlobal, const nsACString& aType,
              const ArrayBufferView& aData, const Sequence<int32_t>& aDims)
    : mType(aType) {
  LOGD("{}", __PRETTY_FUNCTION__);
  nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
  mGlobal = global;
  if (!aData.AppendDataTo(mData)) {
    // Append failed (OOM); recover the view's length only so the log
    // message says how large the failed copy was.
    size_t len = aData.ProcessFixedData(
        [&](const Span<uint8_t>& aData) -> size_t { return aData.Length(); });
    LOGD("{} OOM (size: {})", __PRETTY_FUNCTION__, len);
  }
  mDims.AppendElements(aDims);
}
     53 
     54 Tensor::Tensor(const GlobalObject& aGlobal, const nsACString& aType,
     55               const nsTArray<uint8_t>& aData, const Sequence<int32_t>& aDims)
     56    : mType(aType) {
     57  LOGD("{} type: {} len: {}", __PRETTY_FUNCTION__, aType, aData.Length());
     58  nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
     59  mGlobal = global;
     60  // Cast to uint8_t. Type is held in mType
     61  mData.AppendElements(aData);
     62  mDims.AppendElements(aDims);
     63 }
     64 
     65 Tensor::Tensor(const GlobalObject& aGlobal, ONNXTensorElementDataType aType,
     66               nsTArray<uint8_t> aData, nsTArray<int64_t> aDims)
     67    : mType(ONNXTypeToString(aType)) {
     68  LOGD("Output tensor: {} type: {} len: {}", __PRETTY_FUNCTION__,
     69       ONNXTypeToString(aType), aData.Length());
     70  nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
     71  mGlobal = global;
     72  mData = std::move(aData);
     73  mDims.AppendElements(aDims);
     74 }
     75 
     76 already_AddRefed<Tensor> Tensor::Constructor(
     77    const GlobalObject& global, const nsACString& type,
     78    const ArrayBufferViewOrAnySequence& data, const Sequence<int32_t>& dims,
     79    ErrorResult& aRv) {
     80  if (data.IsAnySequence()) {
     81 #define CASE_BIGINT(onnx_type, c_type, conversionfn)           \
     82  case onnx_type: {                                            \
     83    nsTArray<c_type> values;                                   \
     84    for (const JS::Value& element : data.GetAsAnySequence()) { \
     85      JS::BigInt* bigint = element.toBigInt();                 \
     86      if (bigint) {                                            \
     87        values.AppendElement(conversionfn(bigint));            \
     88      } else {                                                 \
     89        aRv.ThrowTypeError("Inconsistent value in arg 2");     \
     90        return nullptr;                                        \
     91      }                                                        \
     92    }                                                          \
     93    valuesAsBytes.AppendElements(                              \
     94        reinterpret_cast<uint8_t*>(values.Elements()),         \
     95        values.Length() * sizeof(c_type));                     \
     96    break;                                                     \
     97  }
     98 
     99 #define CASE(onnx_type, c_type, checkfn, conversionfn)                      \
    100  case onnx_type: {                                                         \
    101    nsTArray<c_type> values;                                                \
    102    for (const auto& element : data.GetAsAnySequence()) {                   \
    103      if (!element.checkfn()) {                                             \
    104        aRv.ThrowTypeError(                                                 \
    105            "Inconsistency between type and value in second argument");     \
    106        return nullptr;                                                     \
    107      }                                                                     \
    108      if (std::numeric_limits<c_type>::lowest() > element.conversionfn() || \
    109          std::numeric_limits<c_type>::max() < element.conversionfn()) {    \
    110        aRv.ThrowTypeError("Value out of range in arg 2");                  \
    111        return nullptr;                                                     \
    112      }                                                                     \
    113      values.AppendElement(element.conversionfn());                         \
    114    }                                                                       \
    115    valuesAsBytes.AppendElements(                                           \
    116        reinterpret_cast<uint8_t*>(values.Elements()),                      \
    117        values.Length() * sizeof(c_type));                                  \
    118    break;                                                                  \
    119  }
    120 
    121    nsTArray<uint8_t> valuesAsBytes;
    122    // Assume constant type, lock on the type of the first element.
    123    switch (StringToONNXDataType(type)) {
    124      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED, uint8_t, isNumber, toDouble)
    125      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, float, isNumber, toDouble)
    126      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8, uint8_t, isNumber, toDouble)
    127      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8, int8_t, isNumber, toDouble)
    128      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16, uint16_t, isNumber, toDouble)
    129      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16, int16_t, isNumber, toDouble)
    130      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32, int32_t, isNumber, toDouble)
    131      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING, int8_t, isNumber, toDouble)
    132      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16, int16_t, isNumber, toDouble);
    133      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE, double, isNumber, toDouble);
    134      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32, uint32_t, isNumber, toDouble);
    135      CASE_BIGINT(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64, int64_t, ToBigInt64);
    136      CASE_BIGINT(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64, uint64_t, ToBigUint64);
    137      case ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL: {
    138        for (const auto& element : data.GetAsAnySequence()) {
    139          if (!element.isBoolean()) {
    140            aRv.ThrowTypeError(
    141                "Inconsistency between type and value in second argument");
    142            return nullptr;
    143          }
    144          valuesAsBytes.AppendElement(element.toBoolean() ? 1 : 0);
    145        }
    146        break;
    147      }
    148      case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX64:
    149      case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX128:
    150      case ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16:
    151      case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E4M3FN:
    152      case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E4M3FNUZ:
    153      case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E5M2:
    154      case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E5M2FNUZ:
    155      case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT4:
    156      case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT4:
    157        MOZ_CRASH("Not handled");
    158        break;
    159    }
    160 
    161    auto rv = MakeRefPtr<Tensor>(global, type, valuesAsBytes, dims);
    162 
    163    LOGD("Tensor from sequence<any>: {}", rv->ToString().get());
    164 
    165    return rv.forget();
    166  }
    167 
    168  auto rv = MakeRefPtr<Tensor>(global, type, data.GetAsArrayBufferView(), dims);
    169  LOGD("Tensor from TypedArray: {}", rv->ToString().get());
    170  return rv.forget();
    171 }  // namespace mozilla::dom
    172 
    173 #undef CASE
    174 #undef CASE_BIGINT
    175 
    176 void Tensor::Dispose() { mData.Clear(); }
    177 
    178 void Tensor::SetDims(const nsTArray<int32_t>& aVal) {
    179  mDims.Clear();
    180  mDims.AppendElements(aVal);
    181 }
    182 
    183 void Tensor::GetDims(nsTArray<int32_t>& aRetVal) {
    184  aRetVal.AppendElements(mDims);
    185 }
    186 
    187 void Tensor::GetType(nsCString& aRetVal) const { aRetVal.Assign(mType); }
    188 
// Reflects the payload into a JS TypedArray matching mType (e.g. float32 ->
// Float32Array, int64 -> BigInt64Array, bool -> Uint8Array). The bytes are
// copied into a temporary array the TypedArrayCreator takes ownership of;
// this Tensor's own buffer is untouched.
void Tensor::GetData(JSContext* aCx,
                    JS::MutableHandle<JSObject*> aRetVal) const {
  LOGD("{} {} type: {} size: {}", __PRETTY_FUNCTION__, fmt::ptr(this),
       mType.get(), mData.Length());

// Reinterprets mData as `c_type` elements and wraps them in the matching
// TypedArray. The C-style cast drops const from Elements(); the bytes are
// then copied by the nsTArray constructor.
#define CASE(onnx_type, typed_array_type, c_type)                     \
  case ONNX_TENSOR_ELEMENT_DATA_TYPE_##onnx_type: {                   \
    nsTArray<c_type> tmp((c_type*)mData.Elements(),                   \
                         mData.Length() / sizeof(c_type));            \
    dom::TypedArrayCreator<typed_array_type> creator(std::move(tmp)); \
    aRetVal.set(creator.Create(aCx));                                 \
    break;                                                            \
  }

  switch (Type()) {
    CASE(INT8, Int8Array, int8_t)
    CASE(UINT8, Uint8Array, uint8_t)
    CASE(INT16, Int16Array, int16_t)
    CASE(UINT16, Uint16Array, uint16_t)
    CASE(INT32, Int32Array, int32_t)
    CASE(UINT32, Uint32Array, uint32_t)
    CASE(INT64, BigInt64Array, int64_t)
    CASE(UINT64, BigUint64Array, uint64_t)
    CASE(BOOL, Uint8Array, uint8_t)
    CASE(DOUBLE, Float64Array, double)
    CASE(FLOAT, Float32Array, float)
    CASE(STRING, Uint8Array, uint8_t)  // NOTE(review): string tensors are
                                       // exposed as raw bytes — confirm
                                       // this is the intended surface.
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16:
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX128:
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX64:
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16:
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E4M3FN:
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E4M3FNUZ:
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E5M2:
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E5M2FNUZ:
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT4:
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT4:
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED:
      MOZ_CRASH("Missing ONNX data type to js value");
      break;
  }

#undef CASE
}
    233 
// The payload always lives in main memory in this implementation, so the
// location is unconditionally Cpu.
TensorDataLocation Tensor::Location() const {
  LOGD("{} {}", __PRETTY_FUNCTION__, fmt::ptr(this));
  return TensorDataLocation::Cpu;
}
    238 
    239 already_AddRefed<Promise> Tensor::GetData(const Optional<bool>& releaseData) {
    240  LOGD("{} {} type: {} size: {}", __PRETTY_FUNCTION__, fmt::ptr(this),
    241       mType.get(), mData.Length());
    242 
    243  AutoJSContext ctx;
    244 
    245  RefPtr<Promise> p = Promise::CreateInfallible(mGlobal);
    246 
    247  if (releaseData.WasPassed() && releaseData.Value()) {
    248    size_t lengthBytes = mData.Length();
    249    UniquePtr<uint8_t[], JS::FreePolicy> tensorData(
    250        js_pod_arena_malloc<uint8_t>(js::ArrayBufferContentsArena,
    251                                     lengthBytes));
    252    PodCopy(tensorData.get(), mData.Elements(), lengthBytes);
    253    JS::Rooted<JSObject*> data(
    254        ctx, JS::NewArrayBufferWithContents(ctx, lengthBytes,
    255                                            std::move(tensorData)));
    256    JS::Rooted<JS::Value> value(ctx, JS::ObjectValue(*data));
    257    p->MaybeResolve(value);
    258    mData.Clear();
    259  } else {
    260    size_t lengthBytes = mData.Length();
    261    UniquePtr<uint8_t[], JS::FreePolicy> tensorData(
    262        js_pod_arena_malloc<uint8_t>(js::ArrayBufferContentsArena,
    263                                     lengthBytes));
    264    PodCopy(tensorData.get(), mData.Elements(), lengthBytes);
    265    JS::Rooted<JSObject*> data(
    266        ctx, JS::NewArrayBufferWithContents(ctx, lengthBytes,
    267                                            std::move(tensorData)));
    268    JS::Rooted<JS::Value> value(ctx, JS::ObjectValue(*data));
    269    p->MaybeResolve(value);
    270  }
    271 
    272  return p.forget();
    273 }
    274 
    275 nsCString Tensor::TypeString() const { return ONNXTypeToString(Type()); }
    276 
    277 ONNXTensorElementDataType Tensor::StringToONNXDataType(
    278    const nsACString& aString) {
    279 #define CASE(string, suffix)                         \
    280  do {                                               \
    281    if (aString.EqualsASCII(#string)) {              \
    282      return ONNX_TENSOR_ELEMENT_DATA_TYPE_##suffix; \
    283    }                                                \
    284  } while (0);
    285 
    286  CASE(int4, INT4);
    287  CASE(uint4, UINT4);
    288  CASE(int8, INT8);
    289  CASE(uint8, UINT8);
    290  CASE(int16, INT16);
    291  CASE(uint16, UINT16);
    292  CASE(int32, INT32);
    293  CASE(uint32, UINT32);
    294  CASE(int64, INT64);
    295  CASE(uint64, UINT64);
    296  CASE(float16, FLOAT16);
    297  CASE(float32, FLOAT);
    298  CASE(float64, DOUBLE);
    299  CASE(bool, BOOL);
    300 
    301  MOZ_CRASH("Missing string to ONNX data type value");
    302 
    303 #undef CASE
    304 }
    305 
// Parses mType back into the ONNX element-type enum (crashes via
// StringToONNXDataType if mType holds an unknown name).
ONNXTensorElementDataType Tensor::Type() const {
  return StringToONNXDataType(mType);
}
    309 
// Renders an ONNX element type as the string name used by this API.
// NOTE(review): DOUBLE maps to "double" here while StringToONNXDataType
// parses "float64" — confirm which spelling the JS surface is meant to
// expose before unifying.
nsLiteralCString Tensor::ONNXTypeToString(
    ONNXTensorElementDataType aType) const {
  switch (aType) {
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED:
      return "undefined"_ns;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT4:
      return "uint4"_ns;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT4:
      return "int4"_ns;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8:
      return "uint8"_ns;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8:
      return "int8"_ns;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16:
      return "uint16"_ns;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16:
      return "int16"_ns;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32:
      return "int32"_ns;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64:
      return "int64"_ns;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32:
      return "uint32"_ns;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64:
      return "uint64"_ns;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING:
      return "string"_ns;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL:
      return "bool"_ns;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16:
      return "float16"_ns;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16:
      return "bfloat16"_ns;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT:
      return "float32"_ns;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE:
      return "double"_ns;
    // Complex and float8 types have no string form and are not supported.
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX64:
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX128:
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E4M3FN:
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E4M3FNUZ:
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E5M2:
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E5M2FNUZ:
      MOZ_CRASH("Missing ONNX data type value to string");
      break;
  }
  // Not reached: every enum value returns or crashes above.
  return ""_ns;
}
    358 
// Debug summary: type, element count, byte size and rank; at Verbose log
// level the dims and every element value are appended too.
nsCString Tensor::ToString() const {
  nsCString rv;
  // DataTypeSize crashes on unsupported types, so it never returns 0 here
  // and the division is safe.
  size_t count = mData.Length() / DataTypeSize(Type());
  rv.AppendFmt(FMT_STRING("{} {} elements, {} bytes, {} dims"), mType, count,
               mData.Length(), mDims.Length());

  if (MOZ_LOG_TEST(gONNXLog, LogLevel::Verbose)) {
    rv.AppendFmt("Dims:\n");
    rv.AppendFmt("{}\n", fmt::join(mDims, ","));
    rv.AppendFmt("Values:\n");

// Dumps the payload reinterpreted as `c_type`, comma-separated.
#define CASE(onnx_type, c_type)                                           \
  case onnx_type: {                                                       \
    rv.AppendFmt("{}\n",                                                  \
                 fmt::join(Span((c_type*)mData.Elements(), count), ",")); \
    break;                                                                \
  }

    switch (Type()) {
      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED, uint8_t)
      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, float)
      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8, uint8_t)
      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8, int8_t)
      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16, uint16_t)
      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16, int16_t)
      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32, int32_t)
      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64, int64_t)
      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING, int8_t)
      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL, int8_t)
      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16, int16_t);
      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE, double);
      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32, uint32_t);
      CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64, uint64_t);
      case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX64:
      case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX128:
      case ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16:
      case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E4M3FN:
      case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E4M3FNUZ:
      case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E5M2:
      case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E5M2FNUZ:
      case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT4:
      case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT4:
        MOZ_CRASH("Not handled");
        break;
    }
#undef CASE
  }
  return rv;
}
    408 
    409 size_t Tensor::DataTypeSize(ONNXTensorElementDataType aType) {
    410 #define CASE(onnx_type, c_type) \
    411  do {                          \
    412    case onnx_type:             \
    413      return sizeof(c_type);    \
    414  } while (0);
    415 
    416  switch (aType) {
    417    CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED, uint8_t)
    418    CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, float)
    419    CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8, uint8_t)
    420    CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8, int8_t)
    421    CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16, uint16_t)
    422    CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16, int16_t)
    423    CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32, int32_t)
    424    CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64, int64_t)
    425    CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING, int8_t)
    426    CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL, int8_t)
    427    CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16, int16_t);
    428    CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE, double);
    429    CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32, uint32_t);
    430    CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64, uint64_t);
    431    case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX64:
    432    case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX128:
    433    case ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16:
    434    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E4M3FN:
    435    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E4M3FNUZ:
    436    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E5M2:
    437    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E5M2FNUZ:
    438    case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT4:
    439    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT4:
    440      MOZ_CRASH("Not handled");
    441      break;
    442  }
    443 #undef CASE
    444  return 0;
    445 }
    446 
// WebIDL binding hook: creates (or returns) the JS reflector for this
// Tensor via the generated Tensor_Binding code.
JSObject* Tensor::WrapObject(JSContext* aCx,
                            JS::Handle<JSObject*> aGivenProto) {
  return Tensor_Binding::Wrap(aCx, this, aGivenProto);
}
    451 
    452 }  // namespace mozilla::dom