tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

AtomicsObject.cpp (65522B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 /*
      8 * JS Atomics pseudo-module.
      9 *
     10 * See chapter 24.4 "The Atomics Object" and chapter 27 "Memory Model" in
     11 * ECMAScript 2021 for the full specification.
     12 */
     13 
     14 #include "builtin/AtomicsObject.h"
     15 
     16 #include "mozilla/Atomics.h"
     17 #include "mozilla/DebugOnly.h"
     18 #include "mozilla/FloatingPoint.h"
     19 #include "mozilla/Maybe.h"
     20 #include "mozilla/ScopeExit.h"
     21 
     22 #include "jsnum.h"
     23 
     24 #include "builtin/Promise.h"
     25 #include "jit/AtomicOperations.h"
     26 #include "jit/InlinableNatives.h"
     27 #include "js/Class.h"
     28 #include "js/friend/ErrorMessages.h"  // js::GetErrorMessage, JSMSG_*
     29 #include "js/PropertySpec.h"
     30 #include "js/Result.h"
     31 #include "js/WaitCallbacks.h"
     32 #include "vm/GlobalObject.h"
     33 #include "vm/HelperThreads.h"                 // AutoLockHelperThreadState
#include "vm/OffThreadPromiseRuntimeState.h"  // OffThreadPromiseTask
     35 #include "vm/TypedArrayObject.h"
     36 
     37 #include "vm/Compartment-inl.h"
     38 #include "vm/JSObject-inl.h"
     39 
     40 using namespace js;
     41 
     42 static bool ReportBadArrayType(JSContext* cx) {
     43  JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
     44                            JSMSG_ATOMICS_BAD_ARRAY);
     45  return false;
     46 }
     47 
     48 static bool ReportDetachedArrayBuffer(JSContext* cx) {
     49  JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
     50                            JSMSG_TYPED_ARRAY_DETACHED);
     51  return false;
     52 }
     53 
     54 static bool ReportImmutableBuffer(JSContext* cx) {
     55  JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
     56                            JSMSG_ARRAYBUFFER_IMMUTABLE);
     57  return false;
     58 }
     59 
     60 static bool ReportResizedArrayBuffer(JSContext* cx) {
     61  JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
     62                            JSMSG_TYPED_ARRAY_RESIZED_BOUNDS);
     63  return false;
     64 }
     65 
     66 static bool ReportOutOfRange(JSContext* cx) {
     67  // Use JSMSG_BAD_INDEX here, it is what ToIndex uses for some cases that it
     68  // reports directly.
     69  JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX);
     70  return false;
     71 }
     72 
// Whether an Atomics operation only reads the array or may also write it;
// Write access is rejected on immutable buffers (see
// ValidateIntegerTypedArray).
enum class AccessMode { Read, Write };
     74 
     75 // ES2026 draft rev affcec07523a45d40fb668689c07657412e772ac
     76 // Plus: https://tc39.es/proposal-immutable-arraybuffer/
     77 // 25.4.3.1 ValidateIntegerTypedArray ( typedArray, waitable )
     78 static bool ValidateIntegerTypedArray(
     79    JSContext* cx, HandleValue typedArray, bool waitable, AccessMode accessMode,
     80    MutableHandle<TypedArrayObject*> unwrappedTypedArray) {
     81  // Steps 1-2.
     82  auto* unwrapped = UnwrapAndTypeCheckValue<TypedArrayObject>(
     83      cx, typedArray, [cx]() { ReportBadArrayType(cx); });
     84  if (!unwrapped) {
     85    return false;
     86  }
     87 
     88  if (unwrapped->hasDetachedBuffer()) {
     89    return ReportDetachedArrayBuffer(cx);
     90  }
     91 
     92  if (accessMode == AccessMode::Write &&
     93      unwrapped->is<ImmutableTypedArrayObject>()) {
     94    return ReportImmutableBuffer(cx);
     95  }
     96 
     97  // Steps 3-4.
     98  if (waitable) {
     99    switch (unwrapped->type()) {
    100      case Scalar::Int32:
    101      case Scalar::BigInt64:
    102        break;
    103      default:
    104        return ReportBadArrayType(cx);
    105    }
    106  } else {
    107    switch (unwrapped->type()) {
    108      case Scalar::Int8:
    109      case Scalar::Uint8:
    110      case Scalar::Int16:
    111      case Scalar::Uint16:
    112      case Scalar::Int32:
    113      case Scalar::Uint32:
    114      case Scalar::BigInt64:
    115      case Scalar::BigUint64:
    116        break;
    117      default:
    118        return ReportBadArrayType(cx);
    119    }
    120  }
    121 
    122  // Step 5 (modified to return the TypedArray).
    123  unwrappedTypedArray.set(unwrapped);
    124  return true;
    125 }
    126 
    127 // ES2021 draft rev bd868f20b8c574ad6689fba014b62a1dba819e56
    128 // 24.4.1.2 ValidateAtomicAccess ( typedArray, requestIndex )
    129 static bool ValidateAtomicAccess(JSContext* cx,
    130                                 Handle<TypedArrayObject*> typedArray,
    131                                 HandleValue requestIndex, size_t* index) {
    132  MOZ_ASSERT(!typedArray->hasDetachedBuffer());
    133 
    134  // Steps 1-2.
    135  mozilla::Maybe<size_t> length = typedArray->length();
    136  if (!length) {
    137    // ValidateIntegerTypedArray doesn't check for out-of-bounds in our
    138    // implementation, so we have to handle this case here.
    139    return ReportResizedArrayBuffer(cx);
    140  }
    141 
    142  // Steps 3-4.
    143  uint64_t accessIndex;
    144  if (!ToIndex(cx, requestIndex, &accessIndex)) {
    145    return false;
    146  }
    147 
    148  // Step 5.
    149  if (accessIndex >= *length) {
    150    return ReportOutOfRange(cx);
    151  }
    152 
    153  // Steps 6-9.
    154  *index = size_t(accessIndex);
    155  return true;
    156 }
    157 
    158 template <typename T>
    159 struct ArrayOps {
    160  using Type = T;
    161 
    162  static JS::Result<T> convertValue(JSContext* cx, HandleValue v) {
    163    int32_t n;
    164    if (!ToInt32(cx, v, &n)) {
    165      return cx->alreadyReportedError();
    166    }
    167    return static_cast<T>(n);
    168  }
    169 
    170  static JS::Result<T> convertValue(JSContext* cx, HandleValue v,
    171                                    MutableHandleValue result) {
    172    double d;
    173    if (!ToInteger(cx, v, &d)) {
    174      return cx->alreadyReportedError();
    175    }
    176    result.setNumber(d);
    177    return static_cast<T>(JS::ToInt32(d));
    178  }
    179 
    180  static JS::Result<> storeResult(JSContext* cx, T v,
    181                                  MutableHandleValue result) {
    182    result.setInt32(v);
    183    return Ok();
    184  }
    185 };
    186 
    187 template <>
    188 JS::Result<> ArrayOps<uint32_t>::storeResult(JSContext* cx, uint32_t v,
    189                                             MutableHandleValue result) {
    190  // Always double typed so that the JITs can assume the types are stable.
    191  result.setDouble(v);
    192  return Ok();
    193 }
    194 
    195 template <>
    196 struct ArrayOps<int64_t> {
    197  using Type = int64_t;
    198 
    199  static JS::Result<int64_t> convertValue(JSContext* cx, HandleValue v) {
    200    BigInt* bi = ToBigInt(cx, v);
    201    if (!bi) {
    202      return cx->alreadyReportedError();
    203    }
    204    return BigInt::toInt64(bi);
    205  }
    206 
    207  static JS::Result<int64_t> convertValue(JSContext* cx, HandleValue v,
    208                                          MutableHandleValue result) {
    209    BigInt* bi = ToBigInt(cx, v);
    210    if (!bi) {
    211      return cx->alreadyReportedError();
    212    }
    213    result.setBigInt(bi);
    214    return BigInt::toInt64(bi);
    215  }
    216 
    217  static JS::Result<> storeResult(JSContext* cx, int64_t v,
    218                                  MutableHandleValue result) {
    219    BigInt* bi = BigInt::createFromInt64(cx, v);
    220    if (!bi) {
    221      return cx->alreadyReportedError();
    222    }
    223    result.setBigInt(bi);
    224    return Ok();
    225  }
    226 };
    227 
    228 template <>
    229 struct ArrayOps<uint64_t> {
    230  using Type = uint64_t;
    231 
    232  static JS::Result<uint64_t> convertValue(JSContext* cx, HandleValue v) {
    233    BigInt* bi = ToBigInt(cx, v);
    234    if (!bi) {
    235      return cx->alreadyReportedError();
    236    }
    237    return BigInt::toUint64(bi);
    238  }
    239 
    240  static JS::Result<uint64_t> convertValue(JSContext* cx, HandleValue v,
    241                                           MutableHandleValue result) {
    242    BigInt* bi = ToBigInt(cx, v);
    243    if (!bi) {
    244      return cx->alreadyReportedError();
    245    }
    246    result.setBigInt(bi);
    247    return BigInt::toUint64(bi);
    248  }
    249 
    250  static JS::Result<> storeResult(JSContext* cx, uint64_t v,
    251                                  MutableHandleValue result) {
    252    BigInt* bi = BigInt::createFromUint64(cx, v);
    253    if (!bi) {
    254      return cx->alreadyReportedError();
    255    }
    256    result.setBigInt(bi);
    257    return Ok();
    258  }
    259 };
    260 
    261 // ES2021 draft rev bd868f20b8c574ad6689fba014b62a1dba819e56
    262 // 24.4.1.11 AtomicReadModifyWrite ( typedArray, index, value, op ), steps 1-2.
    263 // 24.4.1.12 AtomicLoad ( typedArray, index ), steps 1-2.
    264 // 24.4.4 Atomics.compareExchange ( typedArray, index, ... ), steps 1-2.
    265 // 24.4.9 Atomics.store ( typedArray, index, value ), steps 1-2.
    266 template <typename Op>
    267 static bool AtomicAccess(JSContext* cx, HandleValue obj, HandleValue index,
    268                         AccessMode accessMode, Op op) {
    269  // Step 1.
    270  Rooted<TypedArrayObject*> unwrappedTypedArray(cx);
    271  if (!ValidateIntegerTypedArray(cx, obj, false, accessMode,
    272                                 &unwrappedTypedArray)) {
    273    return false;
    274  }
    275 
    276  // Step 2.
    277  size_t intIndex;
    278  if (!ValidateAtomicAccess(cx, unwrappedTypedArray, index, &intIndex)) {
    279    return false;
    280  }
    281 
    282  switch (unwrappedTypedArray->type()) {
    283    case Scalar::Int8:
    284      return op(ArrayOps<int8_t>{}, unwrappedTypedArray, intIndex);
    285    case Scalar::Uint8:
    286      return op(ArrayOps<uint8_t>{}, unwrappedTypedArray, intIndex);
    287    case Scalar::Int16:
    288      return op(ArrayOps<int16_t>{}, unwrappedTypedArray, intIndex);
    289    case Scalar::Uint16:
    290      return op(ArrayOps<uint16_t>{}, unwrappedTypedArray, intIndex);
    291    case Scalar::Int32:
    292      return op(ArrayOps<int32_t>{}, unwrappedTypedArray, intIndex);
    293    case Scalar::Uint32:
    294      return op(ArrayOps<uint32_t>{}, unwrappedTypedArray, intIndex);
    295    case Scalar::BigInt64:
    296      return op(ArrayOps<int64_t>{}, unwrappedTypedArray, intIndex);
    297    case Scalar::BigUint64:
    298      return op(ArrayOps<uint64_t>{}, unwrappedTypedArray, intIndex);
    299    case Scalar::Float16:
    300    case Scalar::Float32:
    301    case Scalar::Float64:
    302    case Scalar::Uint8Clamped:
    303    case Scalar::MaxTypedArrayViewType:
    304    case Scalar::Int64:
    305    case Scalar::Simd128:
    306      break;
    307  }
    308  MOZ_CRASH("Unsupported TypedArray type");
    309 }
    310 
    311 template <typename T>
    312 static SharedMem<T*> TypedArrayData(JSContext* cx, TypedArrayObject* typedArray,
    313                                    size_t index) {
    314  // RevalidateAtomicAccess, steps 1-3.
    315  mozilla::Maybe<size_t> length = typedArray->length();
    316 
    317  // RevalidateAtomicAccess, step 4.
    318  if (!length) {
    319    ReportDetachedArrayBuffer(cx);
    320    return {};
    321  }
    322 
    323  // RevalidateAtomicAccess, step 5.
    324  if (index >= *length) {
    325    ReportOutOfRange(cx);
    326    return {};
    327  }
    328 
    329  SharedMem<void*> typedArrayData = typedArray->dataPointerEither();
    330  return typedArrayData.cast<T*>() + index;
    331 }
    332 
    333 // ES2021 draft rev bd868f20b8c574ad6689fba014b62a1dba819e56
    334 // 24.4.4 Atomics.compareExchange ( typedArray, index, expectedValue,
    335 //                                  replacementValue )
    336 static bool atomics_compareExchange(JSContext* cx, unsigned argc, Value* vp) {
    337  CallArgs args = CallArgsFromVp(argc, vp);
    338  HandleValue typedArray = args.get(0);
    339  HandleValue index = args.get(1);
    340 
    341  return AtomicAccess(
    342      cx, typedArray, index, AccessMode::Write,
    343      [cx, &args](auto ops, Handle<TypedArrayObject*> unwrappedTypedArray,
    344                  size_t index) {
    345        using T = typename decltype(ops)::Type;
    346 
    347        HandleValue expectedValue = args.get(2);
    348        HandleValue replacementValue = args.get(3);
    349 
    350        T oldval;
    351        JS_TRY_VAR_OR_RETURN_FALSE(cx, oldval,
    352                                   ops.convertValue(cx, expectedValue));
    353 
    354        T newval;
    355        JS_TRY_VAR_OR_RETURN_FALSE(cx, newval,
    356                                   ops.convertValue(cx, replacementValue));
    357 
    358        SharedMem<T*> addr = TypedArrayData<T>(cx, unwrappedTypedArray, index);
    359        if (!addr) {
    360          return false;
    361        }
    362 
    363        oldval =
    364            jit::AtomicOperations::compareExchangeSeqCst(addr, oldval, newval);
    365 
    366        JS_TRY_OR_RETURN_FALSE(cx, ops.storeResult(cx, oldval, args.rval()));
    367        return true;
    368      });
    369 }
    370 
    371 // ES2021 draft rev bd868f20b8c574ad6689fba014b62a1dba819e56
    372 // 24.4.7 Atomics.load ( typedArray, index )
    373 static bool atomics_load(JSContext* cx, unsigned argc, Value* vp) {
    374  CallArgs args = CallArgsFromVp(argc, vp);
    375  HandleValue typedArray = args.get(0);
    376  HandleValue index = args.get(1);
    377 
    378  return AtomicAccess(
    379      cx, typedArray, index, AccessMode::Read,
    380      [cx, &args](auto ops, Handle<TypedArrayObject*> unwrappedTypedArray,
    381                  size_t index) {
    382        using T = typename decltype(ops)::Type;
    383 
    384        SharedMem<T*> addr = TypedArrayData<T>(cx, unwrappedTypedArray, index);
    385        if (!addr) {
    386          return false;
    387        }
    388 
    389        T v = jit::AtomicOperations::loadSeqCst(addr);
    390 
    391        JS_TRY_OR_RETURN_FALSE(cx, ops.storeResult(cx, v, args.rval()));
    392        return true;
    393      });
    394 }
    395 
    396 // ES2021 draft rev bd868f20b8c574ad6689fba014b62a1dba819e56
    397 // 24.4.9 Atomics.store ( typedArray, index, value )
    398 static bool atomics_store(JSContext* cx, unsigned argc, Value* vp) {
    399  CallArgs args = CallArgsFromVp(argc, vp);
    400  HandleValue typedArray = args.get(0);
    401  HandleValue index = args.get(1);
    402 
    403  return AtomicAccess(
    404      cx, typedArray, index, AccessMode::Write,
    405      [cx, &args](auto ops, Handle<TypedArrayObject*> unwrappedTypedArray,
    406                  size_t index) {
    407        using T = typename decltype(ops)::Type;
    408 
    409        HandleValue value = args.get(2);
    410 
    411        T v;
    412        JS_TRY_VAR_OR_RETURN_FALSE(cx, v,
    413                                   ops.convertValue(cx, value, args.rval()));
    414 
    415        SharedMem<T*> addr = TypedArrayData<T>(cx, unwrappedTypedArray, index);
    416        if (!addr) {
    417          return false;
    418        }
    419 
    420        jit::AtomicOperations::storeSeqCst(addr, v);
    421        return true;
    422      });
    423 }
    424 
    425 // ES2021 draft rev bd868f20b8c574ad6689fba014b62a1dba819e56
    426 // 24.4.1.11 AtomicReadModifyWrite ( typedArray, index, value, op )
    427 template <typename AtomicOp>
    428 static bool AtomicReadModifyWrite(JSContext* cx, const CallArgs& args,
    429                                  AtomicOp op) {
    430  HandleValue typedArray = args.get(0);
    431  HandleValue index = args.get(1);
    432 
    433  return AtomicAccess(
    434      cx, typedArray, index, AccessMode::Write,
    435      [cx, &args, op](auto ops, Handle<TypedArrayObject*> unwrappedTypedArray,
    436                      size_t index) {
    437        using T = typename decltype(ops)::Type;
    438 
    439        HandleValue value = args.get(2);
    440 
    441        T v;
    442        JS_TRY_VAR_OR_RETURN_FALSE(cx, v, ops.convertValue(cx, value));
    443 
    444        SharedMem<T*> addr = TypedArrayData<T>(cx, unwrappedTypedArray, index);
    445        if (!addr) {
    446          return false;
    447        }
    448 
    449        v = op(addr, v);
    450 
    451        JS_TRY_OR_RETURN_FALSE(cx, ops.storeResult(cx, v, args.rval()));
    452        return true;
    453      });
    454 }
    455 
    456 // ES2021 draft rev bd868f20b8c574ad6689fba014b62a1dba819e56
    457 // 24.4.5 Atomics.exchange ( typedArray, index, value )
    458 static bool atomics_exchange(JSContext* cx, unsigned argc, Value* vp) {
    459  CallArgs args = CallArgsFromVp(argc, vp);
    460 
    461  return AtomicReadModifyWrite(cx, args, [](auto addr, auto val) {
    462    return jit::AtomicOperations::exchangeSeqCst(addr, val);
    463  });
    464 }
    465 
    466 // ES2021 draft rev bd868f20b8c574ad6689fba014b62a1dba819e56
    467 // 24.4.2 Atomics.add ( typedArray, index, value )
    468 static bool atomics_add(JSContext* cx, unsigned argc, Value* vp) {
    469  CallArgs args = CallArgsFromVp(argc, vp);
    470 
    471  return AtomicReadModifyWrite(cx, args, [](auto addr, auto val) {
    472    return jit::AtomicOperations::fetchAddSeqCst(addr, val);
    473  });
    474 }
    475 
    476 // ES2021 draft rev bd868f20b8c574ad6689fba014b62a1dba819e56
    477 // 24.4.10 Atomics.sub ( typedArray, index, value )
    478 static bool atomics_sub(JSContext* cx, unsigned argc, Value* vp) {
    479  CallArgs args = CallArgsFromVp(argc, vp);
    480 
    481  return AtomicReadModifyWrite(cx, args, [](auto addr, auto val) {
    482    return jit::AtomicOperations::fetchSubSeqCst(addr, val);
    483  });
    484 }
    485 
    486 // ES2021 draft rev bd868f20b8c574ad6689fba014b62a1dba819e56
    487 // 24.4.3 Atomics.and ( typedArray, index, value )
    488 static bool atomics_and(JSContext* cx, unsigned argc, Value* vp) {
    489  CallArgs args = CallArgsFromVp(argc, vp);
    490 
    491  return AtomicReadModifyWrite(cx, args, [](auto addr, auto val) {
    492    return jit::AtomicOperations::fetchAndSeqCst(addr, val);
    493  });
    494 }
    495 
    496 // ES2021 draft rev bd868f20b8c574ad6689fba014b62a1dba819e56
    497 // 24.4.8 Atomics.or ( typedArray, index, value )
    498 static bool atomics_or(JSContext* cx, unsigned argc, Value* vp) {
    499  CallArgs args = CallArgsFromVp(argc, vp);
    500 
    501  return AtomicReadModifyWrite(cx, args, [](auto addr, auto val) {
    502    return jit::AtomicOperations::fetchOrSeqCst(addr, val);
    503  });
    504 }
    505 
    506 // ES2021 draft rev bd868f20b8c574ad6689fba014b62a1dba819e56
    507 // 24.4.13 Atomics.xor ( typedArray, index, value )
    508 static bool atomics_xor(JSContext* cx, unsigned argc, Value* vp) {
    509  CallArgs args = CallArgsFromVp(argc, vp);
    510 
    511  return AtomicReadModifyWrite(cx, args, [](auto addr, auto val) {
    512    return jit::AtomicOperations::fetchXorSeqCst(addr, val);
    513  });
    514 }
    515 
    516 // ES2021 draft rev bd868f20b8c574ad6689fba014b62a1dba819e56
    517 // 24.4.6 Atomics.isLockFree ( size )
    518 static bool atomics_isLockFree(JSContext* cx, unsigned argc, Value* vp) {
    519  CallArgs args = CallArgsFromVp(argc, vp);
    520  HandleValue v = args.get(0);
    521 
    522  // Step 1.
    523  int32_t size;
    524  if (v.isInt32()) {
    525    size = v.toInt32();
    526  } else {
    527    double dsize;
    528    if (!ToInteger(cx, v, &dsize)) {
    529      return false;
    530    }
    531 
    532    // Step 7 (non-integer case only).
    533    if (!mozilla::NumberEqualsInt32(dsize, &size)) {
    534      args.rval().setBoolean(false);
    535      return true;
    536    }
    537  }
    538 
    539  // Steps 2-7.
    540  args.rval().setBoolean(jit::AtomicOperations::isLockfreeJS(size));
    541  return true;
    542 }
    543 
    544 namespace js {
    545 
    546 /*
    547 * [SMDOC] Atomics.wait, Atomics.waitAsync, and Atomics.notify
    548 *
    549 * `wait`, `waitAsync`, and `notify` are provided as low-level primitives for
    550 * thread synchronization. The primary use case is to take code that looks like
    551 * this:
    552 *
    553 *     const ValueIndex = 0;
    554 *     const FlagIndex = 1;
    555 *
    556 *     THREAD A:
    557 *       // Write a value.
    558 *       Atomics.store(sharedBuffer, ValueIndex, value);
    559 *       // Update a flag to indicate that the value was written.
    560 *       Atomics.store(sharedBuffer, FlagIndex, 1);
    561 *
    562 *     THREAD B:
    563 *       // Busy-wait for the flag to be updated.
    564 *       while (Atomics.load(sharedBuffer, FlagIndex) == 0) {}
    565 *       // Load the value.
    566 *       let value = Atomics.load(sharedBuffer, ValueIndex);
    567 *
    568 * ...and replace the busy-wait:
    569 *
    570 *     THREAD A':
    571 *       // Write the value and update the flag.
    572 *       Atomics.store(sharedBuffer, ValueIndex, value);
    573 *       Atomics.store(sharedBuffer, FlagIndex, 1);
    574 *       // Notify that the flag has been written.
    575 *       Atomics.notify(sharedBuffer, FlagIndex);
    576 *
    577 *     THREAD B':
    578 *       // Wait until the flag is notified.
    579 *       // If it's already non-zero, no wait occurs.
    580 *       Atomics.wait(sharedBuffer, FlagIndex, 0);
    581 *       // Load the value.
    582 *       let value = Atomics.load(sharedBuffer, ValueIndex);
    583 *
    584 * `wait` puts the calling thread to sleep until it is notified (or an optional
    585 * timeout expires). This can't be used on the main thread.
    586 *
    587 * `waitAsync` instead creates a Promise which will be resolved when the
    588 * position is notified (or an optional timeout expires).
    589 *
    590 * When `wait` or `waitAsync` is called, a waiter is created and registered with
    591 * the SharedArrayBuffer. Waiter instances for a SharedArrayRawBuffer are
    592 * connected in a circular doubly-linked list, containing both sync and async
    593 * waiters. Sync waiters are stack allocated in the stack frame of the waiting
    594 * thread. Async waiters are heap-allocated. The `waiters` field of the
    595 * SharedArrayRawBuffer is a dedicated list head node for the list. Waiters are
    596 * awoken in a first-in-first-out order. The `next` field of the list head node
    597 * points to the highest priority waiter. The `prev` field points to the lowest
    598 * priority waiter. This list is traversed when `notify` is called to find the
    599 * waiters that should be woken up.
    600 *
    601 * Synchronous waits are implemented using a per-context condition variable. See
    602 * FutexThread::wait.
    603 *
    604 * Asynchronous waits are more complicated, particularly with respect to
    605 * timeouts. In addition to the AsyncFutexWaiter that is added to the list of
    606 * waiters, we also create:
    607 *
    608 *   1. A Promise object to return to the caller. The promise will be resolved
    609 *      when the waiter is notified or times out.
    610 *   2. A WaitAsyncNotifyTask (derived from OffThreadPromiseTask) wrapping that
    611 *      promise. `notify` can be called from any thread, but the promise must be
    612 *      resolved on the thread that owns it. To resolve the promise, we dispatch
    613 *      the task to enqueue a promise resolution task in the target's event
    614 *      loop. The notify task is stored in the AsyncFutexWaiter.
    615 *   3. If there is a non-zero timeout, a WaitAsyncTimeoutTask (derived from
    616 *      JS::Dispatchable) containing a pointer to the async waiter. We dispatch
    617 *      this task to the embedding's event loop, with a delay. When the timeout
    618 *      expires and the task runs, if the promise has not yet been resolved, we
    619 *      resolve it with "timed-out".
    620 *
    621 * `waitAsync` Lifetimes
    622 * ---------------------
    623 *           ┌─────┐
    624 *           │ SAB │
    625 *           └─────┘
    626 *        ┌────► ◄────┐ bi-directional linked list
    627 *        │           │
    628 *        ▼           ▼
    629 *      *waiter      *waiter
    630 *        ▲           ▲
    631 *        │           │
    632 *        └───► *  ◄──┘
    633 *              │
    634 *      ┌───────▼────────┐
    635 *      │AsyncFutexWaiter│ ◄───────────┐
    636 *      └────────────────┘             │
    637 *              │                      │
    638 *              │ borrow               │ borrow
    639 *              ▼                      ▼
    640 *      ┌────────────────────┐       ┌───────────────────┐
    641 *      │WaitAsyncTimeoutTask│       │WaitAsyncNotifyTask│ ◄─────┐
    642 *      └────────────────────┘       └───┬───────────────┘       │
    643 *              ▲                        │             ▲         │
    644 *              │                        │             │         │ (transferred)
    645 *              │ own                    ▼             │         │ own
    646 *     ┌────────────────────────────┐ ┌─────────────┐  │ ┌─────────────────────┐
    647 *     │DelayedJSDispatchableHandler│ │PromiseObject│  │ │JSDispatchableHandler│
    648 *     └────────────────────────────┘ └─────────────┘  │ └─────────────────────┘
    649 *              ▲                        ▲             │
    650 *     ┌────────┼────────────────────────┼──────┐      │
    651 *     │ ┌──────┴───────┐           ┌────┴────┐ │      │ own (initialized)
    652 *     │ │TimeoutManager│           │JSContext┼─┼──────┘
    653 *     │ └──────────────┘           └─────────┘ │   Cancellable List
    654 *     │                                        │
    655 *     │     Runtime (MainThread or Worker)     │
    656 *     └────────────────────────────────────────┘
    657 *
    658 *
    659 * The data representing an async wait is divided between the JSContext in which
    660 * it was created and the SharedArrayBuffer being waited on. There are three
    661 * potential components:
    662 *
    663 * A) The AsyncFutexWaiter itself (shared by the SharedArrayRawBuffer,
    664 *    WaitAsyncNotifyTask, and WaitAsyncTimeoutTask if it exists). It
    665 *    will be cleaned up manually.
    666 * B) The corresponding WaitAsyncNotifyTask (owned by the JS::Context). It
    667 *    destroys itself on run.
    668 * C) The WaitAsyncTimeoutTask (owned by the embedding's job queue). It
    669 *    destroys itself on run.
    670 *
    671 * WaitAsyncNotifyTask and WaitAsyncTimeoutTask (if it exists) delete
    672 * themselves. When either task is run or destroyed, they also trigger the
 *    destruction and unlinking of the AsyncFutexWaiter. There are
 *    five scenarios:
    675 *
    676 * 1. A call to `Atomics.notify` notifies the waiter (atomics_notify_impl)
    677 *    from another thread.
    678 *    A) The async waiter is removed from the list.
    679 *    B) The notify task is removed from OffThreadPromiseRuntimeState's
    680 *       cancelable list and is dispatched to resolve the promise with "ok".
    681 *       The task then destroys itself.
    682 *    C) The WaitAsyncTimeoutTask is disabled. It will fire and do nothing.
    683 *       See AsyncFutexWaiter::maybeClearTimeout in atomics_notify_impl.
    684 *    D) The async waiter is destroyed.
    685 *
    686 * 2. A call to `Atomics.notify` notifies the waiter (atomics_notify_impl)
    687 *    from the same thread.
    688 *    A) The async waiter is removed from the list.
    689 *    B) The notify task is cancelled. The promise is extracted and resolved
    690 *        directly.
    691 *    C) The WaitAsyncTimeoutTask is disabled. It will fire and do nothing.
    692 *       See AsyncFutexWaiter::maybeClearTimeout in atomics_notify_impl.
    693 *    D) The async waiter is destroyed.
    694 *
    695 * 3. The timeout expires without notification (WaitAsyncTimeoutTask::run)
    696 *    A) The async waiter is removed from the list.
 *    B) The notify task is dispatched to resolve the promise with "timed-out"
 *       and destroys itself.
    699 *    C) The timeout task is running and will be destroyed when it's done.
    700 *    D) The async waiter is destroyed.
    701 *
    702 * 4. The context is destroyed (OffThreadPromiseRuntimeState::shutdown):
    703 *    A) The async waiter is removed and destroyed by
    704 *       WaitAsyncNotifyTask::prepareForCancel.
    705 *    B) The notify task is cancelled and destroyed by
    706 *       OffThreadPromiseRuntimeState::shutdown.
    707 *    C) The WaitAsyncTimeoutTask is disabled.
    708 *       See AsyncFutexWaiter::maybeClearTimeout in prepareForCancel.
    709 *
    710 * 5. The SharedArrayBuffer is collected by the GC (~FutexWaiterListHead)
    711 *    A) Async waiters without timeouts can no longer resolve. They are removed.
    712 *    B) If no timeout task exists, the notify task is dispatched and
    713 *       destroys itself, without resolving the promise.
    714 *    C) If there is an enqueued timeout, the waiter can still be resolved.
    715 *       In this case it will not be destroyed until it times out.
    716 *
    717 * The UniquePtr can be thought of as a "runnable handle" that gives exclusive
    718 * access to executing a runnable by a given owner. The runnable will still
    719 * delete itself (via js_delete, see OffThreadPromiseRuntimeState.cpp
    720 * implementation of OffThreadPromiseTask::run). If somehow the UniquePtr is not
    721 * passed to embedding code that will run the code, the task is released from
 * the pointer. The raw pointers in OffThreadPromiseRuntimeState's cancellable
 * and dead lists are then used to identify which tasks were never dispatched,
 * and which failed to dispatch, and to clear them when the engine has an
 * opportunity to do so (i.e. at shutdown).
 */
    727 
    728 class WaitAsyncNotifyTask;
    729 class WaitAsyncTimeoutTask;
    730 
    731 class AutoLockFutexAPI {
    732  // We have to wrap this in a Maybe because of the way loading
    733  // mozilla::Atomic pointers works.
    734  mozilla::Maybe<js::UniqueLock<js::Mutex>> unique_;
    735 
    736 public:
    737  AutoLockFutexAPI() {
    738    js::Mutex* lock = FutexThread::lock_;
    739    unique_.emplace(*lock);
    740  }
    741 
    742  ~AutoLockFutexAPI() { unique_.reset(); }
    743 
    744  js::UniqueLock<js::Mutex>& unique() { return *unique_; }
    745 };
    746 
// Represents one waiter. This is the abstract base class for SyncFutexWaiter
// and AsyncFutexWaiter.
class FutexWaiter : public FutexWaiterListNode {
 protected:
  FutexWaiter(JSContext* cx, size_t offset, FutexWaiterKind kind)
      : FutexWaiterListNode(kind), offset_(offset), cx_(cx) {}

  size_t offset_;  // Element index within the SharedArrayBuffer
  JSContext* cx_;  // The thread that called `wait` or `waitAsync`.

 public:
  bool isSync() const { return kind_ == FutexWaiterKind::Sync; }
  // Checked downcast to SyncFutexWaiter. reinterpret_cast (rather than
  // static_cast) because the derived classes are not yet complete types at
  // this point in the file; the kind_ assertion guards correctness.
  SyncFutexWaiter* asSync() {
    MOZ_ASSERT(isSync());
    return reinterpret_cast<SyncFutexWaiter*>(this);
  }

  bool isAsync() const { return kind_ == FutexWaiterKind::Async; }
  // Checked downcast to AsyncFutexWaiter; see asSync() for the cast choice.
  AsyncFutexWaiter* asAsync() {
    MOZ_ASSERT(isAsync());
    return reinterpret_cast<AsyncFutexWaiter*>(this);
  }
  size_t offset() const { return offset_; }
  JSContext* cx() { return cx_; }
};
    772 
// Represents a worker blocked while calling |Atomics.wait|.
// Instances of js::SyncFutexWaiter are stack-allocated and linked
// onto the waiter list across a call to FutexThread::wait().
// When this waiter is notified, the worker will resume execution.
class MOZ_STACK_CLASS SyncFutexWaiter : public FutexWaiter {
 public:
  SyncFutexWaiter(JSContext* cx, size_t offset)
      : FutexWaiter(cx, offset, FutexWaiterKind::Sync) {}
};
    782 
// Represents a waiter asynchronously waiting after calling |Atomics.waitAsync|.
// Instances of js::AsyncFutexWaiter are heap-allocated.
// When this waiter is notified, the promise it holds will be resolved.
class AsyncFutexWaiter : public FutexWaiter {
 public:
  AsyncFutexWaiter(JSContext* cx, size_t offset)
      : FutexWaiter(cx, offset, FutexWaiterKind::Async) {}

  // NOTE: AsyncFutexWaiter is deleted only by UniquePtr<AsyncFutexWaiter>,
  //       and thus the destructor is not virtual.
  ~AsyncFutexWaiter();

  WaitAsyncNotifyTask* notifyTask() { return notifyTask_; }

  void setNotifyTask(WaitAsyncNotifyTask* task) {
    MOZ_ASSERT(!notifyTask_);
    notifyTask_ = task;
  }

  void resetNotifyTask() { notifyTask_ = nullptr; }

  void setTimeoutTask(WaitAsyncTimeoutTask* task) {
    MOZ_ASSERT(!timeoutTask_);
    timeoutTask_ = task;
  }

  void resetTimeoutTask() { timeoutTask_ = nullptr; }

  bool hasTimeout() const { return !!timeoutTask_; }
  WaitAsyncTimeoutTask* timeoutTask() const { return timeoutTask_; }

  // If a timeout task is attached, disconnect it in both directions so that
  // it becomes a no-op when the embedding eventually runs it.
  void maybeClearTimeout(AutoLockFutexAPI& lock);

 private:
  // Both of these pointers are borrowed pointers. The notifyTask is owned by
  // the runtime's cancellable list, while the timeout task (if it exists) is
  // owned by the embedding's timeout manager.
  //
  // Set by setNotifyTask immediately after construction, and reset by
  // resetNotifyTask when the notify task is getting deleted.
  // WaitAsyncNotifyTask is responsible for calling resetNotifyTask.
  WaitAsyncNotifyTask* notifyTask_ = nullptr;

  // Set by setTimeoutTask immediately after construction, and reset by
  // resetTimeoutTask when the timeout task is getting deleted.
  // WaitAsyncTimeoutTask is responsible for calling resetTimeoutTask.
  WaitAsyncTimeoutTask* timeoutTask_ = nullptr;
};
    831 
// When an async waiter from a different context is notified, this
// task is queued to resolve the promise on the thread to which it
// belongs.
//
// WaitAsyncNotifyTask (derived from OffThreadPromiseTask) is wrapping that
// promise. `Atomics.notify` can be called from any thread, but the promise
// must be resolved on the thread that owns it. To resolve the promise, we
// dispatch the task to enqueue a promise resolution task in the target's
// event loop.
//
// See [SMDOC] Atomics.wait for more details.
class WaitAsyncNotifyTask : public OffThreadPromiseTask {
 public:
  // What the promise is resolved with when this task runs ("ok",
  // "timed-out", or nothing at all for Dead).
  enum class Result { Ok, TimedOut, Dead };

 private:
  Result result_ = Result::Ok;

  // A back-edge to the waiter so that it can be cleaned up when the
  // Notify Task is dispatched and destroyed.
  //
  // Set by setWaiter immediately after construction, and reset by resetWaiter
  // when the waiter is getting deleted.  AsyncFutexWaiter is responsible for
  // calling resetWaiter.
  AsyncFutexWaiter* waiter_ = nullptr;

 public:
  WaitAsyncNotifyTask(JSContext* cx, Handle<PromiseObject*> promise)
      : OffThreadPromiseTask(cx, promise) {}

  ~WaitAsyncNotifyTask() override {
    // Break the back-edge so the waiter is not left with a dangling pointer.
    if (waiter_) {
      waiter_->resetNotifyTask();
    }
  }

  void setWaiter(AsyncFutexWaiter* waiter) {
    MOZ_ASSERT(!waiter_);
    waiter_ = waiter;
  }
  void resetWaiter() { waiter_ = nullptr; }

  // The unused AutoLockFutexAPI& parameter serves as evidence that the
  // caller holds the futex lock while mutating result_.
  void setResult(Result result, AutoLockFutexAPI& lock) { result_ = result; }

  // Runs on the promise's owning thread after dispatch.
  bool resolve(JSContext* cx, Handle<PromiseObject*> promise) override {
    RootedValue resultMsg(cx);
    switch (result_) {
      case Result::Ok:
        resultMsg = StringValue(cx->names().ok);
        break;
      case Result::TimedOut:
        resultMsg = StringValue(cx->names().timed_out_);
        break;
      case Result::Dead:
        // The underlying SharedArrayBuffer is no longer reachable, and no
        // timeout is associated with this waiter. The promise will never
        // resolve. There's nothing to do here.
        return true;
    }
    return PromiseObject::resolve(cx, promise, resultMsg);
  }

  void prepareForCancel() override;
};
    896 
// When an async waiter is created with a finite timeout, this task is handed
// to the embedding's event loop (via delayedDispatchToEventLoop) to be run
// once the timeout elapses. If the waiter has not been notified by then,
// run() resolves the waiter's promise with "timed-out" (by dispatching the
// waiter's notify task), frees the waiter, and deletes this task.
//
// (The previous comment here was a copy-paste of the WaitAsyncNotifyTask
// description; this task does not itself wrap a promise.)
//
// See [SMDOC] Atomics.wait for more details.
class WaitAsyncTimeoutTask : public JS::Dispatchable {
  // Set by the constructor, and reset by resetWaiter when the waiter is
  // getting deleted. AsyncFutexWaiter is responsible for calling resetWaiter.
  AsyncFutexWaiter* waiter_;

 public:
  explicit WaitAsyncTimeoutTask(AsyncFutexWaiter* waiter) : waiter_(waiter) {
    MOZ_ASSERT(waiter_);
  }
  ~WaitAsyncTimeoutTask() {
    // Break the back-edge so the waiter is not left with a dangling pointer.
    if (waiter_) {
      waiter_->resetTimeoutTask();
    }
  }

  void resetWaiter() { waiter_ = nullptr; }

  // Sever the waiter link in both directions; afterwards run() is a no-op.
  void clear(AutoLockFutexAPI&) {
    if (waiter_) {
      waiter_->resetTimeoutTask();
    }
    waiter_ = nullptr;
  }
  bool cleared(AutoLockFutexAPI&) { return !waiter_; }

  void run(JSContext*, MaybeShuttingDown maybeshuttingdown) final;
  void transferToRuntime() final;
};
    932 
// Break the back-edges from both tasks so neither is left holding a dangling
// pointer to this (now destroyed) waiter.
AsyncFutexWaiter::~AsyncFutexWaiter() {
  if (notifyTask_) {
    notifyTask_->resetWaiter();
  }
  if (timeoutTask_) {
    timeoutTask_->resetWaiter();
  }
}
    941 
    942 }  // namespace js
    943 
    944 // https://tc39.es/ecma262/#sec-addwaiter
    945 static void AddWaiter(SharedArrayRawBuffer* sarb, FutexWaiter* node,
    946                      AutoLockFutexAPI&) {
    947  FutexWaiterListNode* listHead = sarb->waiters();
    948 
    949  // Step 3: Append waiterRecord to WL.[[Waiters]].
    950  node->setNext(listHead);
    951  node->setPrev(listHead->prev());
    952  listHead->prev()->setNext(node);
    953  listHead->setPrev(node);
    954 }
    955 
    956 // https://tc39.es/ecma262/#sec-removewaiter
    957 static void RemoveWaiterImpl(FutexWaiterListNode* node, AutoLockFutexAPI&) {
    958  if (!node->prev()) {
    959    MOZ_ASSERT(!node->next());
    960    return;
    961  }
    962 
    963  node->prev()->setNext(node->next());
    964  node->next()->setPrev(node->prev());
    965 
    966  node->setNext(nullptr);
    967  node->setPrev(nullptr);
    968 }
    969 
// Sync waiters are stack allocated and can simply be removed from the list.
// (Their storage is reclaimed when AtomicsWait's frame unwinds.)
static void RemoveSyncWaiter(SyncFutexWaiter* waiter, AutoLockFutexAPI& lock) {
  RemoveWaiterImpl(waiter, lock);
}
    974 
    975 // Async waiters are heap allocated. After removing the waiter, the caller
    976 // is responsible for freeing it. Return the waiter to help enforce this.
    977 [[nodiscard]] AsyncFutexWaiter* RemoveAsyncWaiter(AsyncFutexWaiter* waiter,
    978                                                  AutoLockFutexAPI& lock) {
    979  RemoveWaiterImpl(waiter, lock);
    980  return waiter;
    981 }
    982 
    983 FutexWaiterListHead::~FutexWaiterListHead() {
    984  // Cleanup steps from 5. in SMDOC for Atomics.waitAsync
    985  // When a SharedArrayRawBuffer is no longer reachable, the contents of its
    986  // waiters list can no longer be notified. However, they can still resolve if
    987  // they have an associated timeout. When the list head goes away, we walk
    988  // through the remaining waiters and clean up the ones that don't have
    989  // timeouts. We leave the remaining waiters in a free-floating linked list;
    990  // they will remove themselves as the timeouts fire or the associated runtime
    991  // shuts down.
    992  AutoLockHelperThreadState helperLock;
    993  AutoLockFutexAPI lock;
    994 
    995  FutexWaiterListNode* iter = next();
    996  while (iter && iter != this) {
    997    // All remaining FutexWaiters must be async. A sync waiter can only exist if
    998    // a thread is waiting, and that thread must have a reference to the shared
    999    // array buffer it's waiting on, so that buffer can't be freed.
   1000 
   1001    AsyncFutexWaiter* removedWaiter =
   1002        RemoveAsyncWaiter(iter->toWaiter()->asAsync(), lock);
   1003    iter = iter->next();
   1004 
   1005    if (removedWaiter->hasTimeout() &&
   1006        !removedWaiter->timeoutTask()->cleared(lock)) {
   1007      // If a timeout task exists,  allow it to clean up the notify task when it
   1008      // runs. See the comment in WaitAsyncTimeoutTask::run() or the the SMDOC
   1009      // in this file.
   1010      continue;
   1011    }
   1012    // In the case that a timeout task does not exist, the two live raw
   1013    // pointers at this point are WaitAsyncNotifyTask and the
   1014    // AsyncFutexWaiter. We can clean them up here as there is no way to
   1015    // notify them without the SAB or without waiting for the shutdown of the
   1016    // JS::Context. In order to do this, we store the removed waiter in a
   1017    // unique ptr, so that it is cleared after this function, and dispatch and
   1018    // destroy the notify task.
   1019    UniquePtr<AsyncFutexWaiter> ownedWaiter(removedWaiter);
   1020    WaitAsyncNotifyTask* task = ownedWaiter->notifyTask();
   1021    task->setResult(WaitAsyncNotifyTask::Result::Dead, lock);
   1022    task->removeFromCancellableListAndDispatch(helperLock);
   1023  }
   1024 
   1025  RemoveWaiterImpl(this, lock);
   1026 }
   1027 
   1028 // Creates an object to use as the return value of Atomics.waitAsync.
   1029 static PlainObject* CreateAsyncResultObject(JSContext* cx, bool async,
   1030                                            HandleValue promiseOrString) {
   1031  Rooted<PlainObject*> resultObject(cx, NewPlainObject(cx));
   1032  if (!resultObject) {
   1033    return nullptr;
   1034  }
   1035 
   1036  RootedValue isAsync(cx, BooleanValue(async));
   1037  if (!NativeDefineDataProperty(cx, resultObject, cx->names().async, isAsync,
   1038                                JSPROP_ENUMERATE)) {
   1039    return nullptr;
   1040  }
   1041 
   1042  MOZ_ASSERT_IF(!async, promiseOrString.isString());
   1043  MOZ_ASSERT_IF(async, promiseOrString.isObject() &&
   1044                           promiseOrString.toObject().is<PromiseObject>());
   1045  if (!NativeDefineDataProperty(cx, resultObject, cx->names().value,
   1046                                promiseOrString, JSPROP_ENUMERATE)) {
   1047    return nullptr;
   1048  }
   1049 
   1050  return resultObject;
   1051 }
   1052 
// NOTE(review): presumably invoked by the OffThreadPromiseTask cancellation
// machinery during shutdown, before this task was ever dispatched — confirm
// against OffThreadPromiseRuntimeState.cpp.
void WaitAsyncNotifyTask::prepareForCancel() {
  AutoLockFutexAPI lock;
  // Take ownership: the waiter is unlinked and freed when this returns, and
  // any pending timeout task is defused so it no-ops when it fires.
  UniquePtr<AsyncFutexWaiter> waiter(RemoveAsyncWaiter(waiter_, lock));
  waiter->maybeClearTimeout(lock);
}
   1058 
// Runs when the embedding's delayed-dispatch timer fires. Either the waiter
// was already notified (cleared) and this is a no-op, or we time the waiter
// out: dispatch its notify task with result TimedOut and free the waiter.
// In all paths this task deletes itself.
void WaitAsyncTimeoutTask::run(JSContext* cx,
                               MaybeShuttingDown maybeShuttingDown) {
  AutoLockHelperThreadState helperLock;
  AutoLockFutexAPI lock;

  // If the waiter was notified while this task was enqueued, do nothing.
  if (cleared(lock)) {
    js_delete(this);
    return;
  }

  // Cleanup steps from 3. and 5. lifecycle in SMDOC for Atomics.waitAsync
  // Take ownership of the async waiter, so that it will be freed
  // when we return.
  UniquePtr<AsyncFutexWaiter> asyncWaiter(RemoveAsyncWaiter(waiter_, lock));
  // Drop the waiter's back-pointer to this task; we delete ourselves below.
  asyncWaiter->resetTimeoutTask();

  // Dispatch a task to resolve the promise with value "timed-out".
  WaitAsyncNotifyTask* task = asyncWaiter->notifyTask();
  task->setResult(WaitAsyncNotifyTask::Result::TimedOut, lock);
  task->removeFromCancellableListAndDispatch(helperLock);
  js_delete(this);
}
   1082 
// Called when the embedding could not (or will not) run this dispatchable.
void WaitAsyncTimeoutTask::transferToRuntime() {
  // Clear and delete. Clearing this task will result in the cancellable
  // notify task being cleaned up on shutdown, as it can no longer be
  // triggered. In a sense, the task "transferred" for cleanup is the notify
  // task.
  {
    AutoLockFutexAPI lock;
    clear(lock);
  }
  // As we are not managing any state, the runtime is not tracking this task,
  // and we have nothing to run, we can delete.
  js_delete(this);
}
   1095 
// Disconnect any pending timeout task from this waiter (and vice versa) so
// that it does nothing when the embedding eventually runs it. Safe to call
// when no timeout task exists.
void AsyncFutexWaiter::maybeClearTimeout(AutoLockFutexAPI& lock) {
  if (timeoutTask_) {
    // clear() also resets timeoutTask_ through the back-edge; the explicit
    // assignment below is a belt-and-braces null-out.
    timeoutTask_->clear(lock);
    timeoutTask_ = nullptr;
  }
}
   1102 
// DoWait Steps 17-31
// https://tc39.es/ecma262/#sec-dowait
//
// Checks the value, and if the wait should proceed, allocates and links the
// async waiter, its notify task, and (optionally) its timeout task. Returns
// NotEqual or TimedOut for the synchronous early-exit cases, Error on OOM,
// and OK once the waiter has been registered on |sarb|'s waiter list.
template <typename T>
static FutexThread::WaitResult AtomicsWaitAsyncCriticalSection(
    JSContext* cx, SharedArrayRawBuffer* sarb, size_t byteOffset, T value,
    const mozilla::Maybe<mozilla::TimeDuration>& timeout,
    Handle<PromiseObject*> promise) {
  // Step 17: Enter critical section.
  // We need to initialize an OffThreadPromiseTask inside this critical section.
  // To avoid deadlock, we claim the helper thread lock first.
  AutoLockHelperThreadState helperThreadLock;

  UniquePtr<WaitAsyncTimeoutTask> timeoutTask;
  {
    AutoLockFutexAPI futexLock;

    // Steps 18-20:
    SharedMem<T*> addr =
        sarb->dataPointerShared().cast<T*>() + (byteOffset / sizeof(T));
    if (jit::AtomicOperations::loadSafeWhenRacy(addr) != value) {
      return FutexThread::WaitResult::NotEqual;
    }

    // Step 21: a zero-length timeout resolves immediately with "timed-out".
    bool hasTimeout = timeout.isSome();
    if (hasTimeout && timeout.value().IsZero()) {
      return FutexThread::WaitResult::TimedOut;
    }

    // Steps 22-30
    // To handle potential failures, we split this up into two phases:
    // First, we allocate everything: the notify task, the waiter, and
    // (if necessary) the timeout task. The allocations are managed
    // using unique pointers, which will free them on failure. This
    // phase has no external side-effects.

    // Second, we transfer ownership of the allocations to the right places:
    // the waiter owns the notify task, the shared array buffer owns the waiter,
    // and the event loop owns the timeout task. This phase is infallible.
    auto notifyTask = js::MakeUnique<WaitAsyncNotifyTask>(cx, promise);
    if (!notifyTask) {
      JS_ReportOutOfMemory(cx);
      return FutexThread::WaitResult::Error;
    }
    auto waiter = js::MakeUnique<AsyncFutexWaiter>(cx, byteOffset);
    if (!waiter) {
      JS_ReportOutOfMemory(cx);
      return FutexThread::WaitResult::Error;
    }

    // Cross-link waiter and notify task (borrowed pointers in each direction).
    notifyTask->setWaiter(waiter.get());
    waiter->setNotifyTask(notifyTask.get());

    if (hasTimeout) {
      timeoutTask = js::MakeUnique<WaitAsyncTimeoutTask>(waiter.get());
      if (!timeoutTask) {
        JS_ReportOutOfMemory(cx);
        return FutexThread::WaitResult::Error;
      }
      waiter->setTimeoutTask(timeoutTask.get());
    }

    // This is the last fallible operation. If it fails, all allocations
    // will be freed. init has no side-effects if it fails.
    if (!js::OffThreadPromiseTask::InitCancellable(cx, helperThreadLock,
                                                   std::move(notifyTask))) {
      return FutexThread::WaitResult::Error;
    }

    // Below this point, everything is infallible.
    AddWaiter(sarb, waiter.release(), futexLock);
  }  // End of futexLock critical section

  // We dispatch the task after leaving the critical section to avoid
  // potential deadlock if the dispatch callback has internal locking.
  // See bug 1980271.
  if (timeoutTask) {
    OffThreadPromiseRuntimeState& state =
        cx->runtime()->offThreadPromiseState.ref();
    // We are not tracking the dispatch of the timeout task using the
    // OffThreadPromiseRuntimeState, so we ignore the return value. If this
    // fails, the embeddings should call transferToRuntime on timeoutTask
    // which will clear itself, and set the notify task to be cleaned on
    // shutdown.
    (void)state.delayedDispatchToEventLoop(std::move(timeoutTask),
                                           timeout.value().ToMilliseconds());
  }

  // Step 31: Leave critical section.
  return FutexThread::WaitResult::OK;
}
   1194 
   1195 // DoWait steps 12-35
   1196 // https://tc39.es/ecma262/#sec-dowait
   1197 // This implements the mode=ASYNC case.
   1198 template <typename T>
   1199 static PlainObject* AtomicsWaitAsync(
   1200    JSContext* cx, SharedArrayRawBuffer* sarb, size_t byteOffset, T value,
   1201    const mozilla::Maybe<mozilla::TimeDuration>& timeout) {
   1202  // Step 16a.
   1203  Rooted<PromiseObject*> promiseObject(
   1204      cx, CreatePromiseObjectWithoutResolutionFunctions(cx));
   1205  if (!promiseObject) {
   1206    return nullptr;
   1207  }
   1208 
   1209  // Steps 17-31
   1210  switch (AtomicsWaitAsyncCriticalSection(cx, sarb, byteOffset, value, timeout,
   1211                                          promiseObject)) {
   1212    case FutexThread::WaitResult::NotEqual: {
   1213      // Steps 16b, 20c-e
   1214      RootedValue msg(cx, StringValue(cx->names().not_equal_));
   1215      return CreateAsyncResultObject(cx, false, msg);
   1216    }
   1217    case FutexThread::WaitResult::TimedOut: {
   1218      // Steps 16b, 21c-e
   1219      RootedValue msg(cx, StringValue(cx->names().timed_out_));
   1220      return CreateAsyncResultObject(cx, false, msg);
   1221    }
   1222    case FutexThread::WaitResult::Error:
   1223      return nullptr;
   1224    case FutexThread::WaitResult::OK:
   1225      break;
   1226  }
   1227 
   1228  // Steps 15b, 33-35
   1229  RootedValue objectValue(cx, ObjectValue(*promiseObject));
   1230  return CreateAsyncResultObject(cx, true, objectValue);
   1231 }
   1232 
// DoWait steps 12-32
// https://tc39.es/ecma262/#sec-dowait
// This implements the mode=SYNC case.
//
// Blocks the calling thread until it is notified, times out, or wait()
// otherwise returns; the result is the corresponding WaitResult.
template <typename T>
static FutexThread::WaitResult AtomicsWait(
    JSContext* cx, SharedArrayRawBuffer* sarb, size_t byteOffset, T value,
    const mozilla::Maybe<mozilla::TimeDuration>& timeout) {
  // Validation and other guards should ensure that this does not happen.
  MOZ_ASSERT(sarb, "wait is only applicable to shared memory");

  SharedMem<T*> addr =
      sarb->dataPointerShared().cast<T*>() + (byteOffset / sizeof(T));

  // Steps 17 and 31 (through destructor).
  // This lock also protects the "waiters" field on SharedArrayRawBuffer,
  // and it provides the necessary memory fence.
  AutoLockFutexAPI lock;

  // Steps 18-20.
  if (jit::AtomicOperations::loadSafeWhenRacy(addr) != value) {
    return FutexThread::WaitResult::NotEqual;
  }

  // Steps 14, 22-27
  // The waiter lives on this stack frame; it is linked below and unlinked
  // again before this function returns.
  SyncFutexWaiter w(cx, byteOffset);

  // Steps 28-29
  AddWaiter(sarb, &w, lock);
  // wait() is handed lock.unique(); presumably it releases the lock while
  // blocked and reacquires it before returning — see FutexThread::wait.
  FutexThread::WaitResult retval = cx->fx.wait(cx, lock.unique(), timeout);
  RemoveSyncWaiter(&w, lock);

  // Step 32
  return retval;
}
   1267 
// Public entry point for a synchronous wait on an Int32Array element.
FutexThread::WaitResult js::atomics_wait_impl(
    JSContext* cx, SharedArrayRawBuffer* sarb, size_t byteOffset, int32_t value,
    const mozilla::Maybe<mozilla::TimeDuration>& timeout) {
  return AtomicsWait(cx, sarb, byteOffset, value, timeout);
}
   1273 
// Public entry point for a synchronous wait on a BigInt64Array element.
FutexThread::WaitResult js::atomics_wait_impl(
    JSContext* cx, SharedArrayRawBuffer* sarb, size_t byteOffset, int64_t value,
    const mozilla::Maybe<mozilla::TimeDuration>& timeout) {
  return AtomicsWait(cx, sarb, byteOffset, value, timeout);
}
   1279 
// Public entry point for an asynchronous wait on an Int32Array element.
PlainObject* js::atomics_wait_async_impl(
    JSContext* cx, SharedArrayRawBuffer* sarb, size_t byteOffset, int32_t value,
    const mozilla::Maybe<mozilla::TimeDuration>& timeout) {
  return AtomicsWaitAsync(cx, sarb, byteOffset, value, timeout);
}
   1285 
// Public entry point for an asynchronous wait on a BigInt64Array element.
PlainObject* js::atomics_wait_async_impl(
    JSContext* cx, SharedArrayRawBuffer* sarb, size_t byteOffset, int64_t value,
    const mozilla::Maybe<mozilla::TimeDuration>& timeout) {
  return AtomicsWaitAsync(cx, sarb, byteOffset, value, timeout);
}
   1291 
// https://tc39.es/ecma262/#sec-dowait
// DoWait ( mode, typedArray, index, value, timeout ), steps 8-35.
//
// Normalizes the timeout, checks that blocking is permitted for sync waits,
// computes the byte offset of the waited-on element, and delegates to the
// sync or async implementation. On success |r| holds the result value: a
// status string for sync waits, a { async, value } object for async waits.
template <typename T>
static bool DoAtomicsWait(JSContext* cx, bool isAsync,
                          Handle<TypedArrayObject*> unwrappedTypedArray,
                          size_t index, T value, HandleValue timeoutv,
                          MutableHandleValue r) {
  mozilla::Maybe<mozilla::TimeDuration> timeout;
  if (!timeoutv.isUndefined()) {
    // Step 8.
    double timeout_ms;
    if (!ToNumber(cx, timeoutv, &timeout_ms)) {
      return false;
    }

    // Step 9.
    // NaN and +Infinity leave |timeout| as Nothing (wait indefinitely);
    // negative values clamp to a zero-length wait.
    if (!std::isnan(timeout_ms)) {
      if (timeout_ms < 0) {
        timeout = mozilla::Some(mozilla::TimeDuration::FromSeconds(0.0));
      } else if (!std::isinf(timeout_ms)) {
        timeout =
            mozilla::Some(mozilla::TimeDuration::FromMilliseconds(timeout_ms));
      }
    }
  }

  // Step 10. Blocking waits may be disallowed on this thread; async waits
  // are always allowed.
  if (!isAsync && !cx->fx.canWait()) {
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_ATOMICS_WAIT_NOT_ALLOWED);
    return false;
  }

  // Step 11.
  Rooted<SharedArrayBufferObject*> unwrappedSab(
      cx, unwrappedTypedArray->bufferShared());

  // Step 12
  mozilla::Maybe<size_t> offset = unwrappedTypedArray->byteOffset();
  MOZ_ASSERT(
      offset,
      "offset can't become invalid because shared buffers can only grow");

  // Step 13.
  // The computation will not overflow because range checks have been
  // performed.
  size_t byteIndexInBuffer = index * sizeof(T) + *offset;

  // Steps 14-35.
  if (isAsync) {
    PlainObject* resultObject = atomics_wait_async_impl(
        cx, unwrappedSab->rawBufferObject(), byteIndexInBuffer, value, timeout);
    if (!resultObject) {
      return false;
    }
    r.setObject(*resultObject);
    return true;
  }

  switch (atomics_wait_impl(cx, unwrappedSab->rawBufferObject(),
                            byteIndexInBuffer, value, timeout)) {
    case FutexThread::WaitResult::NotEqual:
      r.setString(cx->names().not_equal_);
      return true;
    case FutexThread::WaitResult::OK:
      r.setString(cx->names().ok);
      return true;
    case FutexThread::WaitResult::TimedOut:
      r.setString(cx->names().timed_out_);
      return true;
    case FutexThread::WaitResult::Error:
      return false;
    default:
      MOZ_CRASH("Should not happen");
  }
}
   1368 
// https://tc39.es/ecma262/#sec-dowait
// DoWait ( mode, typedArray, index, value, timeout )
//
// Shared front half of Atomics.wait and Atomics.waitAsync: validates the
// typed array (must be a shared-memory Int32Array or BigInt64Array), the
// index, and the value, then hands off to DoAtomicsWait.
static bool DoWait(JSContext* cx, bool isAsync, HandleValue objv,
                   HandleValue index, HandleValue valv, HandleValue timeoutv,
                   MutableHandleValue r) {
  // Steps 1-2.
  Rooted<TypedArrayObject*> unwrappedTypedArray(cx);
  if (!ValidateIntegerTypedArray(cx, objv, true, AccessMode::Read,
                                 &unwrappedTypedArray)) {
    return false;
  }
  MOZ_ASSERT(unwrappedTypedArray->type() == Scalar::Int32 ||
             unwrappedTypedArray->type() == Scalar::BigInt64);

  // Step 3: waiting on non-shared memory is an error.
  if (!unwrappedTypedArray->isSharedMemory()) {
    return ReportBadArrayType(cx);
  }

  // Step 4.
  size_t intIndex;
  if (!ValidateAtomicAccess(cx, unwrappedTypedArray, index, &intIndex)) {
    return false;
  }

  // Step 5
  if (unwrappedTypedArray->type() == Scalar::Int32) {
    // Step 7.
    int32_t value;
    if (!ToInt32(cx, valv, &value)) {
      return false;
    }

    // Steps 8-35.
    return DoAtomicsWait(cx, isAsync, unwrappedTypedArray, intIndex, value,
                         timeoutv, r);
  }

  MOZ_ASSERT(unwrappedTypedArray->type() == Scalar::BigInt64);

  // Step 6.
  RootedBigInt value(cx, ToBigInt(cx, valv));
  if (!value) {
    return false;
  }

  // Steps 8-35.
  return DoAtomicsWait(cx, isAsync, unwrappedTypedArray, intIndex,
                       BigInt::toInt64(value), timeoutv, r);
}
   1419 
   1420 // 24.4.11 Atomics.wait ( typedArray, index, value, timeout )
   1421 // https://tc39.es/ecma262/#sec-atomics.wait
   1422 static bool atomics_wait(JSContext* cx, unsigned argc, Value* vp) {
   1423  CallArgs args = CallArgsFromVp(argc, vp);
   1424  HandleValue objv = args.get(0);
   1425  HandleValue index = args.get(1);
   1426  HandleValue valv = args.get(2);
   1427  HandleValue timeoutv = args.get(3);
   1428  MutableHandleValue r = args.rval();
   1429 
   1430  return DoWait(cx, /*isAsync = */ false, objv, index, valv, timeoutv, r);
   1431 }
   1432 
   1433 // Atomics.waitAsync ( typedArray, index, value, timeout )
   1434 // https://tc39.es/ecma262/#sec-atomics.waitasync
   1435 static bool atomics_wait_async(JSContext* cx, unsigned argc, Value* vp) {
   1436  CallArgs args = CallArgsFromVp(argc, vp);
   1437  HandleValue objv = args.get(0);
   1438  HandleValue index = args.get(1);
   1439  HandleValue valv = args.get(2);
   1440  HandleValue timeoutv = args.get(3);
   1441  MutableHandleValue r = args.rval();
   1442 
   1443  return DoWait(cx, /*isAsync = */ true, objv, index, valv, timeoutv, r);
   1444 }
   1445 
// Atomics.notify ( typedArray, index, count ), steps 8-13.
// https://tc39.es/ecma262/#sec-atomics.notify
//
// Wakes up to |count| waiters (all of them if count < 0) registered at
// |byteOffset| on |sarb|, storing the number woken in |*woken|. Sync waiters
// are woken via their FutexThread; async waiters have their promise resolved
// either directly (same context) or via a dispatched notify task.
bool js::atomics_notify_impl(JSContext* cx, SharedArrayRawBuffer* sarb,
                             size_t byteOffset, int64_t count, int64_t* woken) {
  MOZ_ASSERT(woken);

  // Validation should ensure this does not happen.
  MOZ_ASSERT(sarb, "notify is only applicable to shared memory");

  // Step 8
  *woken = 0;

  Rooted<GCVector<PromiseObject*>> promisesToResolve(
      cx, GCVector<PromiseObject*>(cx));
  {
    // Steps 9, 12 (through destructor).
    AutoLockHelperThreadState helperLock;
    AutoLockFutexAPI lock;
    // Steps 10-11
    FutexWaiterListNode* waiterListHead = sarb->waiters();
    FutexWaiterListNode* iter = waiterListHead->next();
    while (count && iter != waiterListHead) {
      FutexWaiter* waiter = iter->toWaiter();
      // Advance before any removal below invalidates iter's links.
      iter = iter->next();
      if (byteOffset != waiter->offset()) {
        continue;
      }
      if (waiter->isSync()) {
        // For sync waits, the context to notify is currently sleeping.
        // We notify that context (unless it's already been notified by
        // another thread).
        if (!waiter->cx()->fx.isWaiting()) {
          continue;
        }
        waiter->cx()->fx.notify(FutexThread::NotifyExplicit);
      } else {
        // For async waits, we resolve a promise.

        // Steps to clean up case 1. and 2. in SMDOC for Atomics.waitAsync
        // Take ownership of the async waiter, so that it will be
        // freed at the end of this block.
        UniquePtr<AsyncFutexWaiter> asyncWaiter(
            RemoveAsyncWaiter(waiter->asAsync(), lock));
        asyncWaiter->maybeClearTimeout(lock);
        // If we are notifying a waiter that was created by the current
        // context, we resolve the promise directly instead of dispatching
        // a task to the event loop.
        OffThreadPromiseTask* task = asyncWaiter->notifyTask();
        if (waiter->cx() == cx) {
          // Add the promise to a list to resolve as soon as we've left the
          // critical section.
          PromiseObject* promise =
              OffThreadPromiseTask::ExtractAndForget(task, helperLock);
          if (!promisesToResolve.append(promise)) {
            return false;
          }
        } else {
          // Dispatch a task to resolve the promise with value "ok".
          task->removeFromCancellableListAndDispatch(helperLock);
        }
      }
      // Overflow will be a problem only in two cases:
      // (1) 128-bit systems with substantially more than 2^64 bytes of
      //     memory per process, and a very lightweight
      //     Atomics.waitAsync().  Obviously a future problem.
      // (2) Bugs.
      MOZ_RELEASE_ASSERT(*woken < INT64_MAX);
      (*woken)++;
      // A negative count means "notify all": only decrement toward zero
      // when the caller gave a finite count.
      if (count > 0) {
        --count;
      }
    }
  }

  // Step 10 (reordered)
  // We resolve same-thread promises after we've left the critical section to
  // avoid mutex ordering problems.
  RootedValue resultMsg(cx, StringValue(cx->names().ok));
  for (uint32_t i = 0; i < promisesToResolve.length(); i++) {
    if (!PromiseObject::resolve(cx, promisesToResolve[i], resultMsg)) {
      MOZ_ASSERT(cx->isThrowingOutOfMemory() || cx->isThrowingOverRecursed());
      return false;
    }
  }

  // Step 13.
  return true;
}
   1534 
   1535 // ES2021 draft rev bd868f20b8c574ad6689fba014b62a1dba819e56
   1536 // 24.4.12 Atomics.notify ( typedArray, index, count )
   1537 static bool atomics_notify(JSContext* cx, unsigned argc, Value* vp) {
   1538  CallArgs args = CallArgsFromVp(argc, vp);
   1539  HandleValue objv = args.get(0);
   1540  HandleValue index = args.get(1);
   1541  HandleValue countv = args.get(2);
   1542  MutableHandleValue r = args.rval();
   1543 
   1544  // Step 1.
   1545  Rooted<TypedArrayObject*> unwrappedTypedArray(cx);
   1546  if (!ValidateIntegerTypedArray(cx, objv, true, AccessMode::Read,
   1547                                 &unwrappedTypedArray)) {
   1548    return false;
   1549  }
   1550  MOZ_ASSERT(unwrappedTypedArray->type() == Scalar::Int32 ||
   1551             unwrappedTypedArray->type() == Scalar::BigInt64);
   1552 
   1553  // Step 2.
   1554  size_t intIndex;
   1555  if (!ValidateAtomicAccess(cx, unwrappedTypedArray, index, &intIndex)) {
   1556    return false;
   1557  }
   1558 
   1559  // Steps 3-4.
   1560  int64_t count;
   1561  if (countv.isUndefined()) {
   1562    count = -1;
   1563  } else {
   1564    double dcount;
   1565    if (!ToInteger(cx, countv, &dcount)) {
   1566      return false;
   1567    }
   1568    if (dcount < 0.0) {
   1569      dcount = 0.0;
   1570    }
   1571    count = dcount < double(1ULL << 63) ? int64_t(dcount) : -1;
   1572  }
   1573 
   1574  // https://github.com/tc39/ecma262/pull/1908
   1575  if (!unwrappedTypedArray->isSharedMemory()) {
   1576    r.setInt32(0);
   1577    return true;
   1578  }
   1579 
   1580  // Step 5.
   1581  Rooted<SharedArrayBufferObject*> unwrappedSab(
   1582      cx, unwrappedTypedArray->bufferShared());
   1583 
   1584  // Step 6.
   1585  mozilla::Maybe<size_t> offset = unwrappedTypedArray->byteOffset();
   1586  MOZ_ASSERT(
   1587      offset,
   1588      "offset can't become invalid because shared buffers can only grow");
   1589 
   1590  // Steps 7-9.
   1591  // The computation will not overflow because range checks have been
   1592  // performed.
   1593  size_t elementSize = Scalar::byteSize(unwrappedTypedArray->type());
   1594  size_t indexedPosition = intIndex * elementSize + *offset;
   1595 
   1596  // Steps 10-16.
   1597 
   1598  int64_t woken = 0;
   1599  if (!atomics_notify_impl(cx, unwrappedSab->rawBufferObject(), indexedPosition,
   1600                           count, &woken)) {
   1601    return false;
   1602  }
   1603 
   1604  r.setNumber(double(woken));
   1605 
   1606  return true;
   1607 }
   1608 
   1609 /**
   1610 * Atomics.pause ( [ N ] )
   1611 *
   1612 * https://tc39.es/proposal-atomics-microwait/
   1613 */
   1614 static bool atomics_pause(JSContext* cx, unsigned argc, Value* vp) {
   1615  CallArgs args = CallArgsFromVp(argc, vp);
   1616 
   1617  // Step 1.
   1618  if (args.hasDefined(0)) {
   1619    if (!args[0].isNumber() || !IsInteger(args[0].toNumber())) {
   1620      JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
   1621                                JSMSG_ATOMICS_PAUSE_BAD_COUNT);
   1622      return false;
   1623    }
   1624  }
   1625 
   1626  // Step 2.
   1627  //
   1628  // We ignore the iteration count when not inlining this operation.
   1629  jit::AtomicOperations::pause();
   1630 
   1631  // Step 3.
   1632  args.rval().setUndefined();
   1633  return true;
   1634 }
   1635 
   1636 /* static */
   1637 bool js::FutexThread::initialize() {
   1638  MOZ_ASSERT(!lock_);
   1639  lock_ = js_new<js::Mutex>(mutexid::FutexThread);
   1640  return lock_ != nullptr;
   1641 }
   1642 
   1643 /* static */
   1644 void js::FutexThread::destroy() {
   1645  if (lock_) {
   1646    js::Mutex* lock = lock_;
   1647    js_delete(lock);
   1648    lock_ = nullptr;
   1649  }
   1650 }
   1651 
   1652 /* static */
   1653 void js::FutexThread::lock() {
   1654  // Load the atomic pointer.
   1655  js::Mutex* lock = lock_;
   1656 
   1657  lock->lock();
   1658 }
   1659 
// Process-wide mutex guarding all futex state; created in
// FutexThread::initialize() and freed in FutexThread::destroy().
/* static */ mozilla::Atomic<js::Mutex*, mozilla::SequentiallyConsistent>
   FutexThread::lock_;
   1662 
   1663 /* static */
   1664 void js::FutexThread::unlock() {
   1665  // Load the atomic pointer.
   1666  js::Mutex* lock = lock_;
   1667 
   1668  lock->unlock();
   1669 }
   1670 
// A fresh FutexThread starts Idle with waiting disallowed; the condition
// variable is allocated later by initInstance().
js::FutexThread::FutexThread()
   : cond_(nullptr), state_(Idle), canWait_(false) {}
   1673 
   1674 bool js::FutexThread::initInstance() {
   1675  MOZ_ASSERT(lock_);
   1676  cond_ = js_new<js::ConditionVariable>();
   1677  return cond_ != nullptr;
   1678 }
   1679 
   1680 void js::FutexThread::destroyInstance() {
   1681  if (cond_) {
   1682    js_delete(cond_);
   1683  }
   1684 }
   1685 
   1686 bool js::FutexThread::isWaiting() {
   1687  // When a worker is awoken for an interrupt it goes into state
   1688  // WaitingNotifiedForInterrupt for a short time before it actually
   1689  // wakes up and goes into WaitingInterrupted.  In those states the
   1690  // worker is still waiting, and if an explicit notify arrives the
   1691  // worker transitions to Woken.  See further comments in
   1692  // FutexThread::wait().
   1693  return state_ == Waiting || state_ == WaitingInterrupted ||
   1694         state_ == WaitingNotifiedForInterrupt;
   1695 }
   1696 
// Block the calling context until it is notified, interrupted, or the
// optional timeout expires.  `locked` must hold the process-wide futex lock.
// The wait is performed in bounded slices so very long timeouts behave on
// every platform.  Returns OK on explicit wakeup, TimedOut when the deadline
// passes, and Error when an error has been reported on cx (waiting while
// already inside an interrupt handler, or a failed interrupt handler).
FutexThread::WaitResult js::FutexThread::wait(
   JSContext* cx, js::UniqueLock<js::Mutex>& locked,
   const mozilla::Maybe<mozilla::TimeDuration>& timeout) {
 MOZ_ASSERT(&cx->fx == this);
 MOZ_ASSERT(cx->fx.canWait());
 MOZ_ASSERT(state_ == Idle || state_ == WaitingInterrupted);

 // Disallow waiting when a runtime is processing an interrupt.
 // See explanation below.

 if (state_ == WaitingInterrupted) {
   UnlockGuard unlock(locked);
   JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                             JSMSG_ATOMICS_WAIT_NOT_ALLOWED);
   return WaitResult::Error;
 }

 // Go back to Idle after returning.
 auto onFinish = mozilla::MakeScopeExit([&] { state_ = Idle; });

 const bool isTimed = timeout.isSome();

 // Absolute deadline for the whole wait; absent for an untimed wait.
 auto finalEnd = timeout.map([](const mozilla::TimeDuration& timeout) {
   return mozilla::TimeStamp::Now() + timeout;
 });

 // 4000s is about the longest timeout slice that is guaranteed to
 // work cross-platform.
 auto maxSlice = mozilla::TimeDuration::FromSeconds(4000.0);

 for (;;) {
   // If we are doing a timed wait, calculate the end time for this wait
   // slice.
   auto sliceEnd = finalEnd.map([&](mozilla::TimeStamp& finalEnd) {
     auto sliceEnd = mozilla::TimeStamp::Now() + maxSlice;
     if (finalEnd < sliceEnd) {
       sliceEnd = finalEnd;
     }
     return sliceEnd;
   });

   state_ = Waiting;

   // The embedding's before/after wait callbacks must be installed (or
   // absent) as a pair.
   MOZ_ASSERT((cx->runtime()->beforeWaitCallback == nullptr) ==
              (cx->runtime()->afterWaitCallback == nullptr));
   mozilla::DebugOnly<bool> callbacksPresent =
       cx->runtime()->beforeWaitCallback != nullptr;

   // Tell the embedding we are about to block; it may return a cookie
   // (possibly constructed inside clientMemory) that we hand back to the
   // after-wait callback.
   void* cookie = nullptr;
   uint8_t clientMemory[JS::WAIT_CALLBACK_CLIENT_MAXMEM];
   if (cx->runtime()->beforeWaitCallback) {
     cookie = (*cx->runtime()->beforeWaitCallback)(clientMemory);
   }

   // Sleep until notified, or (for timed waits) until this slice ends.
   if (isTimed) {
     (void)cond_->wait_until(locked, *sliceEnd);
   } else {
     cond_->wait(locked);
   }

   MOZ_ASSERT((cx->runtime()->afterWaitCallback != nullptr) ==
              callbacksPresent);
   if (cx->runtime()->afterWaitCallback) {
     (*cx->runtime()->afterWaitCallback)(cookie);
   }

   switch (state_) {
     case FutexThread::Waiting:
       // Timeout or spurious wakeup.
       if (isTimed) {
         auto now = mozilla::TimeStamp::Now();
         if (now >= *finalEnd) {
           return WaitResult::TimedOut;
         }
       }
       break;

     case FutexThread::Woken:
       return WaitResult::OK;

     case FutexThread::WaitingNotifiedForInterrupt:
       // The interrupt handler may reenter the engine.  In that case
       // there are two complications:
       //
       // - The waiting thread is not actually waiting on the
       //   condition variable so we have to record that it
       //   should be woken when the interrupt handler returns.
       //   To that end, we flag the thread as interrupted around
       //   the interrupt and check state_ when the interrupt
       //   handler returns.  A notify() call that reaches the
       //   runtime during the interrupt sets state_ to Woken.
       //
       // - It is in principle possible for wait() to be
       //   reentered on the same thread/runtime and waiting on the
       //   same location and to yet again be interrupted and enter
       //   the interrupt handler.  In this case, it is important
       //   that when another agent notifies waiters, all waiters using
       //   the same runtime on the same location are woken in LIFO
       //   order; FIFO may be the required order, but FIFO would
       //   fail to wake up the innermost call.  Interrupts are
       //   outside any spec anyway.  Also, several such suspended
       //   waiters may be woken at a time.
       //
       //   For the time being we disallow waiting from within code
       //   that runs from within an interrupt handler; this may
       //   occasionally (very rarely) be surprising but is
       //   expedient.  Other solutions exist, see bug #1131943.  The
       //   code that performs the check is above, at the head of
       //   this function.

       state_ = WaitingInterrupted;
       {
         UnlockGuard unlock(locked);
         if (!cx->handleInterrupt()) {
           return WaitResult::Error;
         }
       }
       // A notify() that arrived while the interrupt handler ran set
       // state_ to Woken; honor it now.
       if (state_ == Woken) {
         return WaitResult::OK;
       }
       break;

     default:
       MOZ_CRASH("Bad FutexState in wait()");
   }
 }
}
   1824 
// Wake this FutexThread from a wait.  `reason` distinguishes an explicit
// Atomics.notify wakeup from a wakeup intended to run the JS interrupt
// handler.  The caller holds the process-wide futex lock.
void js::FutexThread::notify(NotifyReason reason) {
 MOZ_ASSERT(isWaiting());

 // If the thread is inside (or about to enter) the interrupt handler, it
 // is not blocked on cond_, so there is nothing to signal.  Just record
 // that an explicit wakeup happened; wait() re-checks state_ when the
 // interrupt handler returns.
 if ((state_ == WaitingInterrupted || state_ == WaitingNotifiedForInterrupt) &&
     reason == NotifyExplicit) {
   state_ = Woken;
   return;
 }
 switch (reason) {
   case NotifyExplicit:
     state_ = Woken;
     break;
   case NotifyForJSInterrupt:
     // Already flagged for an interrupt; don't signal cond_ again.
     if (state_ == WaitingNotifiedForInterrupt) {
       return;
     }
     state_ = WaitingNotifiedForInterrupt;
     break;
   default:
     MOZ_CRASH("bad NotifyReason in FutexThread::notify()");
 }
 cond_->notify_all();
}
   1848 
// Methods installed on the Atomics namespace object.  The load/store and
// read-modify-write operations (and pause) are inlinable by the JITs;
// wait/waitAsync/notify are plain natives, with "wake" kept as a legacy
// alias for "notify".
const JSFunctionSpec AtomicsMethods[] = {
   JS_INLINABLE_FN("compareExchange", atomics_compareExchange, 4, 0,
                   AtomicsCompareExchange),
   JS_INLINABLE_FN("load", atomics_load, 2, 0, AtomicsLoad),
   JS_INLINABLE_FN("store", atomics_store, 3, 0, AtomicsStore),
   JS_INLINABLE_FN("exchange", atomics_exchange, 3, 0, AtomicsExchange),
   JS_INLINABLE_FN("add", atomics_add, 3, 0, AtomicsAdd),
   JS_INLINABLE_FN("sub", atomics_sub, 3, 0, AtomicsSub),
   JS_INLINABLE_FN("and", atomics_and, 3, 0, AtomicsAnd),
   JS_INLINABLE_FN("or", atomics_or, 3, 0, AtomicsOr),
   JS_INLINABLE_FN("xor", atomics_xor, 3, 0, AtomicsXor),
   JS_INLINABLE_FN("isLockFree", atomics_isLockFree, 1, 0, AtomicsIsLockFree),
   JS_FN("wait", atomics_wait, 4, 0),
   JS_FN("waitAsync", atomics_wait_async, 4, 0),
   JS_FN("notify", atomics_notify, 3, 0),
   JS_FN("wake", atomics_notify, 3, 0),  // Legacy name
   JS_INLINABLE_FN("pause", atomics_pause, 0, 0, AtomicsPause),
   JS_FS_END,
};
   1868 
// Non-method properties of the Atomics object: a read-only
// Symbol.toStringTag whose value is "Atomics".
static const JSPropertySpec AtomicsProperties[] = {
   JS_STRING_SYM_PS(toStringTag, "Atomics", JSPROP_READONLY),
   JS_PS_END,
};
   1873 
   1874 static JSObject* CreateAtomicsObject(JSContext* cx, JSProtoKey key) {
   1875  RootedObject proto(cx, &cx->global()->getObjectPrototype());
   1876  return NewTenuredObjectWithGivenProto(cx, &AtomicsObject::class_, proto);
   1877 }
   1878 
// ClassSpec wiring for the Atomics namespace object; methods and
// properties are installed on the object itself (there is no prototype).
static const ClassSpec AtomicsClassSpec = {
   CreateAtomicsObject,  // createConstructor: builds the namespace object
   nullptr,              // createPrototype: none — Atomics has no prototype
   AtomicsMethods,       // constructorFunctions
   AtomicsProperties,    // constructorProperties (@@toStringTag)
};
   1885 
// JSClass for the Atomics singleton.  The cached-proto flag lets the
// engine create the object lazily and cache it under JSProto_Atomics;
// there are no class ops beyond the ClassSpec above.
const JSClass AtomicsObject::class_ = {
   "Atomics",
   JSCLASS_HAS_CACHED_PROTO(JSProto_Atomics),
   JS_NULL_CLASS_OPS,
   &AtomicsClassSpec,
};