tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

AtomicOperations-shared-jit.cpp (9077B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=4 et sw=4 tw=99:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "mozilla/Assertions.h"
      8 #include "mozilla/Attributes.h"
      9 #include "mozilla/MathAlgorithms.h"
     10 
     11 #include <atomic>
     12 #include <stddef.h>
     13 #include <stdint.h>
     14 #include <stdlib.h>
     15 #include <tuple>
     16 #include <utility>
     17 
     18 #include "jit/AtomicOperations.h"
     19 #include "js/GCAPI.h"
     20 
     21 #if defined(__arm__)
     22 #  include "jit/arm/Architecture-arm.h"
     23 #endif
     24 
     25 #ifdef JS_HAVE_GENERATED_ATOMIC_OPS
     26 
     27 using namespace js;
     28 using namespace js::jit;
     29 
     30 // A "block" is a sequence of bytes that is a reasonable quantum to copy to
     31 // amortize call overhead when implementing memcpy and memmove.  A block will
     32 // not fit in registers on all platforms and copying it without using
     33 // intermediate memory will therefore be sensitive to overlap.
     34 //
     35 // A "word" is an item that we can copy using only register intermediate storage
     36 // on all platforms; words can be individually copied without worrying about
     37 // overlap.
     38 //
     39 // Blocks and words can be aligned or unaligned; specific (generated) copying
     40 // functions handle this in platform-specific ways.
     41 
// Copy quanta.  A "word" is pointer-sized; a "block" is several words copied
// per call to amortize call overhead (see the comment block above).
static constexpr size_t WORDSIZE = sizeof(uintptr_t);
static constexpr size_t BLOCKSIZE = 8 * WORDSIZE;  // Must be a power of 2

static_assert(BLOCKSIZE % WORDSIZE == 0,
             "A block is an integral number of words");

// Constants must match the ones in GenerateAtomicOperations.py
static_assert(JS_GENERATED_ATOMICS_BLOCKSIZE == BLOCKSIZE);
static_assert(JS_GENERATED_ATOMICS_WORDSIZE == WORDSIZE);

// Low-bit masks for alignment tests and for rounding byte counts down to a
// whole number of words/blocks; valid because both sizes are powers of two.
static constexpr size_t WORDMASK = WORDSIZE - 1;
static constexpr size_t BLOCKMASK = BLOCKSIZE - 1;
     54 
     55 namespace js {
     56 namespace jit {
     57 
     58 static bool UnalignedAccessesAreOK() {
     59 #  ifdef DEBUG
     60  const char* flag = getenv("JS_NO_UNALIGNED_MEMCPY");
     61  if (flag && *flag == '1') return false;
     62 #  endif
     63 #  if defined(__x86_64__) || defined(__i386__)
     64  return true;
     65 #  elif defined(__arm__)
     66  return !ARMFlags::HasAlignmentFault();
     67 #  elif defined(__aarch64__)
     68  // This is not necessarily true but it's the best guess right now.
     69  return true;
     70 #  else
     71 #    error "Unsupported platform"
     72 #  endif
     73 }
     74 
#  ifndef JS_64BIT
// Compiler-only fence: stops the compiler from reordering memory accesses
// across this point without emitting any hardware barrier instruction.
// Only compiled on 32-bit targets (JS_64BIT not defined).
void AtomicCompilerFence() {
  std::atomic_signal_fence(std::memory_order_acq_rel);
}
#  endif
     80 
     81 /**
     82 * Return `true` if all pointers are aligned to `Alignment`.
     83 */
     84 template <size_t Alignment>
     85 static inline bool CanCopyAligned(const uint8_t* dest, const uint8_t* src,
     86                                  const uint8_t* lim) {
     87  static_assert(mozilla::IsPowerOfTwo(Alignment));
     88  return ((uintptr_t(dest) | uintptr_t(src) | uintptr_t(lim)) &
     89          (Alignment - 1)) == 0;
     90 }
     91 
     92 /**
     93 * Return `true` if both pointers have the same alignment and can be aligned to
     94 * `Alignment`.
     95 */
     96 template <size_t Alignment>
     97 static inline bool CanAlignTo(const uint8_t* dest, const uint8_t* src) {
     98  static_assert(mozilla::IsPowerOfTwo(Alignment));
     99  return ((uintptr_t(dest) ^ uintptr_t(src)) & (Alignment - 1)) == 0;
    100 }
    101 
    102 /**
    103 * Copy a datum smaller than `WORDSIZE`. Prevents tearing when `dest` and `src`
    104 * are both aligned.
    105 *
    106 * No tearing is a requirement for integer TypedArrays.
    107 *
    108 * https://tc39.es/ecma262/#sec-isnotearconfiguration
    109 * https://tc39.es/ecma262/#sec-tear-free-aligned-reads
    110 * https://tc39.es/ecma262/#sec-valid-executions
    111 */
    112 static MOZ_ALWAYS_INLINE auto AtomicCopyDownNoTearIfAlignedUnsynchronized(
    113    uint8_t* dest, const uint8_t* src, const uint8_t* srcEnd) {
    114  MOZ_ASSERT(src <= srcEnd);
    115  MOZ_ASSERT(size_t(srcEnd - src) < WORDSIZE);
    116 
    117  if (WORDSIZE > 4 && CanCopyAligned<4>(dest, src, srcEnd)) {
    118    static_assert(WORDSIZE <= 8, "copies 32-bits at most once");
    119 
    120    if (src < srcEnd) {
    121      AtomicCopy32Unsynchronized(dest, src);
    122      dest += 4;
    123      src += 4;
    124    }
    125  } else if (CanCopyAligned<2>(dest, src, srcEnd)) {
    126    while (src < srcEnd) {
    127      AtomicCopy16Unsynchronized(dest, src);
    128      dest += 2;
    129      src += 2;
    130    }
    131  } else {
    132    while (src < srcEnd) {
    133      AtomicCopy8Unsynchronized(dest++, src++);
    134    }
    135  }
    136  return std::pair{dest, src};
    137 }
    138 
    139 void AtomicMemcpyDownUnsynchronized(uint8_t* dest, const uint8_t* src,
    140                                    size_t nbytes) {
    141  JS::AutoSuppressGCAnalysis nogc;
    142 
    143  const uint8_t* lim = src + nbytes;
    144 
    145  // Set up bulk copying.  The cases are ordered the way they are on the
    146  // assumption that if we can achieve aligned copies even with a little
    147  // preprocessing then that is better than unaligned copying on a platform
    148  // that supports it.
    149 
    150  if (nbytes >= WORDSIZE) {
    151    void (*copyBlock)(uint8_t* dest, const uint8_t* src);
    152    void (*copyWord)(uint8_t* dest, const uint8_t* src);
    153 
    154    if (CanAlignTo<WORDSIZE>(dest, src)) {
    155      const uint8_t* cutoff = (const uint8_t*)RoundUp(uintptr_t(src), WORDSIZE);
    156      MOZ_ASSERT(cutoff <= lim);  // because nbytes >= WORDSIZE
    157 
    158      // Copy initial bytes to align to word size.
    159      std::tie(dest, src) =
    160          AtomicCopyDownNoTearIfAlignedUnsynchronized(dest, src, cutoff);
    161 
    162      copyBlock = AtomicCopyBlockDownUnsynchronized;
    163      copyWord = AtomicCopyWordUnsynchronized;
    164    } else if (UnalignedAccessesAreOK()) {
    165      copyBlock = AtomicCopyBlockDownUnsynchronized;
    166      copyWord = AtomicCopyWordUnsynchronized;
    167    } else {
    168      copyBlock = AtomicCopyUnalignedBlockDownUnsynchronized;
    169      copyWord = AtomicCopyUnalignedWordDownUnsynchronized;
    170    }
    171 
    172    // Bulk copy, first larger blocks and then individual words.
    173 
    174    const uint8_t* blocklim = src + ((lim - src) & ~BLOCKMASK);
    175    while (src < blocklim) {
    176      copyBlock(dest, src);
    177      dest += BLOCKSIZE;
    178      src += BLOCKSIZE;
    179    }
    180 
    181    const uint8_t* wordlim = src + ((lim - src) & ~WORDMASK);
    182    while (src < wordlim) {
    183      copyWord(dest, src);
    184      dest += WORDSIZE;
    185      src += WORDSIZE;
    186    }
    187  }
    188 
    189  // Copy any remaining tail.
    190 
    191  AtomicCopyDownNoTearIfAlignedUnsynchronized(dest, src, lim);
    192 }
    193 
    194 /**
    195 * Copy a datum smaller than `WORDSIZE`. Prevents tearing when `dest` and `src`
    196 * are both aligned.
    197 *
    198 * No tearing is a requirement for integer TypedArrays.
    199 *
    200 * https://tc39.es/ecma262/#sec-isnotearconfiguration
    201 * https://tc39.es/ecma262/#sec-tear-free-aligned-reads
    202 * https://tc39.es/ecma262/#sec-valid-executions
    203 */
    204 static MOZ_ALWAYS_INLINE auto AtomicCopyUpNoTearIfAlignedUnsynchronized(
    205    uint8_t* dest, const uint8_t* src, const uint8_t* srcBegin) {
    206  MOZ_ASSERT(src >= srcBegin);
    207  MOZ_ASSERT(size_t(src - srcBegin) < WORDSIZE);
    208 
    209  if (WORDSIZE > 4 && CanCopyAligned<4>(dest, src, srcBegin)) {
    210    static_assert(WORDSIZE <= 8, "copies 32-bits at most once");
    211 
    212    if (src > srcBegin) {
    213      dest -= 4;
    214      src -= 4;
    215      AtomicCopy32Unsynchronized(dest, src);
    216    }
    217  } else if (CanCopyAligned<2>(dest, src, srcBegin)) {
    218    while (src > srcBegin) {
    219      dest -= 2;
    220      src -= 2;
    221      AtomicCopy16Unsynchronized(dest, src);
    222    }
    223  } else {
    224    while (src > srcBegin) {
    225      AtomicCopy8Unsynchronized(--dest, --src);
    226    }
    227  }
    228  return std::pair{dest, src};
    229 }
    230 
    231 void AtomicMemcpyUpUnsynchronized(uint8_t* dest, const uint8_t* src,
    232                                  size_t nbytes) {
    233  JS::AutoSuppressGCAnalysis nogc;
    234 
    235  const uint8_t* lim = src;
    236 
    237  src += nbytes;
    238  dest += nbytes;
    239 
    240  // Set up bulk copying.  The cases are ordered the way they are on the
    241  // assumption that if we can achieve aligned copies even with a little
    242  // preprocessing then that is better than unaligned copying on a platform
    243  // that supports it.
    244 
    245  if (nbytes >= WORDSIZE) {
    246    void (*copyBlock)(uint8_t* dest, const uint8_t* src);
    247    void (*copyWord)(uint8_t* dest, const uint8_t* src);
    248 
    249    if (CanAlignTo<WORDSIZE>(dest, src)) {
    250      const uint8_t* cutoff = (const uint8_t*)(uintptr_t(src) & ~WORDMASK);
    251      MOZ_ASSERT(cutoff >= lim);  // Because nbytes >= WORDSIZE
    252 
    253      // Copy initial bytes to align to word size.
    254      std::tie(dest, src) =
    255          AtomicCopyUpNoTearIfAlignedUnsynchronized(dest, src, cutoff);
    256 
    257      copyBlock = AtomicCopyBlockUpUnsynchronized;
    258      copyWord = AtomicCopyWordUnsynchronized;
    259    } else if (UnalignedAccessesAreOK()) {
    260      copyBlock = AtomicCopyBlockUpUnsynchronized;
    261      copyWord = AtomicCopyWordUnsynchronized;
    262    } else {
    263      copyBlock = AtomicCopyUnalignedBlockUpUnsynchronized;
    264      copyWord = AtomicCopyUnalignedWordUpUnsynchronized;
    265    }
    266 
    267    // Bulk copy, first larger blocks and then individual words.
    268 
    269    const uint8_t* blocklim = src - ((src - lim) & ~BLOCKMASK);
    270    while (src > blocklim) {
    271      dest -= BLOCKSIZE;
    272      src -= BLOCKSIZE;
    273      copyBlock(dest, src);
    274    }
    275 
    276    const uint8_t* wordlim = src - ((src - lim) & ~WORDMASK);
    277    while (src > wordlim) {
    278      dest -= WORDSIZE;
    279      src -= WORDSIZE;
    280      copyWord(dest, src);
    281    }
    282  }
    283 
    284  // Copy any remaining tail.
    285 
    286  AtomicCopyUpNoTearIfAlignedUnsynchronized(dest, src, lim);
    287 }
    288 
    289 }  // namespace jit
    290 }  // namespace js
    291 
    292 #endif  // JS_HAVE_GENERATED_ATOMIC_OPS