tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Assembler-x64.cpp (7978B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/x64/Assembler-x64.h"
      8 
      9 #include "gc/Tracer.h"
     10 #include "util/Memory.h"
     11 #include "wasm/WasmFrame.h"
     12 
     13 using namespace js;
     14 using namespace js::jit;
     15 
// Argument-location generator for x64 calls. The register bookkeeping
// differs per platform: Win64 uses a single shared index for its
// positionally-paired int/float argument registers, while the non-Windows
// (SysV) path tracks the integer and float register banks independently.
ABIArgGenerator::ABIArgGenerator(ABIKind kind)
    : ABIArgGeneratorShared(kind),
#if defined(XP_WIN)
      regIndex_(0)
#else
      intRegIndex_(0),
      floatRegIndex_(0)
#endif
{
#if defined(XP_WIN)
  // Win64 callers must reserve shadow ("home") space for the register
  // arguments before any stack arguments; account for it up front.
  stackOffset_ += ShadowStackSpace;
#endif
}
     29 
// Assign the ABI location (argument register or stack offset) for the next
// argument of the given MIR type, advancing the generator's state. Returns
// the assigned location, which is also cached in current_.
ABIArg ABIArgGenerator::next(MIRType type) {
#if defined(XP_WIN)
  // Win64: int and float argument registers are consumed pairwise, so one
  // index covers both banks; this only works if the banks are equally sized.
  static_assert(NumIntArgRegs == NumFloatArgRegs);
  if (regIndex_ == NumIntArgRegs) {
    // All argument registers are used up; everything further goes on the
    // stack.
    if (type == MIRType::Simd128) {
      // On Win64, >64 bit args need to be passed by reference.  However, wasm
      // doesn't allow passing SIMD values to JS, so the only way to reach this
      // is wasm to wasm calls.  Ergo we can break the native ABI here and use
      // the Wasm ABI instead.
      MOZ_ASSERT(kind_ == ABIKind::Wasm);
      stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
      current_ = ABIArg(stackOffset_);
      stackOffset_ += Simd128DataSize;
    } else {
      // All non-SIMD stack arguments occupy one 8-byte slot.
      current_ = ABIArg(stackOffset_);
      stackOffset_ += sizeof(uint64_t);
    }
    return current_;
  }
  switch (type) {
    case MIRType::Int32:
    case MIRType::Int64:
    case MIRType::Pointer:
    case MIRType::WasmAnyRef:
    case MIRType::WasmArrayData:
    case MIRType::StackResults:
      current_ = ABIArg(IntArgRegs[regIndex_++]);
      break;
    case MIRType::Float32:
      current_ = ABIArg(FloatArgRegs[regIndex_++].asSingle());
      break;
    case MIRType::Double:
      current_ = ABIArg(FloatArgRegs[regIndex_++]);
      break;
    case MIRType::Simd128:
      MOZ_ASSERT(kind_ == ABIKind::Wasm);
      // On Win64, >64 bit args need to be passed by reference, but wasm
      // doesn't allow passing SIMD values to FFIs. The only way to reach
      // here is asm to asm calls, so we can break the ABI here.
      current_ = ABIArg(FloatArgRegs[regIndex_++].asSimd128());
      break;
    default:
      MOZ_CRASH("Unexpected argument type");
  }
  return current_;
#else
  // Non-Windows (SysV-style): integer and float registers are allocated
  // from independent banks; each bank spills to the stack separately.
  switch (type) {
    case MIRType::Int32:
    case MIRType::Int64:
    case MIRType::Pointer:
    case MIRType::WasmAnyRef:
    case MIRType::WasmArrayData:
    case MIRType::StackResults:
      if (intRegIndex_ == NumIntArgRegs) {
        // Integer registers exhausted; use an 8-byte stack slot.
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint64_t);
        break;
      }
      current_ = ABIArg(IntArgRegs[intRegIndex_++]);
      break;
    case MIRType::Double:
    case MIRType::Float32:
      if (floatRegIndex_ == NumFloatArgRegs) {
        // Float registers exhausted; even a Float32 takes a full 8-byte
        // stack slot.
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint64_t);
        break;
      }
      if (type == MIRType::Float32) {
        current_ = ABIArg(FloatArgRegs[floatRegIndex_++].asSingle());
      } else {
        current_ = ABIArg(FloatArgRegs[floatRegIndex_++]);
      }
      break;
    case MIRType::Simd128:
      if (floatRegIndex_ == NumFloatArgRegs) {
        // SIMD spills must be aligned for 128-bit memory accesses.
        stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
        current_ = ABIArg(stackOffset_);
        stackOffset_ += Simd128DataSize;
        break;
      }
      current_ = ABIArg(FloatArgRegs[floatRegIndex_++].asSimd128());
      break;
    default:
      MOZ_CRASH("Unexpected argument type");
  }
  return current_;
#endif
}
    118 
    119 void Assembler::addPendingJump(JmpSrc src, ImmPtr target,
    120                               RelocationKind reloc) {
    121  MOZ_ASSERT(target.value != nullptr);
    122 
    123  // Emit reloc before modifying the jump table, since it computes a 0-based
    124  // index. This jump is not patchable at runtime.
    125  if (reloc == RelocationKind::JITCODE) {
    126    jumpRelocations_.writeUnsigned(src.offset());
    127  }
    128 
    129  static_assert(MaxCodeBytesPerProcess <= uint64_t(2) * 1024 * 1024 * 1024,
    130                "Code depends on using int32_t for cross-JitCode jump offsets");
    131 
    132  MOZ_ASSERT_IF(reloc == RelocationKind::JITCODE,
    133                AddressIsInExecutableMemory(target.value));
    134 
    135  RelativePatch patch(src.offset(), target.value, reloc);
    136  if (reloc == RelocationKind::JITCODE ||
    137      AddressIsInExecutableMemory(target.value)) {
    138    enoughMemory_ &= codeJumps_.append(patch);
    139  } else {
    140    enoughMemory_ &= extendedJumps_.append(patch);
    141  }
    142 }
    143 
// Finalize code emission. If any pending jump may target memory that is not
// rel32-reachable, append the extended jump table at the end of the code;
// executableCopy() later redirects out-of-range jumps through its entries.
void Assembler::finish() {
  if (oom()) {
    return;
  }

  AutoCreatedBy acb(*this, "Assembler::finish");

  if (!extendedJumps_.length()) {
    // Since we may be followed by non-executable data, eagerly insert an
    // undefined instruction byte to prevent processors from decoding
    // gibberish into their pipelines. See Intel performance guides.
    masm.ud2();
    return;
  }

  // Emit the jump table.
  masm.haltingAlign(SizeOfJumpTableEntry);
  extendedJumpTable_ = masm.size();

  // Emit one placeholder entry per extended jump: `jmp [rip+2]`, a ud2, and
  // a zeroed 64-bit slot that executableCopy() patches with the real target
  // address.
  for (size_t i = 0; i < extendedJumps_.length(); i++) {
#ifdef DEBUG
    size_t oldSize = masm.size();
#endif
    MOZ_ASSERT(hasCreator());
    masm.jmp_rip(2);
    MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == 6);
    // Following an indirect branch with ud2 hints to the hardware that
    // there's no fall-through. This also aligns the 64-bit immediate.
    masm.ud2();
    MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == 8);
    masm.immediate64(0);
    // Each entry must be exactly one jump-table slot wide.
    MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == SizeOfExtendedJump);
    MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == SizeOfJumpTableEntry);
  }
}
    180 
// Copy the assembled code into `buffer` and link every pending jump
// recorded by addPendingJump().
void Assembler::executableCopy(uint8_t* buffer) {
  AssemblerX86Shared::executableCopy(buffer);

  // codeJumps_ targets live in executable code memory, which the
  // MaxCodeBytesPerProcess bound (see addPendingJump) keeps rel32-reachable,
  // so they are always linked directly.
  for (RelativePatch& rp : codeJumps_) {
    uint8_t* src = buffer + rp.offset;
    MOZ_ASSERT(rp.target);

    MOZ_RELEASE_ASSERT(X86Encoding::CanRelinkJump(src, rp.target));
    X86Encoding::SetRel32(src, rp.target);
  }

  // extendedJumps_ targets may be anywhere in the address space; link
  // directly when in rel32 range, otherwise route through the matching
  // extended jump table entry emitted by finish().
  for (size_t i = 0; i < extendedJumps_.length(); i++) {
    RelativePatch& rp = extendedJumps_[i];
    uint8_t* src = buffer + rp.offset;
    MOZ_ASSERT(rp.target);

    if (X86Encoding::CanRelinkJump(src, rp.target)) {
      X86Encoding::SetRel32(src, rp.target);
    } else {
      // An extended jump table must exist, and its offset must be in
      // range.
      MOZ_ASSERT(extendedJumpTable_);
      MOZ_ASSERT((extendedJumpTable_ + i * SizeOfJumpTableEntry) <=
                 size() - SizeOfJumpTableEntry);

      // Patch the jump to go to the extended jump entry.
      uint8_t* entry = buffer + extendedJumpTable_ + i * SizeOfJumpTableEntry;
      X86Encoding::SetRel32(src, entry);

      // Now patch the pointer, note that we need to align it to
      // *after* the extended jump, i.e. after the 64-bit immediate.
      X86Encoding::SetPointer(entry + SizeOfExtendedJump, rp.target);
    }
  }
}
    216 
    217 class RelocationIterator {
    218  CompactBufferReader reader_;
    219  uint32_t offset_ = 0;
    220 
    221 public:
    222  explicit RelocationIterator(CompactBufferReader& reader) : reader_(reader) {}
    223 
    224  bool read() {
    225    if (!reader_.more()) {
    226      return false;
    227    }
    228    offset_ = reader_.readUnsigned();
    229    return true;
    230  }
    231 
    232  uint32_t offset() const { return offset_; }
    233 };
    234 
    235 JitCode* Assembler::CodeFromJump(JitCode* code, uint8_t* jump) {
    236  uint8_t* target = (uint8_t*)X86Encoding::GetRel32Target(jump);
    237 
    238  MOZ_ASSERT(!code->containsNativePC(target),
    239             "Extended jump table not used for cross-JitCode jumps");
    240 
    241  return JitCode::FromExecutable(target);
    242 }
    243 
    244 void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
    245                                     CompactBufferReader& reader) {
    246  RelocationIterator iter(reader);
    247  while (iter.read()) {
    248    JitCode* child = CodeFromJump(code, code->raw() + iter.offset());
    249    TraceManuallyBarrieredEdge(trc, &child, "rel32");
    250    MOZ_ASSERT(child == CodeFromJump(code, code->raw() + iter.offset()));
    251  }
    252 }