tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Assembler-mips64.cpp (13326B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/mips64/Assembler-mips64.h"
      8 
      9 #include "mozilla/DebugOnly.h"
     10 #include "mozilla/Maybe.h"
     11 
     12 #include "jit/AutoWritableJitCode.h"
     13 #include "wasm/WasmFrame.h"
     14 
     15 using mozilla::DebugOnly;
     16 
     17 using namespace js;
     18 using namespace js::jit;
     19 
     20 ABIArg ABIArgGenerator::next(MIRType type) {
     21  static_assert(NumIntArgRegs == NumFloatArgRegs);
     22  if (regIndex_ == NumIntArgRegs) {
     23    if (type != MIRType::Simd128) {
     24      current_ = ABIArg(stackOffset_);
     25      stackOffset_ += sizeof(uint64_t);
     26    } else {
     27      // Mips platform does not support simd yet.
     28      MOZ_CRASH("Unexpected argument type");
     29    }
     30    return current_;
     31  }
     32  switch (type) {
     33    case MIRType::Int32:
     34    case MIRType::Int64:
     35    case MIRType::Pointer:
     36    case MIRType::WasmAnyRef:
     37    case MIRType::WasmArrayData:
     38    case MIRType::StackResults: {
     39      Register destReg;
     40      GetIntArgReg(regIndex_++, &destReg);
     41      current_ = ABIArg(destReg);
     42      break;
     43    }
     44    case MIRType::Float32:
     45    case MIRType::Double: {
     46      FloatRegister::ContentType contentType;
     47      contentType = (type == MIRType::Double) ? FloatRegisters::Double
     48                                              : FloatRegisters::Single;
     49      FloatRegister destFReg;
     50      GetFloatArgReg(regIndex_++, &destFReg);
     51      current_ = ABIArg(FloatRegister(destFReg.id(), contentType));
     52      break;
     53    }
     54    default:
     55      MOZ_CRASH("Unexpected argument type");
     56  }
     57  return current_;
     58 }
     59 
     60 uint32_t js::jit::RT(FloatRegister r) {
     61  MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
     62  return r.id() << RTShift;
     63 }
     64 
     65 uint32_t js::jit::RD(FloatRegister r) {
     66  MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
     67  return r.id() << RDShift;
     68 }
     69 
     70 uint32_t js::jit::RZ(FloatRegister r) {
     71  MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
     72  return r.id() << RZShift;
     73 }
     74 
     75 uint32_t js::jit::SA(FloatRegister r) {
     76  MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
     77  return r.id() << SAShift;
     78 }
     79 
     80 void Assembler::executableCopy(uint8_t* buffer) {
     81  MOZ_ASSERT(isFinished);
     82  m_buffer.executableCopy(buffer);
     83 }
     84 
     85 uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
     86  Instruction* inst = (Instruction*)instPtr;
     87  return Assembler::ExtractLoad64Value(inst);
     88 }
     89 
     90 static JitCode* CodeFromJump(Instruction* jump) {
     91  uint8_t* target = (uint8_t*)Assembler::ExtractLoad64Value(jump);
     92  return JitCode::FromExecutable(target);
     93 }
     94 
     95 void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
     96                                     CompactBufferReader& reader) {
     97  while (reader.more()) {
     98    JitCode* child =
     99        CodeFromJump((Instruction*)(code->raw() + reader.readUnsigned()));
    100    TraceManuallyBarrieredEdge(trc, &child, "rel32");
    101  }
    102 }
    103 
// Trace (and, if the GC moved the referent, update) the pointer embedded in
// a single 64-bit load instruction sequence at |inst|. |awjc| is populated
// lazily: the code is only made writable if some pointer actually changes,
// so untouched code stays in its executable mapping.
static void TraceOneDataRelocation(JSTracer* trc,
                                   mozilla::Maybe<AutoWritableJitCode>& awjc,
                                   JitCode* code, Instruction* inst) {
  void* ptr = (void*)Assembler::ExtractLoad64Value(inst);
  void* prior = ptr;

  // Data relocations can be for Values or for raw pointers. If a Value is
  // zero-tagged, we can trace it as if it were a raw pointer. If a Value
  // is not zero-tagged, we have to interpret it as a Value to ensure that the
  // tag bits are masked off to recover the actual pointer.
  uintptr_t word = reinterpret_cast<uintptr_t>(ptr);
  if (word >> JSVAL_TAG_SHIFT) {
    // This relocation is a Value with a non-zero tag.
    Value v = Value::fromRawBits(word);
    TraceManuallyBarrieredEdge(trc, &v, "jit-masm-value");
    // Re-extract the (possibly updated) payload pointer from the Value.
    ptr = (void*)v.bitsAsPunboxPointer();
  } else {
    // This relocation is a raw pointer or a Value with a zero tag.
    // No barrier needed since these are constants.
    TraceManuallyBarrieredGenericPointerEdge(
        trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");
  }

  // Patch the load sequence only if tracing relocated the referent.
  if (ptr != prior) {
    if (awjc.isNothing()) {
      awjc.emplace(code);
    }
    Assembler::UpdateLoad64Value(inst, uint64_t(ptr));
  }
}
    134 
    135 /* static */
    136 void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
    137                                     CompactBufferReader& reader) {
    138  mozilla::Maybe<AutoWritableJitCode> awjc;
    139  while (reader.more()) {
    140    size_t offset = reader.readUnsigned();
    141    Instruction* inst = (Instruction*)(code->raw() + offset);
    142    TraceOneDataRelocation(trc, awjc, code, inst);
    143  }
    144 }
    145 
    146 void Assembler::Bind(uint8_t* rawCode, const CodeLabel& label) {
    147  if (label.patchAt().bound()) {
    148    auto mode = label.linkMode();
    149    intptr_t offset = label.patchAt().offset();
    150    intptr_t target = label.target().offset();
    151 
    152    if (mode == CodeLabel::RawPointer) {
    153      *reinterpret_cast<const void**>(rawCode + offset) = rawCode + target;
    154    } else {
    155      MOZ_ASSERT(mode == CodeLabel::MoveImmediate ||
    156                 mode == CodeLabel::JumpImmediate);
    157      Instruction* inst = (Instruction*)(rawCode + offset);
    158      Assembler::UpdateLoad64Value(inst, (uint64_t)(rawCode + target));
    159    }
    160  }
    161 }
    162 
// Patch the reserved branch sequence at |branch| (pointed to by |inst|) so
// it reaches |target|. Depending on the encoded distance and the kind of
// branch originally emitted, the reserved slots are rewritten in place as
// either a short PC-relative branch or a long absolute jump through a
// scratch register.
void Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target) {
  UseScratchRegisterScope temps(*this);

  int64_t offset = target - branch;
  // Reference encodings used to recognize which kind of branch was emitted.
  InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
  InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));

  // If encoded offset is 4, then the jump must be short
  if (BOffImm16(inst[0]).decode() == 4) {
    MOZ_ASSERT(BOffImm16::IsInRange(offset));
    inst[0].setBOffImm16(BOffImm16(offset));
    inst[1].makeNop();
    return;
  }

  // Generate the long jump for calls because return address has to be the
  // address after the reserved block.
  if (inst[0].encode() == inst_bgezal.encode()) {
    addLongJump(BufferOffset(branch), BufferOffset(target));
    Register scratch = temps.Acquire();
    // The real address is patched in later when long jumps are resolved;
    // INVALID_OFFSET is only a placeholder in the load sequence.
    Assembler::WriteLoad64Instructions(inst, scratch,
                                       LabelBase::INVALID_OFFSET);
    inst[4] = InstReg(op_special, scratch, zero, ra, ff_jalr).encode();
    // There is 1 nop after this.
    return;
  }

  if (BOffImm16::IsInRange(offset)) {
    // Leaving the trailing nops in place (instead of branching over them)
    // improves performance on the Loongson3 platform, so only skip them
    // elsewhere.
    bool skipNops =
        !isLoongson() && (inst[0].encode() != inst_bgezal.encode() &&
                          inst[0].encode() != inst_beq.encode());

    inst[0].setBOffImm16(BOffImm16(offset));
    inst[1].makeNop();

    if (skipNops) {
      // Hop over the now-unused remainder of the reserved block.
      inst[2] =
          InstImm(op_regimm, zero, rt_bgez, BOffImm16(5 * sizeof(uint32_t)))
              .encode();
      // There are 4 nops after this
    }
    return;
  }

  Register scratch = temps.Acquire();
  if (inst[0].encode() == inst_beq.encode()) {
    // Handle long unconditional jump.
    addLongJump(BufferOffset(branch), BufferOffset(target));
    Assembler::WriteLoad64Instructions(inst, scratch,
                                       LabelBase::INVALID_OFFSET);
#ifdef MIPSR6
    inst[4] = InstReg(op_special, scratch, zero, zero, ff_jalr).encode();
#else
    inst[4] = InstReg(op_special, scratch, zero, zero, ff_jr).encode();
#endif
    // There is 1 nop after this.
  } else {
    // Handle long conditional jump: invert the condition so the (taken)
    // inverted branch skips the long-jump sequence that follows it.
    inst[0] = invertBranch(inst[0], BOffImm16(7 * sizeof(uint32_t)));
    // No need for a "nop" here because we can clobber scratch.
    addLongJump(BufferOffset(branch + sizeof(uint32_t)), BufferOffset(target));
    Assembler::WriteLoad64Instructions(&inst[1], scratch,
                                       LabelBase::INVALID_OFFSET);
#ifdef MIPSR6
    inst[5] = InstReg(op_special, scratch, zero, zero, ff_jalr).encode();
#else
    inst[5] = InstReg(op_special, scratch, zero, zero, ff_jr).encode();
#endif
    // There is 1 nop after this.
  }
}
    236 
    237 void Assembler::processCodeLabels(uint8_t* rawCode) {
    238  for (const CodeLabel& label : codeLabels_) {
    239    Bind(rawCode, label);
    240  }
    241 }
    242 
    243 uint32_t Assembler::PatchWrite_NearCallSize() {
    244  // Load an address needs 4 instructions, and a jump with a delay slot.
    245  return (4 + 2) * sizeof(uint32_t);
    246 }
    247 
// Rewrite the code at |start| into a call to |toCall|. The patched block is
// exactly PatchWrite_NearCallSize() bytes: a 4-instruction address load into
// the scratch register, a jalr through it, and a delay-slot nop.
void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
                                    CodeLocationLabel toCall) {
  Instruction* inst = (Instruction*)start.raw();
  uint8_t* dest = toCall.raw();

  // Overwrite whatever instruction used to be here with a call.
  // Always use long jump for two reasons:
  // - Jump has to be the same size because of PatchWrite_NearCallSize.
  // - Return address has to be at the end of replaced block.
  // Short jump wouldn't be more efficient.
  Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)dest);
  inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
  inst[5] = InstNOP();
}
    262 
// Decode the immediate materialized by the multi-instruction load sequence
// starting at |inst0|. Two encodings are supported, distinguished by the
// second instruction's opcode:
//  - ori at i1:    lui; ori; dsll 16; ori
//    -> a 48-bit value, sign-extended to 64 bits (the form emitted by
//       WriteLoad64Instructions);
//  - daddiu at i1: lui; daddiu; dsll; ori; dsll; ori
//    -> a full 64-bit value.
uint64_t Assembler::ExtractLoad64Value(Instruction* inst0) {
  InstImm* i0 = (InstImm*)inst0;
  InstImm* i1 = (InstImm*)i0->next();
  InstReg* i2 = (InstReg*)i1->next();  // shift instruction; only stepped past.
  InstImm* i3 = (InstImm*)i2->next();
  InstImm* i5 = (InstImm*)i3->next()->next();  // skips the second shift.

  MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
  MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift) ||
             i1->extractOpcode() == ((uint32_t)op_daddiu >> OpcodeShift));
  MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));

  if (i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift)) {
    // 48-bit form: assemble bits [47:0], then sign-extend from bit 47.
    uint64_t value = (uint64_t(i0->extractImm16Value()) << 32) |
                     (uint64_t(i1->extractImm16Value()) << 16) |
                     uint64_t(i3->extractImm16Value());
    return uint64_t((int64_t(value) << 16) >> 16);
  }

  // 64-bit form: daddiu sign-extends its immediate, which the shifted
  // signed addition for bits [47:32] accounts for.
  MOZ_ASSERT(i5->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
  uint64_t value = ((uint64_t(i0->extractImm16Value()) << 48) +
                    ((int64_t(i1->extractImm16Value()) << 48) >> 16)) |
                   (uint64_t(i3->extractImm16Value()) << 16) |
                   uint64_t(i5->extractImm16Value());
  return value;
}
    289 
// Overwrite the immediate encoded by the load sequence at |inst0| with
// |value| (see ExtractLoad64Value for the two supported encodings). Only the
// 16-bit immediate fields are rewritten; the instructions themselves are
// left untouched, so the sequence keeps its original shape.
void Assembler::UpdateLoad64Value(Instruction* inst0, uint64_t value) {
  InstImm* i0 = (InstImm*)inst0;
  InstImm* i1 = (InstImm*)i0->next();
  InstReg* i2 = (InstReg*)i1->next();  // shift instruction; only stepped past.
  InstImm* i3 = (InstImm*)i2->next();
  InstImm* i5 = (InstImm*)i3->next()->next();  // skips the second shift.

  MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
  MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift) ||
             i1->extractOpcode() == ((uint32_t)op_daddiu >> OpcodeShift));
  MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));

  if (i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift)) {
    // 48-bit form: lui carries bits [47:32], the two oris carry bits
    // [31:16] and [15:0].
    i0->setImm16(Imm16::Lower(Imm32(value >> 32)));
    i1->setImm16(Imm16::Upper(Imm32(value)));
    i3->setImm16(Imm16::Lower(Imm32(value)));
    return;
  }

  MOZ_ASSERT(i5->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));

  // 64-bit form. daddiu sign-extends its immediate (bits [47:32]), so the
  // lui immediate is biased by 0x8000 to cancel the borrow when bit 47 of
  // the value is set.
  i0->setImm16(Imm16::Upper(Imm32((value >> 32) + 0x8000)));
  i1->setImm16(Imm16::Lower(Imm32(value >> 32)));
  i3->setImm16(Imm16::Upper(Imm32(value)));
  i5->setImm16(Imm16::Lower(Imm32(value)));
}
    316 
// Emit the canonical 4-instruction sequence that materializes a 48-bit
// value (sign-extended to 64 bits) in |reg|:
//   lui  reg, bits[47:32]
//   ori  reg, reg, bits[31:16]
//   dsll reg, reg, 16
//   ori  reg, reg, bits[15:0]
// Callers that do not yet know the final value (e.g. long jumps) pass a
// placeholder and later rewrite the immediates with UpdateLoad64Value.
void Assembler::WriteLoad64Instructions(Instruction* inst0, Register reg,
                                        uint64_t value) {
  Instruction* inst1 = inst0->next();
  Instruction* inst2 = inst1->next();
  Instruction* inst3 = inst2->next();

  *inst0 = InstImm(op_lui, zero, reg, Imm16::Lower(Imm32(value >> 32)));
  *inst1 = InstImm(op_ori, reg, reg, Imm16::Upper(Imm32(value)));
  *inst2 = InstReg(op_special, rs_zero, reg, reg, 16, ff_dsll);
  *inst3 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value)));
}
    328 
    329 void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
    330                                        ImmPtr newValue, ImmPtr expectedValue) {
    331  PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
    332                          PatchedImmPtr(expectedValue.value));
    333 }
    334 
    335 void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
    336                                        PatchedImmPtr newValue,
    337                                        PatchedImmPtr expectedValue) {
    338  Instruction* inst = (Instruction*)label.raw();
    339 
    340  // Extract old Value
    341  DebugOnly<uint64_t> value = Assembler::ExtractLoad64Value(inst);
    342  MOZ_ASSERT(value == uint64_t(expectedValue.value));
    343 
    344  // Replace with new value
    345  Assembler::UpdateLoad64Value(inst, uint64_t(newValue.value));
    346 }
    347 
    348 uint64_t Assembler::ExtractInstructionImmediate(uint8_t* code) {
    349  InstImm* inst = (InstImm*)code;
    350  return Assembler::ExtractLoad64Value(inst);
    351 }
    352 
// Toggle the call site at |inst_| on or off. The site is a 4-instruction
// address load followed by a slot (i4) holding either a jalr (enabled) or a
// nop (disabled); only that fifth slot is rewritten.
void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
  Instruction* inst = (Instruction*)inst_.raw();
  InstImm* i0 = (InstImm*)inst;
  InstImm* i1 = (InstImm*)i0->next();
  InstImm* i3 = (InstImm*)i1->next()->next();  // skips the shift at slot 2.
  Instruction* i4 = (Instruction*)i3->next();

  MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
  MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
  MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));

  if (enabled) {
    // The toggled slot must not still hold part of another address load.
    MOZ_ASSERT(i4->extractOpcode() != ((uint32_t)op_lui >> OpcodeShift));
    // Call through the register the preceding load sequence wrote.
    InstReg jalr = InstReg(op_special, Register::FromCode(i3->extractRT()),
                           zero, ra, ff_jalr);
    *i4 = jalr;
  } else {
    InstNOP nop;
    *i4 = nop;
  }
}