tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git

MoveEmitter-arm64.cpp (9751B)


/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm64/MoveEmitter-arm64.h"
#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace js::jit;

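// Convert a MoveOperand memory reference into a vixl MemOperand. Accesses
// relative to the stack pointer are adjusted for any stack space reserved
// since emission started (pushedAtStart_).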
MemOperand MoveEmitterARM64::toMemOperand(const MoveOperand& operand) const {
  MOZ_ASSERT(operand.isMemory());
  ARMRegister base(operand.base(), 64);
  if (operand.base() == masm.getStackPointer()) {
    return MemOperand(base,
                      operand.disp() + (masm.framePushed() - pushedAtStart_));
  }
  return MemOperand(base, operand.disp());
}

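// Emit every move in the resolved schedule, acquiring a scratch general
// register up front for breaking cycles.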
void MoveEmitterARM64::emit(const MoveResolver& moves) {
  vixl::UseScratchRegisterScope temps(&masm.asVIXL());
  // We have two scratch general registers, so use one as temporary storage for
  // breaking cycles and leave the other available for memory to memory moves.
  //
  // This register is used when breaking GENERAL, INT32, FLOAT32, and DOUBLE
  // move cycles. For FLOAT32/DOUBLE, this involves a fmov between float and
  // general registers. We could avoid this if we had an extra scratch float
  // register, otherwise we need the scratch float register for memory to
  // memory moves that may happen in the cycle. We cannot use the scratch
  // general register for SIMD128 cycles as it is not large enough.
  cycleGeneralReg_ = temps.AcquireX();

  for (size_t i = 0; i < moves.numMoves(); i++) {
    emitMove(moves.getMove(i));
  }

  cycleGeneralReg_ = ARMRegister();
}

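// Release any stack space reserved for cycle slots; the frame must end up
// exactly as it was when emission started.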
void MoveEmitterARM64::finish() {
  assertDone();
  masm.freeStack(masm.framePushed() - pushedAtStart_);
  MOZ_ASSERT(masm.framePushed() == pushedAtStart_);
}

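// Emit a single move, saving the destination of a cycle's first move before
// it is clobbered and restoring it when the cycle's last move is reached.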
void MoveEmitterARM64::emitMove(const MoveOp& move) {
  const MoveOperand& from = move.from();
  const MoveOperand& to = move.to();

  if (move.isCycleBegin()) {
    MOZ_ASSERT(!inCycle_ && !move.isCycleEnd());
    breakCycle(from, to, move.endCycleType());
    inCycle_ = true;
  } else if (move.isCycleEnd()) {
    MOZ_ASSERT(inCycle_);
    completeCycle(from, to, move.type());
    inCycle_ = false;
    return;
  }

  switch (move.type()) {
    case MoveOp::FLOAT32:
      emitFloat32Move(from, to);
      break;
    case MoveOp::DOUBLE:
      emitDoubleMove(from, to);
      break;
    case MoveOp::SIMD128:
      emitSimd128Move(from, to);
      break;
    case MoveOp::INT32:
      emitInt32Move(from, to);
      break;
    case MoveOp::GENERAL:
      emitGeneralMove(from, to);
      break;
    default:
      MOZ_CRASH("Unexpected move type");
  }
}

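// Each typed emit*Move helper below handles three cases: register to
// register, register to/from memory, and memory to memory via a scratch
// register of the appropriate width.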
void MoveEmitterARM64::emitFloat32Move(const MoveOperand& from,
                                       const MoveOperand& to) {
  if (from.isFloatReg()) {
    if (to.isFloatReg()) {
      masm.Fmov(toFPReg(to, MoveOp::FLOAT32), toFPReg(from, MoveOp::FLOAT32));
    } else {
      masm.Str(toFPReg(from, MoveOp::FLOAT32), toMemOperand(to));
    }
    return;
  }

  if (to.isFloatReg()) {
    masm.Ldr(toFPReg(to, MoveOp::FLOAT32), toMemOperand(from));
    return;
  }

  vixl::UseScratchRegisterScope temps(&masm.asVIXL());
  const ARMFPRegister scratch32 = temps.AcquireS();
  masm.Ldr(scratch32, toMemOperand(from));
  masm.Str(scratch32, toMemOperand(to));
}

void MoveEmitterARM64::emitDoubleMove(const MoveOperand& from,
                                      const MoveOperand& to) {
  if (from.isFloatReg()) {
    if (to.isFloatReg()) {
      masm.Fmov(toFPReg(to, MoveOp::DOUBLE), toFPReg(from, MoveOp::DOUBLE));
    } else {
      masm.Str(toFPReg(from, MoveOp::DOUBLE), toMemOperand(to));
    }
    return;
  }

  if (to.isFloatReg()) {
    masm.Ldr(toFPReg(to, MoveOp::DOUBLE), toMemOperand(from));
    return;
  }

  vixl::UseScratchRegisterScope temps(&masm.asVIXL());
  const ARMFPRegister scratch = temps.AcquireD();
  masm.Ldr(scratch, toMemOperand(from));
  masm.Str(scratch, toMemOperand(to));
}

void MoveEmitterARM64::emitSimd128Move(const MoveOperand& from,
                                       const MoveOperand& to) {
  if (from.isFloatReg()) {
    if (to.isFloatReg()) {
      masm.Mov(toFPReg(to, MoveOp::SIMD128), toFPReg(from, MoveOp::SIMD128));
    } else {
      masm.Str(toFPReg(from, MoveOp::SIMD128), toMemOperand(to));
    }
    return;
  }

  if (to.isFloatReg()) {
    masm.Ldr(toFPReg(to, MoveOp::SIMD128), toMemOperand(from));
    return;
  }

  vixl::UseScratchRegisterScope temps(&masm.asVIXL());
  const ARMFPRegister scratch = temps.AcquireQ();
  masm.Ldr(scratch, toMemOperand(from));
  masm.Str(scratch, toMemOperand(to));
}

void MoveEmitterARM64::emitInt32Move(const MoveOperand& from,
                                     const MoveOperand& to) {
  if (from.isGeneralReg()) {
    if (to.isGeneralReg()) {
      masm.Mov(toARMReg32(to), toARMReg32(from));
    } else {
      masm.Str(toARMReg32(from), toMemOperand(to));
    }
    return;
  }

  if (to.isGeneralReg()) {
    masm.Ldr(toARMReg32(to), toMemOperand(from));
    return;
  }

  vixl::UseScratchRegisterScope temps(&masm.asVIXL());
  const ARMRegister scratch32 = temps.AcquireW();
  masm.Ldr(scratch32, toMemOperand(from));
  masm.Str(scratch32, toMemOperand(to));
}

void MoveEmitterARM64::emitGeneralMove(const MoveOperand& from,
                                       const MoveOperand& to) {
  if (from.isGeneralReg()) {
    MOZ_ASSERT(to.isGeneralReg() || to.isMemory());
    if (to.isGeneralReg()) {
      masm.Mov(toARMReg64(to), toARMReg64(from));
    } else {
      masm.Str(toARMReg64(from), toMemOperand(to));
    }
    return;
  }

  // {Memory OR EffectiveAddress} -> Register move.
  if (to.isGeneralReg()) {
    MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
    if (from.isMemory()) {
      masm.Ldr(toARMReg64(to), toMemOperand(from));
    } else {
      masm.Add(toARMReg64(to), toARMReg64(from), Operand(from.disp()));
    }
    return;
  }

  vixl::UseScratchRegisterScope temps(&masm.asVIXL());
  const ARMRegister scratch64 = temps.AcquireX();

  // Memory -> Memory move.
  if (from.isMemory()) {
    MOZ_ASSERT(to.isMemory());
    masm.Ldr(scratch64, toMemOperand(from));
    masm.Str(scratch64, toMemOperand(to));
    return;
  }

  // EffectiveAddress -> Memory move.
  MOZ_ASSERT(from.isEffectiveAddress());
  MOZ_ASSERT(to.isMemory());
  masm.Add(scratch64, toARMReg64(from), Operand(from.disp()));
  masm.Str(scratch64, toMemOperand(to));
}

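// Return the stack slot used to spill SIMD128 values while breaking a cycle,
// reserving it on first use.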
MemOperand MoveEmitterARM64::cycleSlot() {
  // Using SP as stack pointer requires alignment preservation below.
  MOZ_ASSERT(!masm.GetStackPointer64().Is(sp));

  // Allocate a slot for breaking cycles if we have not already.
  if (pushedAtCycle_ == -1) {
    static_assert(SpillSlotSize == 16);
    masm.reserveStack(SpillSlotSize);
    pushedAtCycle_ = masm.framePushed();
  }

  return MemOperand(masm.GetStackPointer64(),
                    masm.framePushed() - pushedAtCycle_);
}

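// Save the destination of the first move in a cycle before it is
// overwritten. GENERAL, INT32, FLOAT32, and DOUBLE values fit in
// cycleGeneralReg_; SIMD128 values are spilled to cycleSlot() instead.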
void MoveEmitterARM64::breakCycle(const MoveOperand& from,
                                  const MoveOperand& to, MoveOp::Type type) {
  switch (type) {
    case MoveOp::FLOAT32:
      if (to.isMemory()) {
        masm.Ldr(cycleGeneralReg_.W(), toMemOperand(to));
      } else {
        masm.Fmov(cycleGeneralReg_.W(), toFPReg(to, type));
      }
      break;

    case MoveOp::DOUBLE:
      if (to.isMemory()) {
        masm.Ldr(cycleGeneralReg_.X(), toMemOperand(to));
      } else {
        masm.Fmov(cycleGeneralReg_.X(), toFPReg(to, type));
      }
      break;

    case MoveOp::SIMD128:
      if (to.isMemory()) {
        vixl::UseScratchRegisterScope temps(&masm.asVIXL());
        const ARMFPRegister scratch128 = temps.AcquireQ();
        masm.Ldr(scratch128, toMemOperand(to));
        masm.Str(scratch128, cycleSlot());
      } else {
        masm.Str(toFPReg(to, type), cycleSlot());
      }
      break;

    case MoveOp::INT32:
      if (to.isMemory()) {
        masm.Ldr(cycleGeneralReg_.W(), toMemOperand(to));
      } else {
        masm.Mov(cycleGeneralReg_.W(), toARMReg32(to));
      }
      break;

    case MoveOp::GENERAL:
      if (to.isMemory()) {
        masm.Ldr(cycleGeneralReg_.X(), toMemOperand(to));
      } else {
        masm.Mov(cycleGeneralReg_.X(), toARMReg64(to));
      }
      break;

    default:
      MOZ_CRASH("Unexpected move type");
  }
}

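// Restore the value saved by breakCycle() into the final destination of the
// cycle, completing it.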
void MoveEmitterARM64::completeCycle(const MoveOperand& from,
                                     const MoveOperand& to, MoveOp::Type type) {
  switch (type) {
    case MoveOp::FLOAT32:
      if (to.isMemory()) {
        masm.Str(cycleGeneralReg_.W(), toMemOperand(to));
      } else {
        masm.Fmov(toFPReg(to, type), cycleGeneralReg_.W());
      }
      break;

    case MoveOp::DOUBLE:
      if (to.isMemory()) {
        masm.Str(cycleGeneralReg_.X(), toMemOperand(to));
      } else {
        masm.Fmov(toFPReg(to, type), cycleGeneralReg_.X());
      }
      break;

    case MoveOp::SIMD128:
      if (to.isMemory()) {
        vixl::UseScratchRegisterScope temps(&masm.asVIXL());
        const ARMFPRegister scratch = temps.AcquireQ();
        masm.Ldr(scratch, cycleSlot());
        masm.Str(scratch, toMemOperand(to));
      } else {
        masm.Ldr(toFPReg(to, type), cycleSlot());
      }
      break;

    case MoveOp::INT32:
      if (to.isMemory()) {
        masm.Str(cycleGeneralReg_.W(), toMemOperand(to));
      } else {
        masm.Mov(toARMReg32(to), cycleGeneralReg_.W());
      }
      break;

    case MoveOp::GENERAL:
      if (to.isMemory()) {
        masm.Str(cycleGeneralReg_.X(), toMemOperand(to));
      } else {
        masm.Mov(toARMReg64(to), cycleGeneralReg_.X());
      }
      break;

    default:
      MOZ_CRASH("Unexpected move type");
  }
}