tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

commit e1b581d72df74a05e3a18e1aacf26604d4f302de
parent 0fbec504335c89af2b478620c6dee04b935faf4c
Author: André Bargull <andre.bargull@gmail.com>
Date:   Fri, 24 Oct 2025 14:58:53 +0000

Bug 1996083 - Part 1: Don't require reuse-input allocations for ALU instructions on loong64. r=spidermonkey-reviewers,jandem

Lowering changes happen in part 5.

Differential Revision: https://phabricator.services.mozilla.com/D269833

Diffstat:
M js/src/jit/loong64/CodeGenerator-loong64.cpp | 182 +++++++++++++++++++++++++++++++++++++------------------------------------------
M js/src/jit/loong64/MacroAssembler-loong64.cpp | 9 +++------
M js/src/jit/loong64/MacroAssembler-loong64.h | 6 +++---
3 files changed, 92 insertions(+), 105 deletions(-)

diff --git a/js/src/jit/loong64/CodeGenerator-loong64.cpp b/js/src/jit/loong64/CodeGenerator-loong64.cpp @@ -27,7 +27,6 @@ using namespace js; using namespace js::jit; using JS::GenericNaN; -using mozilla::FloorLog2; using mozilla::NegativeInfinity; // shared @@ -685,17 +684,15 @@ void CodeGenerator::visitAddIntPtr(LAddIntPtr* ins) { } void CodeGenerator::visitAddI64(LAddI64* lir) { - LInt64Allocation lhs = lir->lhs(); + Register lhs = ToRegister64(lir->lhs()).reg; LInt64Allocation rhs = lir->rhs(); - - MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs)); + Register dest = ToOutRegister64(lir).reg; if (IsConstant(rhs)) { - masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs)); - return; + masm.ma_add_d(dest, lhs, ImmWord(ToInt64(rhs))); + } else { + masm.as_add_d(dest, lhs, ToRegister64(rhs).reg); } - - masm.add64(ToRegister64(rhs), ToRegister64(lhs)); } void CodeGenerator::visitSubI(LSubI* ins) { @@ -741,17 +738,15 @@ void CodeGenerator::visitSubIntPtr(LSubIntPtr* ins) { } void CodeGenerator::visitSubI64(LSubI64* lir) { - LInt64Allocation lhs = lir->lhs(); + Register lhs = ToRegister64(lir->lhs()).reg; LInt64Allocation rhs = lir->rhs(); - - MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs)); + Register dest = ToOutRegister64(lir).reg; if (IsConstant(rhs)) { - masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs)); - return; + masm.ma_sub_d(dest, lhs, ImmWord(ToInt64(rhs))); + } else { + masm.as_sub_d(dest, lhs, ToRegister64(rhs).reg); } - - masm.sub64(ToRegister64(rhs), ToRegister64(lhs)); } void CodeGenerator::visitMulI(LMulI* ins) { @@ -1192,52 +1187,46 @@ void CodeGenerator::visitModMaskI(LModMaskI* ins) { } void CodeGenerator::visitBitNotI(LBitNotI* ins) { - const LAllocation* input = ins->input(); - const LDefinition* dest = ins->output(); - MOZ_ASSERT(!input->isConstant()); - - masm.as_nor(ToRegister(dest), ToRegister(input), zero); + Register input = ToRegister(ins->input()); + Register dest = ToRegister(ins->output()); + masm.as_nor(dest, input, zero); } 
void CodeGenerator::visitBitNotI64(LBitNotI64* ins) { - LInt64Allocation input = ins->input(); - MOZ_ASSERT(!IsConstant(input)); - Register64 inputReg = ToRegister64(input); - MOZ_ASSERT(inputReg == ToOutRegister64(ins)); - masm.as_nor(inputReg.reg, inputReg.reg, zero); + Register input = ToRegister64(ins->input()).reg; + Register dest = ToOutRegister64(ins).reg; + masm.as_nor(dest, input, zero); } void CodeGenerator::visitBitOpI(LBitOpI* ins) { - const LAllocation* lhs = ins->lhs(); + Register lhs = ToRegister(ins->lhs()); const LAllocation* rhs = ins->rhs(); - const LDefinition* dest = ins->output(); + Register dest = ToRegister(ins->output()); + // all of these bitops should be either imm32's, or integer registers. switch (ins->bitop()) { case JSOp::BitOr: if (rhs->isConstant()) { - masm.ma_or(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)), - true); + masm.ma_or(dest, lhs, Imm32(ToInt32(rhs))); } else { - masm.as_or(ToRegister(dest), ToRegister(lhs), ToRegister(rhs)); - masm.as_slli_w(ToRegister(dest), ToRegister(dest), 0); + masm.as_or(dest, lhs, ToRegister(rhs)); + masm.as_slli_w(dest, dest, 0); } break; case JSOp::BitXor: if (rhs->isConstant()) { - masm.ma_xor(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)), - true); + masm.ma_xor(dest, lhs, Imm32(ToInt32(rhs))); } else { - masm.as_xor(ToRegister(dest), ToRegister(lhs), ToRegister(rhs)); - masm.as_slli_w(ToRegister(dest), ToRegister(dest), 0); + masm.as_xor(dest, lhs, ToRegister(rhs)); + masm.as_slli_w(dest, dest, 0); } break; case JSOp::BitAnd: if (rhs->isConstant()) { - masm.ma_and(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)), - true); + masm.ma_and(dest, lhs, Imm32(ToInt32(rhs))); } else { - masm.as_and(ToRegister(dest), ToRegister(lhs), ToRegister(rhs)); - masm.as_slli_w(ToRegister(dest), ToRegister(dest), 0); + masm.as_and(dest, lhs, ToRegister(rhs)); + masm.as_slli_w(dest, dest, 0); } break; default: @@ -1246,32 +1235,30 @@ void CodeGenerator::visitBitOpI(LBitOpI* ins) { } void 
CodeGenerator::visitBitOpI64(LBitOpI64* lir) { - LInt64Allocation lhs = lir->lhs(); - LInt64Allocation rhs = lir->rhs(); + Register lhs = ToRegister64(lir->lhs()).reg; + Register rhs; + Register dest = ToOutRegister64(lir).reg; - MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs)); + UseScratchRegisterScope temps(masm); + if (IsConstant(lir->rhs())) { + rhs = temps.Acquire(); + + // Small immediates can be handled without the load immediate instruction, + // but this optimisation isn't yet implemented. + masm.ma_li(rhs, ImmWord(ToInt64(lir->rhs()))); + } else { + rhs = ToRegister64(lir->rhs()).reg; + } switch (lir->bitop()) { case JSOp::BitOr: - if (IsConstant(rhs)) { - masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs)); - } else { - masm.or64(ToRegister64(rhs), ToRegister64(lhs)); - } + masm.as_or(dest, lhs, rhs); break; case JSOp::BitXor: - if (IsConstant(rhs)) { - masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs)); - } else { - masm.xor64(ToRegister64(rhs), ToRegister64(lhs)); - } + masm.as_xor(dest, lhs, rhs); break; case JSOp::BitAnd: - if (IsConstant(rhs)) { - masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs)); - } else { - masm.and64(ToRegister64(rhs), ToRegister64(lhs)); - } + masm.as_and(dest, lhs, rhs); break; default: MOZ_CRASH("unexpected binary opcode"); @@ -1343,18 +1330,22 @@ void CodeGenerator::visitShiftIntPtr(LShiftIntPtr* ins) { if (rhs->isConstant()) { int32_t shift = ToIntPtr(rhs) & 0x3F; - switch (ins->bitop()) { - case JSOp::Lsh: - masm.as_slli_d(dest, lhs, shift); - break; - case JSOp::Rsh: - masm.as_srai_d(dest, lhs, shift); - break; - case JSOp::Ursh: - masm.as_srli_d(dest, lhs, shift); - break; - default: - MOZ_CRASH("Unexpected shift op"); + if (shift) { + switch (ins->bitop()) { + case JSOp::Lsh: + masm.as_slli_d(dest, lhs, shift); + break; + case JSOp::Rsh: + masm.as_srai_d(dest, lhs, shift); + break; + case JSOp::Ursh: + masm.as_srli_d(dest, lhs, shift); + break; + default: + MOZ_CRASH("Unexpected shift op"); + } + } else if (lhs != 
dest) { + masm.movePtr(lhs, dest); } } else { Register shift = ToRegister(rhs); @@ -1375,44 +1366,42 @@ void CodeGenerator::visitShiftIntPtr(LShiftIntPtr* ins) { } void CodeGenerator::visitShiftI64(LShiftI64* lir) { - LInt64Allocation lhs = lir->lhs(); + Register lhs = ToRegister64(lir->lhs()).reg; const LAllocation* rhs = lir->rhs(); - - MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs)); + Register dest = ToOutRegister64(lir).reg; if (rhs->isConstant()) { int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F); - switch (lir->bitop()) { - case JSOp::Lsh: - if (shift) { - masm.lshift64(Imm32(shift), ToRegister64(lhs)); - } - break; - case JSOp::Rsh: - if (shift) { - masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs)); - } - break; - case JSOp::Ursh: - if (shift) { - masm.rshift64(Imm32(shift), ToRegister64(lhs)); - } - break; - default: - MOZ_CRASH("Unexpected shift op"); + if (shift) { + switch (lir->bitop()) { + case JSOp::Lsh: + masm.as_slli_d(dest, lhs, shift); + break; + case JSOp::Rsh: + masm.as_srai_d(dest, lhs, shift); + break; + case JSOp::Ursh: + masm.as_srli_d(dest, lhs, shift); + break; + default: + MOZ_CRASH("Unexpected shift op"); + } + } else if (lhs != dest) { + masm.movePtr(lhs, dest); } return; } + Register shift = ToRegister(rhs); switch (lir->bitop()) { case JSOp::Lsh: - masm.lshift64(ToRegister(rhs), ToRegister64(lhs)); + masm.as_sll_d(dest, lhs, shift); break; case JSOp::Rsh: - masm.rshift64Arithmetic(ToRegister(rhs), ToRegister64(lhs)); + masm.as_sra_d(dest, lhs, shift); break; case JSOp::Ursh: - masm.rshift64(ToRegister(rhs), ToRegister64(lhs)); + masm.as_srl_d(dest, lhs, shift); break; default: MOZ_CRASH("Unexpected shift op"); @@ -2058,9 +2047,10 @@ void CodeGenerator::visitNegI(LNegI* ins) { } void CodeGenerator::visitNegI64(LNegI64* ins) { - Register64 input = ToRegister64(ins->input()); - MOZ_ASSERT(input == ToOutRegister64(ins)); - masm.neg64(input); + Register input = ToRegister64(ins->input()).reg; + Register output = 
ToOutRegister64(ins).reg; + + masm.as_sub_d(output, zero, input); } void CodeGenerator::visitNegD(LNegD* ins) { diff --git a/js/src/jit/loong64/MacroAssembler-loong64.cpp b/js/src/jit/loong64/MacroAssembler-loong64.cpp @@ -1495,8 +1495,7 @@ void MacroAssemblerLOONG64::ma_fmovn(FloatFormat fmt, FloatRegister fd, bind(&done); } -void MacroAssemblerLOONG64::ma_and(Register rd, Register rj, Imm32 imm, - bool bit32) { +void MacroAssemblerLOONG64::ma_and(Register rd, Register rj, Imm32 imm) { if (is_uintN(imm.value, 12)) { as_andi(rd, rj, imm.value); } else if (rd != rj) { @@ -1511,8 +1510,7 @@ void MacroAssemblerLOONG64::ma_and(Register rd, Register rj, Imm32 imm, } } -void MacroAssemblerLOONG64::ma_or(Register rd, Register rj, Imm32 imm, - bool bit32) { +void MacroAssemblerLOONG64::ma_or(Register rd, Register rj, Imm32 imm) { if (is_uintN(imm.value, 12)) { as_ori(rd, rj, imm.value); } else { @@ -1524,8 +1522,7 @@ void MacroAssemblerLOONG64::ma_or(Register rd, Register rj, Imm32 imm, } } -void MacroAssemblerLOONG64::ma_xor(Register rd, Register rj, Imm32 imm, - bool bit32) { +void MacroAssemblerLOONG64::ma_xor(Register rd, Register rj, Imm32 imm) { if (is_uintN(imm.value, 12)) { as_xori(rd, rj, imm.value); } else { diff --git a/js/src/jit/loong64/MacroAssembler-loong64.h b/js/src/jit/loong64/MacroAssembler-loong64.h @@ -273,11 +273,11 @@ class MacroAssemblerLOONG64 : public Assembler { void ma_fmovn(FloatFormat fmt, FloatRegister fd, FloatRegister fj, Register rk); - void ma_and(Register rd, Register rj, Imm32 imm, bool bit32 = false); + void ma_and(Register rd, Register rj, Imm32 imm); - void ma_or(Register rd, Register rj, Imm32 imm, bool bit32 = false); + void ma_or(Register rd, Register rj, Imm32 imm); - void ma_xor(Register rd, Register rj, Imm32 imm, bool bit32 = false); + void ma_xor(Register rd, Register rj, Imm32 imm); // load FaultingCodeOffset ma_load(Register dest, const BaseIndex& src,