commit 1949d39c47bf940312a88072d90d0e49775d21e7
parent f1ea8fcc82f116506a7b3d2edd8329ef91985294
Author: André Bargull <andre.bargull@gmail.com>
Date: Mon, 27 Oct 2025 09:00:02 +0000
Bug 1996346 - Part 3: Split lowering and codegen for LShiftI64. r=spidermonkey-reviewers,iain
Split into separate implementations for x86 and x64 in preparation for part 4.
Differential Revision: https://phabricator.services.mozilla.com/D270027
Diffstat:
9 files changed, 167 insertions(+), 98 deletions(-)
diff --git a/js/src/jit/x64/CodeGenerator-x64.cpp b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -365,6 +365,52 @@ void CodeGenerator::visitShiftIntPtr(LShiftIntPtr* ins) {
}
}
+// x64 codegen for 64-bit shifts (JSOp::Lsh / Rsh / Ursh). Lowering defines
+// the output by reusing the LHS input, so the shift is performed in place
+// on |lhs|.
+void CodeGenerator::visitShiftI64(LShiftI64* lir) {
+  Register64 lhs = ToRegister64(lir->lhs());
+  const LAllocation* rhs = lir->rhs();
+
+  // Output must alias the LHS (defineInt64ReuseInput in lowering).
+  MOZ_ASSERT(ToOutRegister64(lir) == lhs);
+
+  if (rhs->isConstant()) {
+    // Constant counts are masked to [0, 63]; a zero count emits no code.
+    int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
+    switch (lir->bitop()) {
+      case JSOp::Lsh:
+        if (shift) {
+          masm.lshift64(Imm32(shift), lhs);
+        }
+        break;
+      case JSOp::Rsh:
+        // Arithmetic (sign-propagating) right shift.
+        if (shift) {
+          masm.rshift64Arithmetic(Imm32(shift), lhs);
+        }
+        break;
+      case JSOp::Ursh:
+        // Logical (zero-filling) right shift.
+        if (shift) {
+          masm.rshift64(Imm32(shift), lhs);
+        }
+        break;
+      default:
+        MOZ_CRASH("Unexpected shift op");
+    }
+    return;
+  }
+
+  // Variable count: on x64 the lowering used useShiftRegister, so no
+  // particular register is required here (unlike the x86 version).
+  Register shift = ToRegister(rhs);
+  switch (lir->bitop()) {
+    case JSOp::Lsh:
+      masm.lshift64(shift, lhs);
+      break;
+    case JSOp::Rsh:
+      masm.rshift64Arithmetic(shift, lhs);
+      break;
+    case JSOp::Ursh:
+      masm.rshift64(shift, lhs);
+      break;
+    default:
+      MOZ_CRASH("Unexpected shift op");
+  }
+}
+
void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
Register elements = ToRegister(lir->elements());
Register64 out = ToOutRegister64(lir);
diff --git a/js/src/jit/x64/Lowering-x64.cpp b/js/src/jit/x64/Lowering-x64.cpp
@@ -80,6 +80,38 @@ void LIRGeneratorX64::lowerForMulInt64(LMulI64* ins, MMul* mir,
}
}
+// x64 lowering shared by LShiftI64 and LRotateI64. The LHS is used
+// at-start and the output reuses it in place. Variable shift counts may
+// live in any general register (useShiftRegister); variable rotate counts
+// are pinned to rcx — presumably required by the rotate codegen, which
+// this patch does not show.
+template <class LInstr>
+void LIRGeneratorX64::lowerForShiftInt64(LInstr* ins, MDefinition* mir,
+                                         MDefinition* lhs, MDefinition* rhs) {
+  LAllocation rhsAlloc;
+  if (rhs->isConstant()) {
+    rhsAlloc = useOrConstantAtStart(rhs);
+  } else if (std::is_same_v<LInstr, LShiftI64>) {
+    // Compile-time constant condition; each instantiation takes one branch.
+    rhsAlloc = useShiftRegister(rhs);
+  } else {
+    rhsAlloc = useFixed(rhs, rcx);
+  }
+
+  // LShiftI64 and LRotateI64 name their operands differently, so select
+  // the setters with if constexpr.
+  if constexpr (std::is_same_v<LInstr, LShiftI64>) {
+    ins->setLhs(useInt64RegisterAtStart(lhs));
+    ins->setRhs(rhsAlloc);
+    defineInt64ReuseInput(ins, mir, LShiftI64::LhsIndex);
+  } else {
+    ins->setInput(useInt64RegisterAtStart(lhs));
+    ins->setCount(rhsAlloc);
+    defineInt64ReuseInput(ins, mir, LRotateI64::InputIndex);
+  }
+}
+
+// Explicit instantiations for the two supported LIR node types.
+template void LIRGeneratorX64::lowerForShiftInt64(LShiftI64* ins,
+                                                  MDefinition* mir,
+                                                  MDefinition* lhs,
+                                                  MDefinition* rhs);
+template void LIRGeneratorX64::lowerForShiftInt64(LRotateI64* ins,
+                                                  MDefinition* mir,
+                                                  MDefinition* lhs,
+                                                  MDefinition* rhs);
+
void LIRGenerator::visitBox(MBox* box) {
MDefinition* opd = box->getOperand(0);
diff --git a/js/src/jit/x64/Lowering-x64.h b/js/src/jit/x64/Lowering-x64.h
@@ -31,6 +31,10 @@ class LIRGeneratorX64 : public LIRGeneratorX86Shared {
void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs,
MDefinition* rhs);
+ template <class LInstr>
+ void lowerForShiftInt64(LInstr* ins, MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs);
+
// Returns a box allocation. reg2 is ignored on 64-bit platforms.
LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register,
bool useAtStart = false);
diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -1594,55 +1594,6 @@ void CodeGenerator::visitShiftI(LShiftI* ins) {
}
}
-void CodeGenerator::visitShiftI64(LShiftI64* lir) {
- Register64 lhs = ToRegister64(lir->lhs());
- const LAllocation* rhs = lir->rhs();
-
- MOZ_ASSERT(ToOutRegister64(lir) == lhs);
-
- if (rhs->isConstant()) {
- int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
- switch (lir->bitop()) {
- case JSOp::Lsh:
- if (shift) {
- masm.lshift64(Imm32(shift), lhs);
- }
- break;
- case JSOp::Rsh:
- if (shift) {
- masm.rshift64Arithmetic(Imm32(shift), lhs);
- }
- break;
- case JSOp::Ursh:
- if (shift) {
- masm.rshift64(Imm32(shift), lhs);
- }
- break;
- default:
- MOZ_CRASH("Unexpected shift op");
- }
- return;
- }
-
- Register shift = ToRegister(rhs);
-#ifdef JS_CODEGEN_X86
- MOZ_ASSERT(shift == ecx);
-#endif
- switch (lir->bitop()) {
- case JSOp::Lsh:
- masm.lshift64(shift, lhs);
- break;
- case JSOp::Rsh:
- masm.rshift64Arithmetic(shift, lhs);
- break;
- case JSOp::Ursh:
- masm.rshift64(shift, lhs);
- break;
- default:
- MOZ_CRASH("Unexpected shift op");
- }
-}
-
void CodeGenerator::visitUrshD(LUrshD* ins) {
Register lhs = ToRegister(ins->lhs());
const LAllocation* rhs = ins->rhs();
diff --git a/js/src/jit/x86-shared/Lowering-x86-shared.cpp b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
@@ -87,51 +87,6 @@ void LIRGeneratorX86Shared::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
}
}
-template <class LInstr>
-void LIRGeneratorX86Shared::lowerForShiftInt64(LInstr* ins, MDefinition* mir,
- MDefinition* lhs,
- MDefinition* rhs) {
- LAllocation rhsAlloc;
- if (rhs->isConstant()) {
- rhsAlloc = useOrConstantAtStart(rhs);
-#ifdef JS_CODEGEN_X64
- } else if (std::is_same_v<LInstr, LShiftI64>) {
- rhsAlloc = useShiftRegister(rhs);
- } else {
- rhsAlloc = useFixed(rhs, rcx);
- }
-#else
- } else {
- // The operands are int64, but we only care about the lower 32 bits of
- // the RHS. On 32-bit, the code below will load that part in ecx and
- // will discard the upper half.
- rhsAlloc = useLowWordFixed(rhs, ecx);
- }
-#endif
-
- if constexpr (std::is_same_v<LInstr, LShiftI64>) {
- ins->setLhs(useInt64RegisterAtStart(lhs));
- ins->setRhs(rhsAlloc);
- defineInt64ReuseInput(ins, mir, LShiftI64::LhsIndex);
- } else {
- ins->setInput(useInt64RegisterAtStart(lhs));
- ins->setCount(rhsAlloc);
-#if defined(JS_NUNBOX32)
- ins->setTemp0(temp());
-#endif
- defineInt64ReuseInput(ins, mir, LRotateI64::InputIndex);
- }
-}
-
-template void LIRGeneratorX86Shared::lowerForShiftInt64(LShiftI64* ins,
- MDefinition* mir,
- MDefinition* lhs,
- MDefinition* rhs);
-template void LIRGeneratorX86Shared::lowerForShiftInt64(LRotateI64* ins,
- MDefinition* mir,
- MDefinition* lhs,
- MDefinition* rhs);
-
void LIRGeneratorX86Shared::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
MDefinition* mir, MDefinition* input) {
ins->setOperand(0, useRegisterAtStart(input));
diff --git a/js/src/jit/x86-shared/Lowering-x86-shared.h b/js/src/jit/x86-shared/Lowering-x86-shared.h
@@ -32,10 +32,6 @@ class LIRGeneratorX86Shared : public LIRGeneratorShared {
void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
MDefinition* lhs, MDefinition* rhs);
- template <class LInstr>
- void lowerForShiftInt64(LInstr* ins, MDefinition* mir, MDefinition* lhs,
- MDefinition* rhs);
-
void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
MDefinition* input);
void lowerForFPU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
diff --git a/js/src/jit/x86/CodeGenerator-x86.cpp b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -1056,6 +1056,53 @@ void CodeGenerator::visitShiftIntPtr(LShiftIntPtr* ins) {
}
}
+// x86 (32-bit) codegen for 64-bit shifts. The 64-bit value occupies a
+// register pair; lowering defines the output by reusing the LHS input, so
+// the shift happens in place.
+void CodeGenerator::visitShiftI64(LShiftI64* lir) {
+  Register64 lhs = ToRegister64(lir->lhs());
+  const LAllocation* rhs = lir->rhs();
+
+  // Output must alias the LHS (defineInt64ReuseInput in lowering).
+  MOZ_ASSERT(ToOutRegister64(lir) == lhs);
+
+  if (rhs->isConstant()) {
+    // Constant counts are masked to [0, 63]; a zero count emits no code.
+    int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
+    switch (lir->bitop()) {
+      case JSOp::Lsh:
+        if (shift) {
+          masm.lshift64(Imm32(shift), lhs);
+        }
+        break;
+      case JSOp::Rsh:
+        // Arithmetic (sign-propagating) right shift.
+        if (shift) {
+          masm.rshift64Arithmetic(Imm32(shift), lhs);
+        }
+        break;
+      case JSOp::Ursh:
+        // Logical (zero-filling) right shift.
+        if (shift) {
+          masm.rshift64(Imm32(shift), lhs);
+        }
+        break;
+      default:
+        MOZ_CRASH("Unexpected shift op");
+    }
+    return;
+  }
+
+  Register shift = ToRegister(rhs);
+  // Lowering fixed the variable count to ecx (useLowWordFixed), matching
+  // the register the 64-bit shift sequence consumes.
+  MOZ_ASSERT(shift == ecx);
+  switch (lir->bitop()) {
+    case JSOp::Lsh:
+      masm.lshift64(shift, lhs);
+      break;
+    case JSOp::Rsh:
+      masm.rshift64Arithmetic(shift, lhs);
+      break;
+    case JSOp::Ursh:
+      masm.rshift64(shift, lhs);
+      break;
+    default:
+      MOZ_CRASH("Unexpected shift op");
+  }
+}
+
void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
diff --git a/js/src/jit/x86/Lowering-x86.cpp b/js/src/jit/x86/Lowering-x86.cpp
@@ -226,6 +226,40 @@ void LIRGeneratorX86::lowerForMulInt64(LMulI64* ins, MMul* mir,
LAllocation(AnyRegister(eax))));
}
+// x86 (32-bit) lowering shared by LShiftI64 and LRotateI64. The LHS is
+// used at-start and the output reuses it in place. Variable counts are
+// fixed to ecx; rotates additionally get a temp register.
+template <class LInstr>
+void LIRGeneratorX86::lowerForShiftInt64(LInstr* ins, MDefinition* mir,
+                                         MDefinition* lhs, MDefinition* rhs) {
+  LAllocation rhsAlloc;
+  if (rhs->isConstant()) {
+    rhsAlloc = useOrConstantAtStart(rhs);
+  } else {
+    // The operands are int64, but we only care about the lower 32 bits of the
+    // RHS. The code below will load that part in ecx and will discard the upper
+    // half.
+    rhsAlloc = useLowWordFixed(rhs, ecx);
+  }
+
+  // LShiftI64 and LRotateI64 name their operands differently, so select
+  // the setters with if constexpr.
+  if constexpr (std::is_same_v<LInstr, LShiftI64>) {
+    ins->setLhs(useInt64RegisterAtStart(lhs));
+    ins->setRhs(rhsAlloc);
+    defineInt64ReuseInput(ins, mir, LShiftI64::LhsIndex);
+  } else {
+    ins->setInput(useInt64RegisterAtStart(lhs));
+    ins->setCount(rhsAlloc);
+    // Rotates need a scratch register on 32-bit (value spans two registers).
+    ins->setTemp0(temp());
+    defineInt64ReuseInput(ins, mir, LRotateI64::InputIndex);
+  }
+}
+
+// Explicit instantiations for the two supported LIR node types.
+template void LIRGeneratorX86::lowerForShiftInt64(LShiftI64* ins,
+                                                  MDefinition* mir,
+                                                  MDefinition* lhs,
+                                                  MDefinition* rhs);
+template void LIRGeneratorX86::lowerForShiftInt64(LRotateI64* ins,
+                                                  MDefinition* mir,
+                                                  MDefinition* lhs,
+                                                  MDefinition* rhs);
+
void LIRGenerator::visitCompareExchangeTypedArrayElement(
MCompareExchangeTypedArrayElement* ins) {
MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
diff --git a/js/src/jit/x86/Lowering-x86.h b/js/src/jit/x86/Lowering-x86.h
@@ -50,6 +50,10 @@ class LIRGeneratorX86 : public LIRGeneratorX86Shared {
void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs,
MDefinition* rhs);
+ template <class LInstr>
+ void lowerForShiftInt64(LInstr* ins, MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs);
+
void lowerTruncateDToInt32(MTruncateToInt32* ins);
void lowerTruncateFToInt32(MTruncateToInt32* ins);
void lowerBuiltinInt64ToFloatingPoint(MBuiltinInt64ToFloatingPoint* ins);