tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

commit 253f810be52f48f616e70d7963685fb6b6359651
parent fc1c8be451e59491c85ec930e1028f142c56379a
Author: André Bargull <andre.bargull@gmail.com>
Date:   Fri, 24 Oct 2025 14:58:55 +0000

Bug 1996087: Allow lhs==dest when optimising mul as shift. r=spidermonkey-reviewers,jandem

It's easy to modify this code to also allow optimising to a shift instruction
when `lhs` is equal to `dest`.

Differential Revision: https://phabricator.services.mozilla.com/D269844

Diffstat:
M js/src/jit/loong64/CodeGenerator-loong64.cpp | 33++++++++++++++++++---------------
M js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp | 29+++++++++++++++++------------
M js/src/jit/riscv64/CodeGenerator-riscv64.cpp | 33++++++++++++++++++---------------
3 files changed, 53 insertions(+), 42 deletions(-)

diff --git a/js/src/jit/loong64/CodeGenerator-loong64.cpp b/js/src/jit/loong64/CodeGenerator-loong64.cpp @@ -800,41 +800,44 @@ void CodeGenerator::visitMulI(LMulI* ins) { if (!mul->canOverflow()) { // If it cannot overflow, we can do lots of optimizations. - uint32_t rest = constant - (1 << shift); // See if the constant has one bit set, meaning it can be // encoded as a bitshift. if ((1 << shift) == constant) { - masm.as_slli_w(dest, lhs, shift % 32); + masm.as_slli_w(dest, lhs, shift); return; } // If the constant cannot be encoded as (1<<C1), see if it can // be encoded as (1<<C1) | (1<<C2), which can be computed // using an add and a shift. + uint32_t rest = constant - (1 << shift); uint32_t shift_rest = mozilla::FloorLog2(rest); - if (lhs != dest && (1u << shift_rest) == rest) { - masm.as_slli_w(dest, lhs, (shift - shift_rest) % 32); - masm.add32(lhs, dest); + if ((1u << shift_rest) == rest) { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + + masm.as_slli_w(scratch, lhs, (shift - shift_rest)); + masm.as_add_w(dest, scratch, lhs); if (shift_rest != 0) { - masm.as_slli_w(dest, dest, shift_rest % 32); + masm.as_slli_w(dest, dest, shift_rest); } return; } } else { - // To stay on the safe side, only optimize things that are a - // power of 2. - if (lhs != dest && (1 << shift) == constant) { + // To stay on the safe side, only optimize things that are a power of 2. + if ((1 << shift) == constant) { UseScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); + // dest = lhs * pow(2, shift) - masm.as_slli_w(dest, lhs, shift % 32); - // At runtime, check (lhs == dest >> shift), if this does - // not hold, some bits were lost due to overflow, and the + masm.as_slli_d(dest, lhs, shift); + + // At runtime, check (dest >> shift == intptr_t(dest) >> shift), if + // this does not hold, some bits were lost due to overflow, and the // computation should be resumed as a double. 
- masm.as_srai_w(scratch, dest, shift % 32); - bailoutCmp32(Assembler::NotEqual, lhs, Register(scratch), - ins->snapshot()); + masm.as_slli_w(scratch, dest, 0); + bailoutCmp32(Assembler::NotEqual, dest, scratch, ins->snapshot()); return; } } diff --git a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp @@ -297,9 +297,9 @@ void CodeGenerator::visitMulI(LMulI* ins) { if (constant > 0) { uint32_t shift = mozilla::FloorLog2(constant); + if (!mul->canOverflow()) { // If it cannot overflow, we can do lots of optimizations. - uint32_t rest = constant - (1 << shift); // See if the constant has one bit set, meaning it can be // encoded as a bitshift. @@ -311,28 +311,33 @@ void CodeGenerator::visitMulI(LMulI* ins) { // If the constant cannot be encoded as (1<<C1), see if it can // be encoded as (1<<C1) | (1<<C2), which can be computed // using an add and a shift. + uint32_t rest = constant - (1 << shift); uint32_t shift_rest = mozilla::FloorLog2(rest); - if (lhs != dest && (1u << shift_rest) == rest) { - masm.ma_sll(dest, lhs, Imm32(shift - shift_rest)); - masm.add32(lhs, dest); + if ((1u << shift_rest) == rest) { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + + masm.ma_sll(scratch, lhs, Imm32(shift - shift_rest)); + masm.as_addu(dest, scratch, lhs); if (shift_rest != 0) { masm.ma_sll(dest, dest, Imm32(shift_rest)); } return; } } else { - // To stay on the safe side, only optimize things that are a - // power of 2. - if (lhs != dest && (1 << shift) == constant) { + // To stay on the safe side, only optimize things that are a power of 2. 
+ if ((1 << shift) == constant) { UseScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); + // dest = lhs * pow(2, shift) - masm.ma_sll(dest, lhs, Imm32(shift)); - // At runtime, check (lhs == dest >> shift), if this does - // not hold, some bits were lost due to overflow, and the + masm.ma_dsll(dest, lhs, Imm32(shift)); + + // At runtime, check (dest >> shift == intptr_t(dest) >> shift), if + // this does not hold, some bits were lost due to overflow, and the // computation should be resumed as a double. - masm.ma_sra(scratch, dest, Imm32(shift)); - bailoutCmp32(Assembler::NotEqual, lhs, scratch, ins->snapshot()); + masm.ma_sll(scratch, dest, Imm32(0)); + bailoutCmp32(Assembler::NotEqual, dest, scratch, ins->snapshot()); return; } } diff --git a/js/src/jit/riscv64/CodeGenerator-riscv64.cpp b/js/src/jit/riscv64/CodeGenerator-riscv64.cpp @@ -829,41 +829,44 @@ void CodeGenerator::visitMulI(LMulI* ins) { if (!mul->canOverflow()) { // If it cannot overflow, we can do lots of optimizations. - uint32_t rest = constant - (1 << shift); // See if the constant has one bit set, meaning it can be // encoded as a bitshift. if ((1 << shift) == constant) { - masm.slliw(dest, lhs, shift % 32); + masm.slliw(dest, lhs, shift); return; } // If the constant cannot be encoded as (1<<C1), see if it can // be encoded as (1<<C1) | (1<<C2), which can be computed // using an add and a shift. 
+ uint32_t rest = constant - (1 << shift); uint32_t shift_rest = mozilla::FloorLog2(rest); - if (lhs != dest && (1u << shift_rest) == rest) { - masm.slliw(dest, lhs, (shift - shift_rest) % 32); - masm.add32(lhs, dest); + if ((1u << shift_rest) == rest) { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + + masm.slliw(scratch, lhs, (shift - shift_rest)); + masm.addw(dest, scratch, lhs); if (shift_rest != 0) { - masm.slliw(dest, dest, shift_rest % 32); + masm.slliw(dest, dest, shift_rest); } return; } } else { - // To stay on the safe side, only optimize things that are a - // power of 2. - if (lhs != dest && (1 << shift) == constant) { + // To stay on the safe side, only optimize things that are a power of 2. + if ((1 << shift) == constant) { UseScratchRegisterScope temps(&masm); Register scratch = temps.Acquire(); + // dest = lhs * pow(2, shift) - masm.slliw(dest, lhs, shift % 32); - // At runtime, check (lhs == dest >> shift), if this does - // not hold, some bits were lost due to overflow, and the + masm.slli(dest, lhs, shift); + + // At runtime, check (dest >> shift == intptr_t(dest) >> shift), if + // this does not hold, some bits were lost due to overflow, and the // computation should be resumed as a double. - masm.sraiw(scratch, dest, shift % 32); - bailoutCmp32(Assembler::NotEqual, lhs, Register(scratch), - ins->snapshot()); + masm.sext_w(scratch, dest); + bailoutCmp32(Assembler::NotEqual, dest, scratch, ins->snapshot()); return; } }