tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

commit a82e3b9c431fbfe9a06c776440cc360973498c06
parent cf057d8b8aa0fd023ecac8bb7d094db042a4267a
Author: Jan de Mooij <jdemooij@mozilla.com>
Date:   Fri, 19 Dec 2025 08:40:39 +0000

Bug 2004893 part 3 - Add batching for source compression tasks. r=jonco

Instead of creating a `SourceCompressionTask` for each `ScriptSource`, we can now
use some heuristics to put multiple script sources into a single task to eliminate
per-task overhead. Short script sources are quite common.

Differential Revision: https://phabricator.services.mozilla.com/D276486

Diffstat:
M js/src/vm/HelperThreadState.h | 4 ++++
M js/src/vm/HelperThreads.cpp | 31 +++++++++++++++++++++++++++++++
2 files changed, 35 insertions(+), 0 deletions(-)

diff --git a/js/src/vm/HelperThreadState.h b/js/src/vm/HelperThreadState.h
@@ -635,6 +635,10 @@ class SourceCompressionTask final : public HelperThreadTask {
   bool runtimeMatches(JSRuntime* runtime) const { return runtime == runtime_; }
 
+  [[nodiscard]] bool addEntry(ScriptSource* source) {
+    return entries_.emplaceBack(source);
+  }
+
   void runTask();
   void runHelperThreadTask(AutoLockHelperThreadState& locked) override;
   void complete();
diff --git a/js/src/vm/HelperThreads.cpp b/js/src/vm/HelperThreads.cpp
@@ -1614,6 +1614,20 @@ void GlobalHelperThreadState::createAndSubmitCompressionTasks(
   // First create the SourceCompressionTasks and add them to a Vector.
   Vector<UniquePtr<SourceCompressionTask>, 8, SystemAllocPolicy> tasksToSubmit;
 
+  // We use some simple heuristics to batch multiple script sources in a single
+  // SourceCompressionTask, to reduce overhead for small script sources.
+  //
+  // MaxBatchLength is the maximum length (in characters) for a single batch.
+  // If a single script source exceeds this length, it will get its own
+  // SourceCompressionTask.
+  //
+  // The main downside of increasing the MaxBatchLength threshold is that a
+  // large compression task could block a helper thread from taking on higher
+  // priority work.
+  static constexpr size_t MaxBatchLength = 300'000;
+  SourceCompressionTask* currentBatch = nullptr;
+  size_t currentBatchLength = 0;
+
   rt->pendingCompressions().eraseIf([&](const auto& entry) {
     MOZ_ASSERT(entry.source()->hasUncompressedSource());
@@ -1630,13 +1644,30 @@ void GlobalHelperThreadState::createAndSubmitCompressionTasks(
       return false;
     }
 
+    // Add this entry to the current batch if the total length doesn't exceed
+    // MaxBatchLength.
+    size_t length = entry.source()->length();
+    if (currentBatch && currentBatchLength + length <= MaxBatchLength) {
+      if (!currentBatch->addEntry(entry.source())) {
+        return false;
+      }
+      currentBatchLength += length;
+      return true;
+    }
+
     // Heap allocate the task. It will be freed upon compression completing in
     // AttachFinishedCompressedSources. On OOM we leave the pending compression
     // in the vector.
     auto ownedTask = MakeUnique<SourceCompressionTask>(rt, entry.source());
+    SourceCompressionTask* task = ownedTask.get();
     if (!ownedTask || !tasksToSubmit.append(std::move(ownedTask))) {
       return false;
     }
+
+    // Heuristic: prefer the task with the smallest source length for batching.
+    if (!currentBatch || length < currentBatchLength) {
+      currentBatch = task;
+      currentBatchLength = length;
+    }
     return true;
   });
 
   if (rt->pendingCompressions().empty()) {