commit 9beac8b6754bd0211a614dc70095426eede73e27
parent 47587ddd0a9722e8ce4612e2e31fad87ddef9afc
Author: Paul Bone <paul@bone.id.au>
Date: Wed, 29 Oct 2025 06:32:44 +0000
Bug 1976162 - pt 1. Move the base allocator into a class r=glandium
* The Mutex class now supports constexpr construction on platforms
other than Windows.
* StaticMutex supports constexpr construction on Windows (and
therefore on all platforms). Now it can be a member of a class with a
constexpr constructor.
Differential Revision: https://phabricator.services.mozilla.com/D256412
Diffstat:
4 files changed, 64 insertions(+), 51 deletions(-)
diff --git a/memory/build/BaseAlloc.cpp b/memory/build/BaseAlloc.cpp
@@ -10,29 +10,12 @@
using namespace mozilla;
-MOZ_CONSTINIT Mutex base_mtx;
-
-// Current pages that are being used for internal memory allocations. These
-// pages are carved up in cacheline-size quanta, so that there is no chance of
-// false cache line sharing.
-static void* base_pages MOZ_GUARDED_BY(base_mtx);
-static void* base_next_addr MOZ_GUARDED_BY(base_mtx);
-static void* base_next_decommitted MOZ_GUARDED_BY(base_mtx);
-// Address immediately past base_pages.
-static void* base_past_addr MOZ_GUARDED_BY(base_mtx);
-size_t base_mapped MOZ_GUARDED_BY(base_mtx);
-size_t base_committed MOZ_GUARDED_BY(base_mtx);
+MOZ_CONSTINIT BaseAlloc sBaseAlloc;
// Initialize base allocation data structures.
-void base_init() MOZ_REQUIRES(gInitLock) {
- base_mtx.Init();
- MOZ_PUSH_IGNORE_THREAD_SAFETY
- base_mapped = 0;
- base_committed = 0;
- MOZ_POP_THREAD_SAFETY
-}
+void BaseAlloc::Init() MOZ_REQUIRES(gInitLock) { base_mtx.Init(); }
-static bool base_pages_alloc(size_t minsize) MOZ_REQUIRES(base_mtx) {
+bool BaseAlloc::pages_alloc(size_t minsize) MOZ_REQUIRES(base_mtx) {
size_t csize;
size_t pminsize;
@@ -51,13 +34,13 @@ static bool base_pages_alloc(size_t minsize) MOZ_REQUIRES(base_mtx) {
if (pminsize < csize) {
pages_decommit(base_next_decommitted, csize - pminsize);
}
- base_mapped += csize;
- base_committed += pminsize;
+ mStats.mapped += csize;
+ mStats.committed += pminsize;
return false;
}
-void* base_alloc(size_t aSize) {
+void* BaseAlloc::alloc(size_t aSize) {
void* ret;
size_t csize;
@@ -67,7 +50,7 @@ void* base_alloc(size_t aSize) {
MutexAutoLock lock(base_mtx);
// Make sure there's enough space for the allocation.
if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
- if (base_pages_alloc(csize)) {
+ if (pages_alloc(csize)) {
return nullptr;
}
}
@@ -84,7 +67,7 @@ void* base_alloc(size_t aSize) {
return nullptr;
}
- base_committed +=
+ mStats.committed +=
(uintptr_t)pbase_next_addr - (uintptr_t)base_next_decommitted;
base_next_decommitted = pbase_next_addr;
}
@@ -92,8 +75,8 @@ void* base_alloc(size_t aSize) {
return ret;
}
-void* base_calloc(size_t aNumber, size_t aSize) {
- void* ret = base_alloc(aNumber * aSize);
+void* BaseAlloc::calloc(size_t aNumber, size_t aSize) {
+ void* ret = alloc(aNumber * aSize);
if (ret) {
memset(ret, 0, aNumber * aSize);
}
diff --git a/memory/build/BaseAlloc.h b/memory/build/BaseAlloc.h
@@ -7,16 +7,49 @@
#include "Mutex.h"
-extern Mutex base_mtx;
+// The base allocator is a simple memory allocator used internally by
+// mozjemalloc for its own structures.
+class BaseAlloc {
+ public:
+ constexpr BaseAlloc() {};
-extern size_t base_mapped MOZ_GUARDED_BY(base_mtx);
-extern size_t base_committed MOZ_GUARDED_BY(base_mtx);
+ void Init() MOZ_REQUIRES(gInitLock);
-void base_init() MOZ_REQUIRES(gInitLock);
+ void* alloc(size_t aSize);
-void* base_alloc(size_t aSize);
+ void* calloc(size_t aNumber, size_t aSize);
-void* base_calloc(size_t aNumber, size_t aSize);
+ Mutex base_mtx;
+
+ struct Stats {
+ size_t mapped = 0;
+ size_t committed = 0;
+ };
+ Stats GetStats() MOZ_EXCLUDES(base_mtx) {
+ MutexAutoLock lock(base_mtx);
+
+ MOZ_ASSERT(mStats.mapped >= mStats.committed);
+ return mStats;
+ }
+
+ private:
+  // Allocate fresh pages to satisfy at least minsize.
+ bool pages_alloc(size_t minsize) MOZ_REQUIRES(base_mtx);
+
+ // Current pages that are being used for internal memory allocations. These
+ // pages are carved up in cacheline-size quanta, so that there is no chance of
+ // false cache line sharing.
+ void* base_pages MOZ_GUARDED_BY(base_mtx) = nullptr;
+ void* base_next_addr MOZ_GUARDED_BY(base_mtx) = nullptr;
+
+ void* base_next_decommitted MOZ_GUARDED_BY(base_mtx) = nullptr;
+ // Address immediately past base_pages.
+ void* base_past_addr MOZ_GUARDED_BY(base_mtx) = nullptr;
+
+ Stats mStats MOZ_GUARDED_BY(base_mtx);
+};
+
+extern BaseAlloc sBaseAlloc;
// A specialization of the base allocator with a free list.
template <typename T>
@@ -28,21 +61,21 @@ struct TypedBaseAlloc {
static T* alloc() {
T* ret;
- base_mtx.Lock();
+ sBaseAlloc.base_mtx.Lock();
if (sFirstFree) {
ret = sFirstFree;
sFirstFree = *(T**)ret;
- base_mtx.Unlock();
+ sBaseAlloc.base_mtx.Unlock();
} else {
- base_mtx.Unlock();
- ret = (T*)base_alloc(size_of());
+ sBaseAlloc.base_mtx.Unlock();
+ ret = (T*)sBaseAlloc.alloc(size_of());
}
return ret;
}
static void dealloc(T* aNode) {
- MutexAutoLock lock(base_mtx);
+ MutexAutoLock lock(sBaseAlloc.base_mtx);
*(T**)aNode = sFirstFree;
sFirstFree = aNode;
}
diff --git a/memory/build/RadixTree.h b/memory/build/RadixTree.h
@@ -77,7 +77,7 @@ class AddressRadixTree {
template <size_t Bits>
bool AddressRadixTree<Bits>::Init() {
mLock.Init();
- mRoot = (void**)base_calloc(1 << kBitsAtLevel1, sizeof(void*));
+ mRoot = (void**)sBaseAlloc.calloc(1 << kBitsAtLevel1, sizeof(void*));
return mRoot;
}
@@ -95,7 +95,7 @@ void** AddressRadixTree<Bits>::GetSlotInternal(void* aAddr, bool aCreate) {
subkey = (key << lshift) >> ((sizeof(void*) << 3) - bits);
child = (void**)node[subkey];
if (!child && aCreate) {
- child = (void**)base_calloc(1 << kBitsPerLevel, sizeof(void*));
+ child = (void**)sBaseAlloc.calloc(1 << kBitsPerLevel, sizeof(void*));
if (child) {
node[subkey] = child;
}
diff --git a/memory/build/mozjemalloc.cpp b/memory/build/mozjemalloc.cpp
@@ -2663,7 +2663,7 @@ void arena_t::InitPRNG() {
*mPRNG = prng;
} else {
void* backing =
- base_alloc(sizeof(mozilla::non_crypto::XorShift128PlusRNG));
+ sBaseAlloc.alloc(sizeof(mozilla::non_crypto::XorShift128PlusRNG));
mPRNG = new (backing)
mozilla::non_crypto::XorShift128PlusRNG(std::move(prng));
}
@@ -3998,7 +3998,7 @@ static bool malloc_init_hard() {
chunks_init();
huge_init();
- base_init();
+ sBaseAlloc.Init();
// Initialize arenas collection here.
if (!gArenas.Init()) {
@@ -4260,12 +4260,9 @@ inline void MozJemalloc::jemalloc_stats_internal(
}
// Get base mapped/allocated.
- {
- MutexAutoLock lock(base_mtx);
- non_arena_mapped += base_mapped;
- aStats->bookkeeping += base_committed;
- MOZ_ASSERT(base_mapped >= base_committed);
- }
+ auto base_stats = sBaseAlloc.GetStats();
+ non_arena_mapped += base_stats.mapped;
+ aStats->bookkeeping += base_stats.committed;
gArenas.mLock.Lock();
@@ -4770,7 +4767,7 @@ void _malloc_prefork(void) MOZ_NO_THREAD_SAFETY_ANALYSIS {
gArenas.mPurgeListLock.Lock();
- base_mtx.Lock();
+ sBaseAlloc.base_mtx.Lock();
huge_mtx.Lock();
}
@@ -4780,7 +4777,7 @@ void _malloc_postfork_parent(void) MOZ_NO_THREAD_SAFETY_ANALYSIS {
// Release all mutexes, now that fork() has completed.
huge_mtx.Unlock();
- base_mtx.Unlock();
+ sBaseAlloc.base_mtx.Unlock();
gArenas.mPurgeListLock.Unlock();
@@ -4801,7 +4798,7 @@ void _malloc_postfork_child(void) {
// Reinitialize all mutexes, now that fork() has completed.
huge_mtx.Init();
- base_mtx.Init();
+ sBaseAlloc.base_mtx.Init();
gArenas.mPurgeListLock.Init();