commit f22bc7265e0d693110bbc2c5b730684299da2ab7
parent fa2c1791e96293e659a4b8546c5833533a87d154
Author: Paul Bone <paul@bone.id.au>
Date: Wed, 29 Oct 2025 06:32:45 +0000
Bug 1976162 - pt 4. Rename BaseAlloc member variables r=glandium
Name these member variables according to our coding style.
Differential Revision: https://phabricator.services.mozilla.com/D256415
Diffstat:
3 files changed, 49 insertions(+), 47 deletions(-)
diff --git a/memory/build/BaseAlloc.cpp b/memory/build/BaseAlloc.cpp
@@ -13,9 +13,9 @@ using namespace mozilla;
MOZ_CONSTINIT BaseAlloc sBaseAlloc;
// Initialize base allocation data structures.
-void BaseAlloc::Init() MOZ_REQUIRES(gInitLock) { base_mtx.Init(); }
+void BaseAlloc::Init() MOZ_REQUIRES(gInitLock) { mMutex.Init(); }
-bool BaseAlloc::pages_alloc(size_t minsize) MOZ_REQUIRES(base_mtx) {
+bool BaseAlloc::pages_alloc(size_t minsize) MOZ_REQUIRES(mMutex) {
size_t csize;
size_t pminsize;
@@ -25,17 +25,17 @@ bool BaseAlloc::pages_alloc(size_t minsize) MOZ_REQUIRES(base_mtx) {
if (!base_pages) {
return false;
}
- base_next_addr = base_pages;
- base_past_addr = (void*)((uintptr_t)base_pages + csize);
+ mNextAddr = base_pages;
+ mPastAddr = (void*)((uintptr_t)base_pages + csize);
// Leave enough pages for minsize committed, since otherwise they would
// have to be immediately recommitted.
pminsize = PAGE_CEILING(minsize);
- base_next_decommitted = (void*)((uintptr_t)base_pages + pminsize);
+ mNextDecommitted = (void*)((uintptr_t)base_pages + pminsize);
if (pminsize < csize) {
- pages_decommit(base_next_decommitted, csize - pminsize);
+ pages_decommit(mNextDecommitted, csize - pminsize);
}
- mStats.mapped += csize;
- mStats.committed += pminsize;
+ mStats.mMapped += csize;
+ mStats.mCommitted += pminsize;
return true;
}
@@ -47,29 +47,28 @@ void* BaseAlloc::alloc(size_t aSize) {
// Round size up to nearest multiple of the cacheline size.
csize = CACHELINE_CEILING(aSize);
- MutexAutoLock lock(base_mtx);
+ MutexAutoLock lock(mMutex);
// Make sure there's enough space for the allocation.
- if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
+ if ((uintptr_t)mNextAddr + csize > (uintptr_t)mPastAddr) {
if (!pages_alloc(csize)) {
return nullptr;
}
}
// Allocate.
- ret = base_next_addr;
- base_next_addr = (void*)((uintptr_t)base_next_addr + csize);
+ ret = mNextAddr;
+ mNextAddr = (void*)((uintptr_t)mNextAddr + csize);
// Make sure enough pages are committed for the new allocation.
- if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
- void* pbase_next_addr = (void*)(PAGE_CEILING((uintptr_t)base_next_addr));
+ if ((uintptr_t)mNextAddr > (uintptr_t)mNextDecommitted) {
+ void* pbase_next_addr = (void*)(PAGE_CEILING((uintptr_t)mNextAddr));
- if (!pages_commit(
- base_next_decommitted,
- (uintptr_t)pbase_next_addr - (uintptr_t)base_next_decommitted)) {
+ if (!pages_commit(mNextDecommitted,
+ (uintptr_t)mNextAddr - (uintptr_t)mNextDecommitted)) {
return nullptr;
}
- mStats.committed +=
- (uintptr_t)pbase_next_addr - (uintptr_t)base_next_decommitted;
- base_next_decommitted = pbase_next_addr;
+ mStats.mCommitted +=
+ (uintptr_t)pbase_next_addr - (uintptr_t)mNextDecommitted;
+ mNextDecommitted = pbase_next_addr;
}
return ret;
diff --git a/memory/build/BaseAlloc.h b/memory/build/BaseAlloc.h
@@ -19,33 +19,36 @@ class BaseAlloc {
void* calloc(size_t aNumber, size_t aSize);
- Mutex base_mtx;
+ Mutex mMutex;
struct Stats {
- size_t mapped = 0;
- size_t committed = 0;
+ size_t mMapped = 0;
+ size_t mCommitted = 0;
};
- Stats GetStats() MOZ_EXCLUDES(base_mtx) {
- MutexAutoLock lock(base_mtx);
+ Stats GetStats() MOZ_EXCLUDES(mMutex) {
+ MutexAutoLock lock(mMutex);
- MOZ_ASSERT(mStats.mapped >= mStats.committed);
+ MOZ_ASSERT(mStats.mMapped >= mStats.mCommitted);
return mStats;
}
private:
// Allocate fresh pages to satsify at least minsize.
- bool pages_alloc(size_t minsize) MOZ_REQUIRES(base_mtx);
-
- // Current pages that are being used for internal memory allocations. These
- // pages are carved up in cacheline-size quanta, so that there is no chance of
- // false cache line sharing.
- void* base_next_addr MOZ_GUARDED_BY(base_mtx) = nullptr;
-
- void* base_next_decommitted MOZ_GUARDED_BY(base_mtx) = nullptr;
- // Address immediately past base_pages.
- void* base_past_addr MOZ_GUARDED_BY(base_mtx) = nullptr;
-
- Stats mStats MOZ_GUARDED_BY(base_mtx);
+ bool pages_alloc(size_t minsize) MOZ_REQUIRES(mMutex);
+
+ // BaseAlloc uses bump-pointer allocation from mNextAddr. In general
+ // mNextAddr <= mNextDecommitted <= mPastAddr.
+ //
+  // If an allocation would cause mNextAddr > mPastAddr then a new chunk is
+  // required (from pages_alloc()). Else-if an allocation would cause
+  // mNextAddr > mNextDecommitted then some of the memory is decommitted and
+  // pages_commit() is needed before the memory can be used.
+ void* mNextAddr MOZ_GUARDED_BY(mMutex) = nullptr;
+ void* mNextDecommitted MOZ_GUARDED_BY(mMutex) = nullptr;
+ // Address immediately past the current chunk of pages.
+ void* mPastAddr MOZ_GUARDED_BY(mMutex) = nullptr;
+
+ Stats mStats MOZ_GUARDED_BY(mMutex);
};
extern BaseAlloc sBaseAlloc;
@@ -60,13 +63,13 @@ struct TypedBaseAlloc {
static T* alloc() {
T* ret;
- sBaseAlloc.base_mtx.Lock();
+ sBaseAlloc.mMutex.Lock();
if (sFirstFree) {
ret = sFirstFree;
sFirstFree = *(T**)ret;
- sBaseAlloc.base_mtx.Unlock();
+ sBaseAlloc.mMutex.Unlock();
} else {
- sBaseAlloc.base_mtx.Unlock();
+ sBaseAlloc.mMutex.Unlock();
ret = (T*)sBaseAlloc.alloc(size_of());
}
@@ -74,7 +77,7 @@ struct TypedBaseAlloc {
}
static void dealloc(T* aNode) {
- MutexAutoLock lock(sBaseAlloc.base_mtx);
+ MutexAutoLock lock(sBaseAlloc.mMutex);
*(T**)aNode = sFirstFree;
sFirstFree = aNode;
}
diff --git a/memory/build/mozjemalloc.cpp b/memory/build/mozjemalloc.cpp
@@ -4261,8 +4261,8 @@ inline void MozJemalloc::jemalloc_stats_internal(
// Get base mapped/allocated.
auto base_stats = sBaseAlloc.GetStats();
- non_arena_mapped += base_stats.mapped;
- aStats->bookkeeping += base_stats.committed;
+ non_arena_mapped += base_stats.mMapped;
+ aStats->bookkeeping += base_stats.mCommitted;
gArenas.mLock.Lock();
@@ -4767,7 +4767,7 @@ void _malloc_prefork(void) MOZ_NO_THREAD_SAFETY_ANALYSIS {
gArenas.mPurgeListLock.Lock();
- sBaseAlloc.base_mtx.Lock();
+ sBaseAlloc.mMutex.Lock();
huge_mtx.Lock();
}
@@ -4777,7 +4777,7 @@ void _malloc_postfork_parent(void) MOZ_NO_THREAD_SAFETY_ANALYSIS {
// Release all mutexes, now that fork() has completed.
huge_mtx.Unlock();
- sBaseAlloc.base_mtx.Unlock();
+ sBaseAlloc.mMutex.Unlock();
gArenas.mPurgeListLock.Unlock();
@@ -4798,7 +4798,7 @@ void _malloc_postfork_child(void) {
// Reinitialize all mutexes, now that fork() has completed.
huge_mtx.Init();
- sBaseAlloc.base_mtx.Init();
+ sBaseAlloc.mMutex.Init();
gArenas.mPurgeListLock.Init();