tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Chunk.h (7597B)


      1 /* This Source Code Form is subject to the terms of the Mozilla Public
      2 * License, v. 2.0. If a copy of the MPL was not distributed with this
      3 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
      4 
      5 #ifndef CHUNK_H
      6 #define CHUNK_H
      7 
      8 #include "mozilla/Atomics.h"
      9 
     10 #include "RadixTree.h"
     11 #include "RedBlackTree.h"
     12 
     13 #include "mozilla/DoublyLinkedList.h"
     14 
     15 // ***************************************************************************
     16 // Structures for chunk headers for chunks used for non-huge allocations.
     17 
     18 struct arena_t;
     19 
     20 enum ChunkType {
     21  UNKNOWN_CHUNK,   // chunk's current use is not known (default state).
     22  ZEROED_CHUNK,    // chunk only contains zeroes.
     23  ARENA_CHUNK,     // used to back arena runs created by arena_t::AllocRun.
     24  HUGE_CHUNK,      // used to back huge allocations (e.g. arena_t::MallocHuge).
     25  RECYCLED_CHUNK,  // chunk has been stored for future use by chunk_recycle.
     26 };
     27 
     28 // Each element of the chunk map corresponds to one page within the chunk.
     29 struct arena_chunk_map_t {
     30  // Linkage for run trees. Used for arena_t's tree or available runs.
     31  RedBlackTreeNode<arena_chunk_map_t> link;
     32 
     33  // Run address (or size) and various flags are stored together.  The bit
     34  // layout looks like (assuming 32-bit system):
     35  //
     36  //   ???????? ???????? ????---b fmckdzla
     37  //
     38  // ? : Unallocated: Run address for first/last pages, unset for internal
     39  //                  pages.
     40  //     Small: Run address.
     41  //     Large: Run size for first page, unset for trailing pages.
     42  // - : Unused.
     43  // b : Busy?
     44  // f : Fresh memory?
     45  // m : MADV_FREE/MADV_DONTNEED'ed?
     46  // c : decommitted?
     47  // k : key?
     48  // d : dirty?
     49  // z : zeroed?
     50  // l : large?
     51  // a : allocated?
     52  //
     53  // Following are example bit patterns for consecutive pages from the three
     54  // types of runs.
     55  //
     56  // r : run address
     57  // s : run size
     58  // x : don't care
     59  // - : 0
     60  // [cdzla] : bit set
     61  //
     62  //   Unallocated:
     63  //     ssssssss ssssssss ssss---- --c-----
     64  //     xxxxxxxx xxxxxxxx xxxx---- ----d---
     65  //     ssssssss ssssssss ssss---- -----z--
     66  //
     67  //     Note that the size fields are set for the first and last unallocated
     68  //     page only.  The pages in-between have invalid/"don't care" size fields,
     69  //     they're not cleared during things such as coalescing free runs.
     70  //
     71  //     Pages before the first or after the last page in a free run must be
     72  //     allocated or busy.  Run coalescing depends on the sizes being set in
     73  //     the first and last page.  Purging pages and releasing chunks require
     74  //     that unallocated pages are always coalesced and the first page has a
     75  //     correct size.
     76  //
     77  //   Small:
     78  //     rrrrrrrr rrrrrrrr rrrr---- -------a
     79  //     rrrrrrrr rrrrrrrr rrrr---- -------a
     80  //     rrrrrrrr rrrrrrrr rrrr---- -------a
     81  //
     82  //   Large:
     83  //     ssssssss ssssssss ssss---- ------la
     84  //     -------- -------- -------- ------la
     85  //     -------- -------- -------- ------la
     86  //
     87  //     Note that only the first page has the size set.
     88  //
     89  size_t bits;
     90 
     91 // A page can be in one of several states.
     92 //
     93 // CHUNK_MAP_ALLOCATED marks allocated pages, the only other bit that can be
     94 // combined is CHUNK_MAP_LARGE.
     95 //
     96 // CHUNK_MAP_LARGE may be combined with CHUNK_MAP_ALLOCATED to show that the
     97 // allocation is a "large" allocation (see SizeClass), rather than a run of
     98 // small allocations.  The interpretation of the gPageSizeMask bits depends on
     99 // this bit, see the description above.
    100 //
    101 // CHUNK_MAP_DIRTY is used to mark pages that were allocated and are now freed.
    102 // They may contain their previous contents (or poison).  CHUNK_MAP_DIRTY, when
    103 // set, must be the only set bit.
    104 //
    105 // CHUNK_MAP_MADVISED marks pages which are madvised (with either MADV_DONTNEED
    106 // or MADV_FREE).  This is only valid if MALLOC_DECOMMIT is not defined.  When
    107 // set, it must be the only bit set.
    108 //
    109 // CHUNK_MAP_DECOMMITTED is used if MALLOC_DECOMMIT is defined.  Unused
    110 // dirty pages may be decommitted and marked as CHUNK_MAP_DECOMMITTED.  They
    111 // must be re-committed with pages_commit() before they can be touched.
    112 //
    113 // CHUNK_MAP_FRESH is set on pages that have never been used before (the chunk
    114 // is newly allocated or they were decommitted and have now been recommitted).
    115 // CHUNK_MAP_FRESH is also used for "double purged" pages meaning that they were
    116 // madvised and later were unmapped and remapped to force them out of the
    117 // program's resident set.  This is enabled when MALLOC_DOUBLE_PURGE is defined
    118 // (eg on MacOS).
    119 //
    120 // CHUNK_MAP_BUSY is set by a thread when the thread wants to manipulate the
    121 // pages without holding a lock. Other threads must not touch these pages
    122 // regardless of whether they hold a lock.
    123 //
    124 // CHUNK_MAP_ZEROED is set on pages that are known to contain zeros.
    125 //
    126 // CHUNK_MAP_DIRTY, _DECOMMITTED, _MADVISED and _FRESH are always mutually
    127 // exclusive.
    128 //
    129 // CHUNK_MAP_KEY is never used on real pages, only on lookup keys.
    130 //
    131 #define CHUNK_MAP_BUSY ((size_t)0x100U)
    132 #define CHUNK_MAP_FRESH ((size_t)0x80U)
    133 #define CHUNK_MAP_MADVISED ((size_t)0x40U)
    134 #define CHUNK_MAP_DECOMMITTED ((size_t)0x20U)
    135 #define CHUNK_MAP_MADVISED_OR_DECOMMITTED \
    136  (CHUNK_MAP_MADVISED | CHUNK_MAP_DECOMMITTED)
    137 #define CHUNK_MAP_FRESH_MADVISED_OR_DECOMMITTED \
    138  (CHUNK_MAP_FRESH | CHUNK_MAP_MADVISED | CHUNK_MAP_DECOMMITTED)
    139 #define CHUNK_MAP_FRESH_MADVISED_DECOMMITTED_OR_BUSY              \
    140  (CHUNK_MAP_FRESH | CHUNK_MAP_MADVISED | CHUNK_MAP_DECOMMITTED | \
    141   CHUNK_MAP_BUSY)
    142 #define CHUNK_MAP_KEY ((size_t)0x10U)
    143 #define CHUNK_MAP_DIRTY ((size_t)0x08U)
    144 #define CHUNK_MAP_ZEROED ((size_t)0x04U)
    145 #define CHUNK_MAP_LARGE ((size_t)0x02U)
    146 #define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
    147 };
    148 
    149 // Arena chunk header.
    150 struct arena_chunk_t {
    151  // Arena that owns the chunk.
    152  arena_t* mArena;
    153 
    154  // Linkage for the arena's list of dirty chunks.
    155  mozilla::DoublyLinkedListElement<arena_chunk_t> mChunksDirtyElim;
    156 
    157 #ifdef MALLOC_DOUBLE_PURGE
    158  // If we're double-purging, we maintain a linked list of chunks which
    159  // have pages which have been madvise(MADV_FREE)'d but not explicitly
    160  // purged.
    161  //
    162  // We're currently lazy and don't remove a chunk from this list when
    163  // all its madvised pages are recommitted.
    164  mozilla::DoublyLinkedListElement<arena_chunk_t> mChunksMavisedElim;  // NOTE(review): name looks like a typo for "Madvised" — verify against users before renaming.
    165 #endif
    166 
    167  // Number of dirty pages that may be purged, the header is never counted
    168  // here.
    169  uint16_t mNumDirty = 0;
    170 
    171  // This will point to the page index of the first run that may have dirty
    172  // pages.
    173  uint16_t mDirtyRunHint;
    174 
    175  bool mIsPurging = false;  // NOTE(review): presumably "a purge of this chunk is in progress" — confirm against the purging code.
    176  bool mDying = false;      // NOTE(review): presumably "chunk is being torn down" — confirm against the release path.
    177 
    178  // Map of pages within chunk that keeps track of free/large/small.
    179  arena_chunk_map_t mPageMap[];  // Dynamically sized.
    180 
    181  // Constructor is defined out of line (not visible in this header).
    182  explicit arena_chunk_t(arena_t* aArena);
    183 
    184  bool IsEmpty();
    185 };
    185 
    186 [[nodiscard]] bool pages_commit(void* aAddr, size_t aSize);  // Returns false on failure.
    187 
    188 void pages_decommit(void* aAddr, size_t aSize);  // Decommitted pages must be re-committed via pages_commit() before being touched (see CHUNK_MAP_DECOMMITTED above).
    189 
    190 void chunks_init();
    191 
    192 void* chunk_alloc(size_t aSize, size_t aAlignment, bool aBase);  // NOTE(review): aBase presumably selects the base (internal) allocator — confirm at call sites.
    193 
    194 void chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType);  // aType describes the chunk's prior use (see ChunkType).
    195 #ifdef MOZ_DEBUG
    196 void chunk_assert_zero(void* aPtr, size_t aSize);
    197 #endif
    198 
    199 extern mozilla::Atomic<size_t> gRecycledSize;  // NOTE(review): presumably total bytes held as RECYCLED_CHUNKs — confirm against chunk_recycle.
    200 
    201 extern AddressRadixTree<(sizeof(void*) << 3) - LOG2(kChunkSize)> gChunkRTree;  // Keyed on the address bits above the chunk size (pointer bits minus log2(kChunkSize)).
    202 
    203 enum ShouldCommit {
    204  // Reserve address space only, accessing the mapping will crash.
    205  ReserveOnly,
    206 
    207  // Reserve the address space and populate it with "valid" page mappings.
    208  // On Windows this will commit memory; on Linux it populates memory as it
    209  // is accessed (with overcommit behaviour).
    210  ReserveAndCommit,
    211 };
    212 
        // Create an aligned mapping of |size| bytes; commitment behaviour is
        // selected by |should_commit| (see ShouldCommit above).
    213 void* pages_mmap_aligned(size_t size, size_t alignment,
    214                         ShouldCommit should_commit);
    215 
    216 #endif /* ! CHUNK_H */