tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

LifoAlloc.cpp (13088B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "ds/LifoAlloc.h"
      8 
      9 #include "mozilla/Likely.h"
     10 #include "mozilla/MathAlgorithms.h"
     11 
     12 #include <algorithm>
     13 
     14 #ifdef LIFO_CHUNK_PROTECT
     15 #  include "gc/Memory.h"
     16 #endif
     17 
     18 using namespace js;
     19 
     20 namespace js {
     21 namespace detail {
     22 
     23 /* static */
     24 UniquePtr<BumpChunk> BumpChunk::newWithCapacity(size_t size, arena_id_t arena) {
     25  MOZ_DIAGNOSTIC_ASSERT(size >= sizeof(BumpChunk));
     26  void* mem = js_arena_malloc(arena, size);
     27  if (!mem) {
     28    return nullptr;
     29  }
     30 
     31  UniquePtr<BumpChunk> result(new (mem) BumpChunk(size));
     32 
     33  // We assume that the alignment of LIFO_ALLOC_ALIGN is less than that of the
     34  // underlying memory allocator -- creating a new BumpChunk should always
     35  // satisfy the LIFO_ALLOC_ALIGN alignment constraint.
     36  MOZ_ASSERT(AlignPtr(result->begin()) == result->begin());
     37  return result;
     38 }
     39 
     40 #ifdef LIFO_CHUNK_PROTECT
     41 
     42 static uint8_t* AlignPtrUp(uint8_t* ptr, uintptr_t align) {
     43  MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
     44  uintptr_t uptr = uintptr_t(ptr);
     45  uintptr_t diff = uptr & (align - 1);
     46  diff = (align - diff) & (align - 1);
     47  uptr = uptr + diff;
     48  return (uint8_t*)uptr;
     49 }
     50 
     51 static uint8_t* AlignPtrDown(uint8_t* ptr, uintptr_t align) {
     52  MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
     53  uintptr_t uptr = uintptr_t(ptr);
     54  uptr = uptr & ~(align - 1);
     55  return (uint8_t*)uptr;
     56 }
     57 
     58 void BumpChunk::setReadOnly() {
     59  uintptr_t pageSize = gc::SystemPageSize();
     60  // The allocated chunks might not be aligned on page boundaries. This code
     61  // is used to ensure that we are changing the memory protection of pointers
     62  // which are within the range of the BumpChunk, or that the range formed by
     63  // [b .. e] is empty.
     64  uint8_t* b = base();
     65  uint8_t* e = capacity_;
     66  b = AlignPtrUp(b, pageSize);
     67  e = AlignPtrDown(e, pageSize);
     68  if (e <= b) {
     69    return;
     70  }
     71  gc::MakePagesReadOnly(b, e - b);
     72 }
     73 
     74 void BumpChunk::setReadWrite() {
     75  uintptr_t pageSize = gc::SystemPageSize();
     76  // The allocated chunks might not be aligned on page boundaries. This code
     77  // is used to ensure that we are changing the memory protection of pointers
     78  // which are within the range of the BumpChunk, or that the range formed by
     79  // [b .. e] is empty.
     80  uint8_t* b = base();
     81  uint8_t* e = capacity_;
     82  b = AlignPtrUp(b, pageSize);
     83  e = AlignPtrDown(e, pageSize);
     84  if (e <= b) {
     85    return;
     86  }
     87  gc::UnprotectPages(b, e - b);
     88 }
     89 
     90 #endif
     91 
     92 }  // namespace detail
     93 }  // namespace js
     94 
     95 void LifoAlloc::reset(size_t defaultChunkSize) {
     96  MOZ_ASSERT(mozilla::IsPowerOfTwo(defaultChunkSize));
     97 
     98  while (!chunks_.empty()) {
     99    chunks_.popFirst();
    100  }
    101  while (!oversize_.empty()) {
    102    oversize_.popFirst();
    103  }
    104  while (!unused_.empty()) {
    105    unused_.popFirst();
    106  }
    107  defaultChunkSize_ = defaultChunkSize;
    108  oversizeThreshold_ = defaultChunkSize;
    109  markCount = 0;
    110  curSize_ = 0;
    111  smallAllocsSize_ = 0;
    112 }
    113 
    114 void LifoAlloc::freeAll() {
    115  // When free-ing all chunks, we can no longer determine which chunks were
    116  // transferred and which were not, so simply clear the heuristic to zero
    117  // right away.
    118  smallAllocsSize_ = 0;
    119 
    120  while (!chunks_.empty()) {
    121    UniqueBumpChunk bc = chunks_.popFirst();
    122    decrementCurSize(bc->computedSizeOfIncludingThis());
    123  }
    124  while (!oversize_.empty()) {
    125    UniqueBumpChunk bc = oversize_.popFirst();
    126    decrementCurSize(bc->computedSizeOfIncludingThis());
    127  }
    128  while (!unused_.empty()) {
    129    UniqueBumpChunk bc = unused_.popFirst();
    130    decrementCurSize(bc->computedSizeOfIncludingThis());
    131  }
    132 
    133  // Nb: maintaining curSize_ correctly isn't easy.  Fortunately, this is an
    134  // excellent sanity check.
    135  MOZ_ASSERT(curSize_ == 0);
    136 }
    137 
// Round at the same page granularity used by malloc.
static size_t MallocGoodSize(size_t aSize) {
#if defined(MOZ_MEMORY)
  // mozjemalloc exposes the size class a request would actually occupy; ask
  // for that so no allocator slop is wasted.
  return malloc_good_size(aSize);
#else
  // Without mozjemalloc there is no portable way to query the allocator, so
  // use the requested size unchanged.
  return aSize;
#endif
}
    146 
    147 // Heuristic to choose the size of the next BumpChunk for small allocations.
    148 // `start` is the size of the first chunk. `used` is the total size of all
    149 // BumpChunks in this LifoAlloc so far.
    150 static size_t NextSize(size_t start, size_t used) {
    151  // Double the size, up to 1 MB.
    152  const size_t mb = 1 * 1024 * 1024;
    153  if (used < mb) {
    154    return std::max(start, used);
    155  }
    156 
    157  // After 1 MB, grow more gradually, to waste less memory.
    158  // The sequence (in megabytes) begins:
    159  // 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, ...
    160  return RoundUp(used / 8, mb);
    161 }
    162 
// Allocate a fresh BumpChunk able to hold at least |n| bytes. Returns nullptr
// on overflow of the size computation or on OOM. |oversize| forces a
// tightly-sized chunk instead of the growth heuristic.
LifoAlloc::UniqueBumpChunk LifoAlloc::newChunkWithCapacity(size_t n,
                                                           bool oversize) {
  MOZ_ASSERT(fallibleScope_,
             "[OOM] Cannot allocate a new chunk in an infallible scope.");

  // Compute the size which should be requested in order to be able to fit |n|
  // bytes in a newly allocated chunk, or default to |defaultChunkSize_|.

  size_t minSize;
  // Reject the request if adding the chunk header / red-zone overhead
  // overflows, or if the result has the top bit of size_t set (a size that
  // large can never be a sane allocation request).
  if (MOZ_UNLIKELY(
          !detail::BumpChunk::allocSizeWithRedZone(n, &minSize) ||
          (minSize &
           (size_t(1) << (std::numeric_limits<size_t>::digits - 1))))) {
    return nullptr;
  }

  // Note: When computing chunkSize growth, we only are interested in chunks
  // used for small allocations. This excludes unused chunks, oversized chunks,
  // and chunks transferred in from another LifoAlloc.
  MOZ_ASSERT(curSize_ >= smallAllocsSize_);
  const size_t chunkSize = (oversize || minSize > defaultChunkSize_)
                               ? MallocGoodSize(minSize)
                               : NextSize(defaultChunkSize_, smallAllocsSize_);

  // Create a new BumpChunk, and allocate space for it.
  UniqueBumpChunk result =
      detail::BumpChunk::newWithCapacity(chunkSize, arena_);
  if (!result) {
    return nullptr;
  }
  MOZ_ASSERT(result->computedSizeOfIncludingThis() == chunkSize);
  return result;
}
    196 
// Return a chunk able to hold |n| bytes, preferring to recycle an existing
// unused chunk over allocating a new one. Returns nullptr on OOM.
LifoAlloc::UniqueBumpChunk LifoAlloc::getOrCreateChunk(size_t n) {
  // Look for existing unused BumpChunks to satisfy the request, and pick the
  // first one which is large enough, and move it into the list of used
  // chunks.
  if (!unused_.empty()) {
    // Fast path: the head of the unused list already fits.
    if (unused_.begin()->canAlloc(n)) {
      return unused_.popFirst();
    }

    // Scan the rest of the list via |i->next()| rather than |*i| so that,
    // when a fitting chunk is found, |i| still designates its predecessor —
    // which is exactly what splitAfter() needs below.
    BumpChunkList::Iterator e(unused_.end());
    for (BumpChunkList::Iterator i(unused_.begin()); i->next() != e.get();
         ++i) {
      detail::BumpChunk* elem = i->next();
      MOZ_ASSERT(elem->empty());
      if (elem->canAlloc(n)) {
        // Unlink |elem|: split the list after its predecessor, pop |elem|
        // off the front of the tail, then re-append the remaining tail.
        BumpChunkList temp = unused_.splitAfter(i.get());
        UniqueBumpChunk newChunk = temp.popFirst();
        unused_.appendAll(std::move(temp));
        return newChunk;
      }
    }
  }

  // Allocate a new BumpChunk with enough space for the next allocation.
  UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
  if (!newChunk) {
    return newChunk;
  }
  incrementCurSize(newChunk->computedSizeOfIncludingThis());
  return newChunk;
}
    228 
    229 void* LifoAlloc::allocImplColdPath(size_t n) {
    230  void* result;
    231  UniqueBumpChunk newChunk = getOrCreateChunk(n);
    232  if (!newChunk) {
    233    return nullptr;
    234  }
    235 
    236  // This new chunk is about to be used for small allocations.
    237  smallAllocsSize_ += newChunk->computedSizeOfIncludingThis();
    238 
    239  // Since we just created a large enough chunk, this can't fail.
    240  chunks_.append(std::move(newChunk));
    241  result = chunks_.last()->tryAlloc(n);
    242  MOZ_ASSERT(result);
    243  return result;
    244 }
    245 
    246 void* LifoAlloc::allocImplOversize(size_t n) {
    247  void* result;
    248  UniqueBumpChunk newChunk = newChunkWithCapacity(n, true);
    249  if (!newChunk) {
    250    return nullptr;
    251  }
    252  incrementCurSize(newChunk->computedSizeOfIncludingThis());
    253 
    254  // Since we just created a large enough chunk, this can't fail.
    255  oversize_.append(std::move(newChunk));
    256  result = oversize_.last()->tryAlloc(n);
    257  MOZ_ASSERT(result);
    258  return result;
    259 }
    260 
    261 bool LifoAlloc::ensureUnusedApproximateColdPath(size_t n, size_t total) {
    262  for (detail::BumpChunk& bc : unused_) {
    263    total += bc.unused();
    264    if (total >= n) {
    265      return true;
    266    }
    267  }
    268 
    269  UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
    270  if (!newChunk) {
    271    return false;
    272  }
    273  incrementCurSize(newChunk->computedSizeOfIncludingThis());
    274  unused_.pushFront(std::move(newChunk));
    275  return true;
    276 }
    277 
    278 LifoAlloc::Mark LifoAlloc::mark() {
    279  markCount++;
    280  Mark res;
    281  if (!chunks_.empty()) {
    282    res.chunk = chunks_.last()->mark();
    283  }
    284  if (!oversize_.empty()) {
    285    res.oversize = oversize_.last()->mark();
    286  }
    287  return res;
    288 }
    289 
// Roll the allocator back to a previously taken Mark. Small chunks released
// by the rollback are kept in |unused_| for reuse; oversize chunks are freed.
void LifoAlloc::release(Mark mark) {
  markCount--;
#ifdef DEBUG
  // Validate the mark before acting on it: a mark that names a chunk must
  // name a chunk still present in the corresponding list, and the marked
  // position must lie within that chunk.
  auto assertIsContained = [](const detail::BumpChunk::Mark& m,
                              BumpChunkList& list) {
    if (m.markedChunk()) {
      bool contained = false;
      for (const detail::BumpChunk& chunk : list) {
        if (&chunk == m.markedChunk() && chunk.contains(m)) {
          contained = true;
          break;
        }
      }
      MOZ_ASSERT(contained);
    }
  };
  assertIsContained(mark.chunk, chunks_);
  assertIsContained(mark.oversize, oversize_);
#endif

  BumpChunkList released;
  auto cutAtMark = [&released](const detail::BumpChunk::Mark& m,
                               BumpChunkList& list) {
    // Move the blocks which are after the mark to the set released chunks.
    // A mark with no chunk means the list was empty when the mark was taken,
    // so the whole list is released.
    if (!m.markedChunk()) {
      released = std::move(list);
    } else {
      released = list.splitAfter(m.markedChunk());
    }

    // Release everything which follows the mark in the last chunk.
    if (!list.empty()) {
      list.last()->release(m);
    }
  };

  // Release the content of all the blocks which are after the marks, and keep
  // blocks as unused.
  cutAtMark(mark.chunk, chunks_);
  for (detail::BumpChunk& bc : released) {
    bc.release();

    // Chunks moved from (after a mark) in chunks_ to unused_ are no longer
    // considered small allocations.
    smallAllocsSize_ -= bc.computedSizeOfIncludingThis();
  }
  unused_.appendAll(std::move(released));

  // Free the content of all the blocks which are after the marks. Unlike the
  // small chunks above, released oversize chunks are freed, not recycled.
  cutAtMark(mark.oversize, oversize_);
  while (!released.empty()) {
    UniqueBumpChunk bc = released.popFirst();
    decrementCurSize(bc->computedSizeOfIncludingThis());
  }
}
    345 
// Take over all of |other|'s chunks and accounting, leaving |other| empty but
// reusable. |this| must currently own no chunks, and |other| must have no
// outstanding marks.
void LifoAlloc::steal(LifoAlloc* other) {
  MOZ_ASSERT(!other->markCount);
  MOZ_DIAGNOSTIC_ASSERT(unused_.empty());
  MOZ_DIAGNOSTIC_ASSERT(chunks_.empty());
  MOZ_DIAGNOSTIC_ASSERT(oversize_.empty());

  // Copy everything from |other| to |this| except for |peakSize_|, which
  // requires some care.
  chunks_ = std::move(other->chunks_);
  oversize_ = std::move(other->oversize_);
  unused_ = std::move(other->unused_);
  markCount = other->markCount;
  defaultChunkSize_ = other->defaultChunkSize_;
  oversizeThreshold_ = other->oversizeThreshold_;
  curSize_ = other->curSize_;
  // The peak is monotonic: keep whichever of the two allocators got higher.
  peakSize_ = std::max(peakSize_, other->peakSize_);
  smallAllocsSize_ = other->smallAllocsSize_;
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
  fallibleScope_ = other->fallibleScope_;
#endif

  // Leave |other| in a freshly-reset state with the same default chunk size.
  other->reset(defaultChunkSize_);
}
    369 
// Move every chunk owned by |other| into |this|, emptying |other|'s
// accounting. Neither allocator may have outstanding marks.
void LifoAlloc::transferFrom(LifoAlloc* other) {
  MOZ_ASSERT(!markCount);
  MOZ_ASSERT(!other->markCount);

  // This assertion is not really necessary, and if it is getting in your way
  // please feel free to just delete it, but it should generally point you in
  // a decent direction. LifoAllocs are entirely capable of having a mix of
  // allocations from different arenas, this is just a heuristic that we
  // expect will yield better performance.
  MOZ_ASSERT(arena_ == other->arena_);

  // Transferred chunks are not counted as part of |smallAllocsSize| as this
  // could introduce bias in the |NextSize| heuristics, leading to
  // over-allocations in *this* LifoAlloc. As well, to avoid interference with
  // small allocations made with |this|, the last chunk of the |chunks_| list
  // should remain the last chunk. Therefore, the transferred chunks are
  // prepended to the |chunks_| list.
  incrementCurSize(other->curSize_);

  appendUnused(std::move(other->unused_));
  chunks_.prependAll(std::move(other->chunks_));
  oversize_.prependAll(std::move(other->oversize_));
  other->curSize_ = 0;
  other->smallAllocsSize_ = 0;
}
    395 
    396 void LifoAlloc::transferUnusedFrom(LifoAlloc* other) {
    397  MOZ_ASSERT(!markCount);
    398 
    399  size_t size = 0;
    400  for (detail::BumpChunk& bc : other->unused_) {
    401    size += bc.computedSizeOfIncludingThis();
    402  }
    403 
    404  appendUnused(std::move(other->unused_));
    405  incrementCurSize(size);
    406  other->decrementCurSize(size);
    407 }
    408 
    409 #ifdef LIFO_CHUNK_PROTECT
    410 void LifoAlloc::setReadOnly() {
    411  for (detail::BumpChunk& bc : chunks_) {
    412    bc.setReadOnly();
    413  }
    414  for (detail::BumpChunk& bc : oversize_) {
    415    bc.setReadOnly();
    416  }
    417  for (detail::BumpChunk& bc : unused_) {
    418    bc.setReadOnly();
    419  }
    420 }
    421 
    422 void LifoAlloc::setReadWrite() {
    423  for (detail::BumpChunk& bc : chunks_) {
    424    bc.setReadWrite();
    425  }
    426  for (detail::BumpChunk& bc : oversize_) {
    427    bc.setReadWrite();
    428  }
    429  for (detail::BumpChunk& bc : unused_) {
    430    bc.setReadWrite();
    431  }
    432 }
    433 #endif