tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

TestJemalloc.cpp (31226B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
      3 /* This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "mozilla/Literals.h"
      8 #include "mozilla/mozalloc.h"
      9 #include "mozilla/UniquePtr.h"
     10 #include "mozilla/Vector.h"
     11 #include "mozilla/gtest/MozHelpers.h"
     12 #include "mozmemory.h"
     13 #include "nsCOMPtr.h"
     14 #include "Utils.h"
     15 
     16 #include "gtest/gtest.h"
     17 
     18 #ifdef MOZ_PHC
     19 #  include "PHC.h"
     20 #endif
     21 
     22 using namespace mozilla;
     23 
// RAII guard that switches PHC (probabilistic heap checker) allocations off
// for the current thread for the lifetime of the object, re-enabling them on
// destruction. Several tests below rely on mozjemalloc's exact allocation and
// reallocation behavior, which PHC deliberately perturbs. Compiles to a no-op
// when MOZ_PHC is not defined.
class AutoDisablePHCOnCurrentThread {
 public:
  AutoDisablePHCOnCurrentThread() {
#ifdef MOZ_PHC
    mozilla::phc::DisablePHCOnCurrentThread();
#endif
  }

  ~AutoDisablePHCOnCurrentThread() {
#ifdef MOZ_PHC
    mozilla::phc::ReenablePHCOnCurrentThread();
#endif
  }
};
     38 
     39 static inline void TestOne(size_t size) {
     40  size_t req = size;
     41  size_t adv = malloc_good_size(req);
     42  char* p = (char*)malloc(req);
     43  size_t usable = moz_malloc_usable_size(p);
     44  // NB: Using EXPECT here so that we still free the memory on failure.
     45  EXPECT_EQ(adv, usable) << "malloc_good_size(" << req << ") --> " << adv
     46                         << "; "
     47                            "malloc_usable_size("
     48                         << req << ") --> " << usable;
     49  free(p);
     50 }
     51 
     52 static inline void TestThree(size_t size) {
     53  ASSERT_NO_FATAL_FAILURE(TestOne(size - 1));
     54  ASSERT_NO_FATAL_FAILURE(TestOne(size));
     55  ASSERT_NO_FATAL_FAILURE(TestOne(size + 1));
     56 }
     57 
     58 TEST(Jemalloc, UsableSizeInAdvance)
     59 {
     60  /*
     61   * Test every size up to a certain point, then (N-1, N, N+1) triplets for a
     62   * various sizes beyond that.
     63   */
     64 
     65  for (size_t n = 0; n < 16_KiB; n++) ASSERT_NO_FATAL_FAILURE(TestOne(n));
     66 
     67  for (size_t n = 16_KiB; n < 1_MiB; n += 4_KiB)
     68    ASSERT_NO_FATAL_FAILURE(TestThree(n));
     69 
     70  for (size_t n = 1_MiB; n < 8_MiB; n += 128_KiB)
     71    ASSERT_NO_FATAL_FAILURE(TestThree(n));
     72 }
     73 
     74 static int gStaticVar;
     75 
     76 bool InfoEq(jemalloc_ptr_info_t& aInfo, PtrInfoTag aTag, void* aAddr,
     77            size_t aSize, arena_id_t arenaId) {
     78  return aInfo.tag == aTag && aInfo.addr == aAddr && aInfo.size == aSize
     79 #ifdef MOZ_DEBUG
     80         && aInfo.arenaId == arenaId
     81 #endif
     82      ;
     83 }
     84 
     85 bool InfoEqFreedPage(jemalloc_ptr_info_t& aInfo, void* aAddr, size_t aPageSize,
     86                     arena_id_t arenaId) {
     87  size_t pageSizeMask = aPageSize - 1;
     88 
     89  return jemalloc_ptr_is_freed_page(&aInfo) &&
     90         aInfo.addr == (void*)(uintptr_t(aAddr) & ~pageSizeMask) &&
     91         aInfo.size == aPageSize
     92 #ifdef MOZ_DEBUG
     93         && aInfo.arenaId == arenaId
     94 #endif
     95      ;
     96 }
     97 
// Validate jemalloc_ptr_info() for interior pointers into live and freed
// small/large/huge allocations, and for addresses that are not heap data at
// all (null, stack, code, static storage, chunk metadata).
TEST(Jemalloc, PtrInfo)
{
  arena_id_t arenaId = moz_create_arena();
  ASSERT_TRUE(arenaId != 0);

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  jemalloc_ptr_info_t info;
  Vector<char*> small, large, huge;

  // For small (less than half the page size) allocations, test every position
  // within many possible sizes.
  size_t small_max =
      stats.subpage_max ? stats.subpage_max : stats.quantum_wide_max;
  for (size_t n = 0; n <= small_max; n += 8) {
    auto p = (char*)moz_arena_malloc(arenaId, n);
    size_t usable = moz_malloc_size_of(p);
    ASSERT_TRUE(small.append(p));
    // Every interior byte must resolve back to the same live allocation.
    for (size_t j = 0; j < usable; j++) {
      jemalloc_ptr_info(&p[j], &info);
      ASSERT_TRUE(InfoEq(info, TagLiveAlloc, p, usable, arenaId));
    }
  }

  // Similar for large (small_max + 1 KiB .. 1MiB - 8KiB) allocations.
  // Interior positions are sampled with a prime stride rather than checked
  // exhaustively, to keep the test fast.
  for (size_t n = small_max + 1_KiB; n <= stats.large_max; n += 1_KiB) {
    auto p = (char*)moz_arena_malloc(arenaId, n);
    size_t usable = moz_malloc_size_of(p);
    ASSERT_TRUE(large.append(p));
    for (size_t j = 0; j < usable; j += 347) {
      jemalloc_ptr_info(&p[j], &info);
      ASSERT_TRUE(InfoEq(info, TagLiveAlloc, p, usable, arenaId));
    }
  }

  // Similar for huge (> 1MiB - 8KiB) allocations.
  for (size_t n = stats.chunksize; n <= 10_MiB; n += 512_KiB) {
    auto p = (char*)moz_arena_malloc(arenaId, n);
    size_t usable = moz_malloc_size_of(p);
    ASSERT_TRUE(huge.append(p));
    for (size_t j = 0; j < usable; j += 567) {
      jemalloc_ptr_info(&p[j], &info);
      ASSERT_TRUE(InfoEq(info, TagLiveAlloc, p, usable, arenaId));
    }
  }

  // The following loops check freed allocations. We step through the vectors
  // using prime-sized steps, which gives full coverage of the arrays while
  // avoiding deallocating in the same order we allocated.
  size_t len;

  // Free the small allocations and recheck them.
  int isFreedAlloc = 0, isFreedPage = 0;
  len = small.length();
  for (size_t i = 0, j = 0; i < len; i++, j = (j + 19) % len) {
    char* p = small[j];
    size_t usable = moz_malloc_size_of(p);
    free(p);
    for (size_t k = 0; k < usable; k++) {
      jemalloc_ptr_info(&p[k], &info);
      // There are two valid outcomes here.
      if (InfoEq(info, TagFreedAlloc, p, usable, arenaId)) {
        isFreedAlloc++;
      } else if (InfoEqFreedPage(info, &p[k], stats.page_size, arenaId)) {
        isFreedPage++;
      } else {
        ASSERT_TRUE(false);
      }
    }
  }
  // There should be both FreedAlloc and FreedPage results, but a lot more of
  // the former.
  ASSERT_TRUE(isFreedAlloc != 0);
  ASSERT_TRUE(isFreedPage != 0);
  ASSERT_TRUE(isFreedAlloc / isFreedPage > 8);

  // Free the large allocations and recheck them.
  len = large.length();
  for (size_t i = 0, j = 0; i < len; i++, j = (j + 31) % len) {
    char* p = large[j];
    size_t usable = moz_malloc_size_of(p);
    free(p);
    for (size_t k = 0; k < usable; k += 357) {
      jemalloc_ptr_info(&p[k], &info);
      ASSERT_TRUE(InfoEqFreedPage(info, &p[k], stats.page_size, arenaId));
    }
  }

  // Free the huge allocations and recheck them. Freed huge memory is expected
  // to no longer be attributed to the allocator at all.
  len = huge.length();
  for (size_t i = 0, j = 0; i < len; i++, j = (j + 7) % len) {
    char* p = huge[j];
    size_t usable = moz_malloc_size_of(p);
    free(p);
    for (size_t k = 0; k < usable; k += 587) {
      jemalloc_ptr_info(&p[k], &info);
      ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
    }
  }

  // Null ptr.
  jemalloc_ptr_info(nullptr, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Near-null ptr.
  jemalloc_ptr_info((void*)0x123, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Maximum address.
  jemalloc_ptr_info((void*)uintptr_t(-1), &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Stack memory.
  int stackVar = 0;
  jemalloc_ptr_info(&stackVar, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Code memory.
  jemalloc_ptr_info((const void*)&jemalloc_ptr_info, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Static memory.
  jemalloc_ptr_info(&gStaticVar, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Chunk header. The start of the chunk containing |p| holds allocator
  // metadata, which must not be reported as an allocation.
  UniquePtr<int> p = MakeUnique<int>();
  size_t chunksizeMask = stats.chunksize - 1;
  char* chunk = (char*)(uintptr_t(p.get()) & ~chunksizeMask);
  size_t chunkHeaderSize = stats.chunksize - stats.large_max - stats.page_size;
  for (size_t i = 0; i < chunkHeaderSize; i += 64) {
    jemalloc_ptr_info(&chunk[i], &info);
    ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
  }

  // Run header.
  size_t page_sizeMask = stats.page_size - 1;
  char* run = (char*)(uintptr_t(p.get()) & ~page_sizeMask);
  for (size_t i = 0; i < 4 * sizeof(void*); i++) {
    jemalloc_ptr_info(&run[i], &info);
    ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
  }

  // Entire chunk. It's impossible to check what is put into |info| for all of
  // these addresses; this is more about checking that we don't crash.
  for (size_t i = 0; i < stats.chunksize; i += 256) {
    jemalloc_ptr_info(&chunk[i], &info);
  }

  moz_dispose_arena(arenaId);
}
    250 
// Allocation sizes exercised by several tests below: a mix spanning tiny,
// sub-page, large and huge requests (the fractional _KiB/_MiB user-defined
// literals come from mozilla/Literals.h).
size_t sSizes[] = {1,      42,      79,      918,     1.4_KiB,
                   73_KiB, 129_KiB, 1.1_MiB, 2.6_MiB, 5.1_MiB};
    253 
// Exercise the arena API end to end (create / alloc / realloc / calloc /
// free / dispose) and verify, via death tests, that mixing pointers between
// arenas crashes.
TEST(Jemalloc, Arenas)
{
  arena_id_t arena = moz_create_arena();
  ASSERT_TRUE(arena != 0);
  void* ptr = moz_arena_malloc(arena, 42);
  ASSERT_TRUE(ptr != nullptr);
  ptr = moz_arena_realloc(arena, ptr, 64);
  ASSERT_TRUE(ptr != nullptr);
  moz_arena_free(arena, ptr);
  ptr = moz_arena_calloc(arena, 24, 2);
  // For convenience, free can be used to free arena pointers.
  free(ptr);
  moz_dispose_arena(arena);

  // Avoid death tests adding some unnecessary (long) delays.
  SAVE_GDB_SLEEP_LOCAL();

  // Can't use an arena after it's disposed.
  // ASSERT_DEATH_WRAP(moz_arena_malloc(arena, 80), "");

  // Arena id 0 can't be used to somehow get to the main arena.
  ASSERT_DEATH_WRAP(moz_arena_malloc(0, 80), "");

  arena = moz_create_arena();
  arena_id_t arena2 = moz_create_arena();
  // Ensure arena2 is used to prevent OSX errors:
  (void)arena2;

  // For convenience, realloc can also be used to reallocate arena pointers.
  // The result should be in the same arena. Test various size class
  // transitions.
  for (size_t from_size : sSizes) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
      ptr = moz_arena_malloc(arena, from_size);
      ptr = realloc(ptr, to_size);
      // Freeing with the wrong arena should crash.
      ASSERT_DEATH_WRAP(moz_arena_free(arena2, ptr), "");
      // Likewise for moz_arena_realloc.
      ASSERT_DEATH_WRAP(moz_arena_realloc(arena2, ptr, from_size), "");
      // The following will crash if it's not in the right arena.
      moz_arena_free(arena, ptr);
    }
  }

  moz_dispose_arena(arena2);
  moz_dispose_arena(arena);

  RESTORE_GDB_SLEEP_LOCAL();
}
    305 
    306 // Check that a buffer aPtr is entirely filled with a given character from
    307 // aOffset to aSize. For faster comparison, the caller is required to fill a
    308 // reference buffer with the wanted character, and give the size of that
    309 // reference buffer.
    310 static void bulk_compare(char* aPtr, size_t aOffset, size_t aSize,
    311                         char* aReference, size_t aReferenceSize) {
    312  for (size_t i = aOffset; i < aSize; i += aReferenceSize) {
    313    size_t length = std::min(aSize - i, aReferenceSize);
    314    if (memcmp(aPtr + i, aReference, length)) {
    315      // We got a mismatch, we now want to report more precisely where.
    316      for (size_t j = i; j < i + length; j++) {
    317        ASSERT_EQ(aPtr[j], *aReference);
    318      }
    319    }
    320  }
    321 }
    322 
    323 // A range iterator for size classes between two given values.
// A range iterator for size classes between two given values.
// malloc_good_size() rounds a request up to its size class, so constructing
// an Iterator normalizes the value to a class, and operator++ (which rounds
// mValue + 1) advances to the next class. Note that Iterator has no
// operator!=; range-for termination works through the implicit size_t
// conversion, comparing the current class values.
class SizeClassesBetween {
 public:
  SizeClassesBetween(size_t aStart, size_t aEnd) : mStart(aStart), mEnd(aEnd) {}

  class Iterator {
   public:
    // Snap aValue to its size class.
    explicit Iterator(size_t aValue) : mValue(malloc_good_size(aValue)) {}

    operator size_t() const { return mValue; }
    size_t operator*() const { return mValue; }
    // Step to the next size class.
    Iterator& operator++() {
      mValue = malloc_good_size(mValue + 1);
      return *this;
    }

   private:
    size_t mValue;
  };

  Iterator begin() { return Iterator(mStart); }
  Iterator end() { return Iterator(mEnd); }

 private:
  size_t mStart, mEnd;
};
    349 
// Round s up to the nearest multiple of alignment. alignment must be a power
// of two for the mask arithmetic to be valid.
#define ALIGNMENT_CEILING(s, alignment) \
  (((s) + ((alignment) - 1)) & (~((alignment) - 1)))

// Round s down to the nearest multiple of alignment (power of two only).
#define ALIGNMENT_FLOOR(s, alignment) ((s) & (~((alignment) - 1)))
    354 
    355 static bool IsSameRoundedHugeClass(size_t aSize1, size_t aSize2,
    356                                   jemalloc_stats_t& aStats) {
    357  return (aSize1 > aStats.large_max && aSize2 > aStats.large_max &&
    358          ALIGNMENT_CEILING(aSize1 + aStats.page_size, aStats.chunksize) ==
    359              ALIGNMENT_CEILING(aSize2 + aStats.page_size, aStats.chunksize));
    360 }
    361 
    362 static bool CanReallocInPlace(size_t aFromSize, size_t aToSize,
    363                              jemalloc_stats_t& aStats) {
    364  // PHC allocations must be disabled because PHC reallocs differently to
    365  // mozjemalloc.
    366 #ifdef MOZ_PHC
    367  MOZ_RELEASE_ASSERT(!mozilla::phc::IsPHCEnabledOnCurrentThread());
    368 #endif
    369 
    370  if (aFromSize == malloc_good_size(aToSize)) {
    371    // Same size class: in-place.
    372    return true;
    373  }
    374  if (aFromSize >= aStats.page_size && aFromSize <= aStats.large_max &&
    375      aToSize >= aStats.page_size && aToSize <= aStats.large_max) {
    376    // Any large class to any large class: in-place when there is space to.
    377    return true;
    378  }
    379  if (IsSameRoundedHugeClass(aFromSize, aToSize, aStats)) {
    380    // Huge sizes that round up to the same multiple of the chunk size:
    381    // in-place.
    382    return true;
    383  }
    384  return false;
    385 }
    386 
    387 TEST(Jemalloc, InPlace)
    388 {
    389  // Disable PHC allocations for this test, because CanReallocInPlace() isn't
    390  // valid for PHC allocations.
    391  AutoDisablePHCOnCurrentThread disable;
    392 
    393  jemalloc_stats_t stats;
    394  jemalloc_stats(&stats);
    395 
    396  // Using a separate arena, which is always emptied after an iteration, ensures
    397  // that in-place reallocation happens in all cases it can happen. This test is
    398  // intended for developers to notice they may have to adapt other tests if
    399  // they change the conditions for in-place reallocation.
    400  arena_id_t arena = moz_create_arena();
    401 
    402  for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    403    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    404    for (size_t to_size : sSizes) {
    405      SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
    406      char* ptr = (char*)moz_arena_malloc(arena, from_size);
    407      char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
    408      if (CanReallocInPlace(from_size, to_size, stats)) {
    409        EXPECT_EQ(ptr, ptr2);
    410      } else {
    411        EXPECT_NE(ptr, ptr2);
    412      }
    413      moz_arena_free(arena, ptr2);
    414    }
    415  }
    416 
    417  moz_dispose_arena(arena);
    418 }
    419 
// Bug 1474254: disable this test for windows ccov builds because it leads to
// timeout.
#if !defined(XP_WIN) || !defined(MOZ_CODE_COVERAGE)
// Verify mozjemalloc's junk-on-alloc and poison-on-free behavior across
// malloc, free, and every realloc size-class transition (in-place and moved).
// Note: the checks below deliberately read freed memory.
TEST(Jemalloc, JunkPoison)
{
  // Disable PHC allocations for this test, because CanReallocInPlace() isn't
  // valid for PHC allocations, and the testing UAFs aren't valid.
  AutoDisablePHCOnCurrentThread disable;

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  // Avoid death tests adding some unnecessary (long) delays.
  SAVE_GDB_SLEEP_LOCAL();

  // Create buffers in a separate arena, for faster comparisons with
  // bulk_compare.
  arena_id_t buf_arena = moz_create_arena();
  char* junk_buf = (char*)moz_arena_malloc(buf_arena, stats.page_size);
  // Depending on its configuration, the allocator will either fill the
  // requested allocation with the junk byte (0xe4) or with zeroes, or do
  // nothing, in which case, since we're allocating in a fresh arena,
  // we'll be getting zeroes.
  char junk = stats.opt_junk ? '\xe4' : '\0';
  for (size_t i = 0; i < stats.page_size; i++) {
    ASSERT_EQ(junk_buf[i], junk);
  }

  // Reference pattern for freed (poisoned, 0xe5) memory.
  char* poison_buf = (char*)moz_arena_malloc(buf_arena, stats.page_size);
  memset(poison_buf, 0xe5, stats.page_size);

  // Reference pattern for bytes we wrote ourselves.
  static const char fill = 0x42;
  char* fill_buf = (char*)moz_arena_malloc(buf_arena, stats.page_size);
  memset(fill_buf, fill, stats.page_size);

  arena_params_t params;
  // Allow as many dirty pages in the arena as possible, so that purge never
  // happens in it. Purge breaks some of the tests below randomly depending on
  // what other things happen on other threads.
  params.mMaxDirty = size_t(-1);
  arena_id_t arena = moz_create_arena_with_params(&params);

  // Mozjemalloc is configured to only poison the first four cache lines.
  const size_t poison_check_len = 256;

  // Allocating should junk the buffer, and freeing should poison the buffer.
  for (size_t size : sSizes) {
    if (size <= stats.large_max) {
      SCOPED_TRACE(testing::Message() << "size = " << size);
      char* buf = (char*)moz_arena_malloc(arena, size);
      size_t allocated = moz_malloc_usable_size(buf);
      if (stats.opt_junk || stats.opt_zero) {
        ASSERT_NO_FATAL_FAILURE(
            bulk_compare(buf, 0, allocated, junk_buf, stats.page_size));
      }
      moz_arena_free(arena, buf);
      // We purposefully do a use-after-free here, to check that the data was
      // poisoned.
      ASSERT_NO_FATAL_FAILURE(
          bulk_compare(buf, 0, std::min(allocated, poison_check_len),
                       poison_buf, stats.page_size));
    }
  }

  // Shrinking in the same size class should be in place and poison between the
  // new allocation size and the old one.
  size_t prev = 0;
  for (size_t size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "size = " << size);
    SCOPED_TRACE(testing::Message() << "prev = " << prev);
    char* ptr = (char*)moz_arena_malloc(arena, size);
    memset(ptr, fill, moz_malloc_usable_size(ptr));
    char* ptr2 = (char*)moz_arena_realloc(arena, ptr, prev + 1);
    ASSERT_EQ(ptr, ptr2);
    ASSERT_NO_FATAL_FAILURE(
        bulk_compare(ptr, 0, prev + 1, fill_buf, stats.page_size));
    ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr, prev + 1,
                                         std::min(size, poison_check_len),
                                         poison_buf, stats.page_size));
    moz_arena_free(arena, ptr);
    prev = size;
  }

  // In-place realloc should junk the new bytes when growing and poison the old
  // bytes when shrinking.
  for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
      if (CanReallocInPlace(from_size, to_size, stats)) {
        char* ptr = (char*)moz_arena_malloc(arena, from_size);
        memset(ptr, fill, moz_malloc_usable_size(ptr));
        char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
        ASSERT_EQ(ptr, ptr2);
        // Shrinking allocation
        if (from_size >= to_size) {
          ASSERT_NO_FATAL_FAILURE(
              bulk_compare(ptr, 0, to_size, fill_buf, stats.page_size));
          // Huge allocations have guards and will crash when accessing
          // beyond the valid range.
          if (to_size > stats.large_max) {
            size_t page_limit = ALIGNMENT_CEILING(to_size, stats.page_size);
            ASSERT_NO_FATAL_FAILURE(bulk_compare(
                ptr, to_size, std::min(page_limit, poison_check_len),
                poison_buf, stats.page_size));
            ASSERT_DEATH_WRAP(ptr[page_limit] = 0, "");
          } else {
            ASSERT_NO_FATAL_FAILURE(bulk_compare(
                ptr, to_size, std::min(from_size, poison_check_len), poison_buf,
                stats.page_size));
          }
        } else {
          // Enlarging allocation
          ASSERT_NO_FATAL_FAILURE(
              bulk_compare(ptr, 0, from_size, fill_buf, stats.page_size));
          if (stats.opt_junk || stats.opt_zero) {
            ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr, from_size, to_size,
                                                 junk_buf, stats.page_size));
          }
          // Huge allocation, so should have a guard page following
          if (to_size > stats.large_max) {
            ASSERT_DEATH_WRAP(
                ptr[ALIGNMENT_CEILING(to_size, stats.page_size)] = 0, "");
          }
        }
        moz_arena_free(arena, ptr2);
      }
    }
  }

  // Growing to a different size class should poison the old allocation,
  // preserve the original bytes, and junk the new bytes in the new allocation.
  for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      if (from_size < to_size && malloc_good_size(to_size) != from_size &&
          !IsSameRoundedHugeClass(from_size, to_size, stats)) {
        SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
        char* ptr = (char*)moz_arena_malloc(arena, from_size);
        memset(ptr, fill, moz_malloc_usable_size(ptr));
        // Avoid in-place realloc by allocating a buffer, expecting it to be
        // right after the buffer we just received. Buffers smaller than the
        // page size and exactly or larger than the size of the largest large
        // size class can't be reallocated in-place.
        char* avoid_inplace = nullptr;
        if (from_size >= stats.page_size && from_size < stats.large_max) {
          avoid_inplace = (char*)moz_arena_malloc(arena, stats.page_size);
          ASSERT_EQ(ptr + from_size, avoid_inplace);
        }
        char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
        ASSERT_NE(ptr, ptr2);
        if (from_size <= stats.large_max) {
          ASSERT_NO_FATAL_FAILURE(
              bulk_compare(ptr, 0, std::min(from_size, poison_check_len),
                           poison_buf, stats.page_size));
        }
        ASSERT_NO_FATAL_FAILURE(
            bulk_compare(ptr2, 0, from_size, fill_buf, stats.page_size));
        if (stats.opt_junk || stats.opt_zero) {
          size_t rounded_to_size = malloc_good_size(to_size);
          ASSERT_NE(to_size, rounded_to_size);
          ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr2, from_size, rounded_to_size,
                                               junk_buf, stats.page_size));
        }
        moz_arena_free(arena, ptr2);
        moz_arena_free(arena, avoid_inplace);
      }
    }
  }

  // Shrinking to a different size class should poison the old allocation,
  // preserve the original bytes, and junk the extra bytes in the new
  // allocation.
  for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      if (from_size > to_size &&
          !CanReallocInPlace(from_size, to_size, stats)) {
        SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
        char* ptr = (char*)moz_arena_malloc(arena, from_size);
        memset(ptr, fill, from_size);
        char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
        ASSERT_NE(ptr, ptr2);
        if (from_size <= stats.large_max) {
          ASSERT_NO_FATAL_FAILURE(
              bulk_compare(ptr, 0, std::min(from_size, poison_check_len),
                           poison_buf, stats.page_size));
        }
        ASSERT_NO_FATAL_FAILURE(
            bulk_compare(ptr2, 0, to_size, fill_buf, stats.page_size));
        if (stats.opt_junk || stats.opt_zero) {
          size_t rounded_to_size = malloc_good_size(to_size);
          ASSERT_NE(to_size, rounded_to_size);
          ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr2, from_size, rounded_to_size,
                                               junk_buf, stats.page_size));
        }
        moz_arena_free(arena, ptr2);
      }
    }
  }

  moz_dispose_arena(arena);

  moz_arena_free(buf_arena, poison_buf);
  moz_arena_free(buf_arena, junk_buf);
  moz_arena_free(buf_arena, fill_buf);
  moz_dispose_arena(buf_arena);

  RESTORE_GDB_SLEEP_LOCAL();
}
#endif  // !defined(XP_WIN) || !defined(MOZ_CODE_COVERAGE)
    631 
// Verify that the guard page at the end of a chunk's allocation area is
// present: it must report as a freed page and writing to it must crash.
TEST(Jemalloc, TrailingGuard)
{
  // Disable PHC allocations for this test, because even a single PHC
  // allocation occurring can throw it off.
  AutoDisablePHCOnCurrentThread disable;

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  // Avoid death tests adding some unnecessary (long) delays.
  SAVE_GDB_SLEEP_LOCAL();

  arena_id_t arena = moz_create_arena();
  ASSERT_TRUE(arena != 0);

  // Do enough large allocations to fill a chunk, and then one additional one,
  // and check that the guard page is still present after the one-but-last
  // allocation, i.e. that we didn't allocate the guard.
  Vector<void*> ptr_list;
  for (size_t cnt = 0; cnt < stats.large_max / stats.page_size; cnt++) {
    void* ptr = moz_arena_malloc(arena, stats.page_size);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_TRUE(ptr_list.append(ptr));
  }

  // The guard is expected to be the page immediately after the last
  // allocation in the chunk.
  void* last_ptr_in_chunk = ptr_list[ptr_list.length() - 1];
  void* extra_ptr = moz_arena_malloc(arena, stats.page_size);
  void* guard_page = (void*)ALIGNMENT_CEILING(
      (uintptr_t)last_ptr_in_chunk + stats.page_size, stats.page_size);
  jemalloc_ptr_info_t info;
  jemalloc_ptr_info(guard_page, &info);
  ASSERT_TRUE(jemalloc_ptr_is_freed_page(&info));

  // Writing through the guard page must crash.
  ASSERT_DEATH_WRAP(*(char*)guard_page = 0, "");

  for (void* ptr : ptr_list) {
    moz_arena_free(arena, ptr);
  }
  moz_arena_free(arena, extra_ptr);

  moz_dispose_arena(arena);

  RESTORE_GDB_SLEEP_LOCAL();
}
    676 
// Verify that a guard page separates the chunk header from the start of the
// allocation area: the byte just before the first allocation must not be
// attributed to the heap and writing to it must crash.
TEST(Jemalloc, LeadingGuard)
{
  // Disable PHC allocations for this test, because even a single PHC
  // allocation occurring can throw it off.
  AutoDisablePHCOnCurrentThread disable;

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  // Avoid death tests adding some unnecessary (long) delays.
  SAVE_GDB_SLEEP_LOCAL();

  arena_id_t arena = moz_create_arena();
  ASSERT_TRUE(arena != 0);

  // Do a simple normal allocation, but force all the allocation space
  // in the chunk to be used up. This allows us to check that we get
  // the safe area right in the logic that follows (all memory will be
  // committed and initialized), and it forces this pointer to the start
  // of the zone to sit at the very start of the usable chunk area.
  void* ptr = moz_arena_malloc(arena, stats.large_max);
  ASSERT_TRUE(ptr != nullptr);
  // If ptr is chunk-aligned, the above allocation went wrong.
  void* chunk_start = (void*)ALIGNMENT_FLOOR((uintptr_t)ptr, stats.chunksize);
  ASSERT_NE((uintptr_t)ptr, (uintptr_t)chunk_start);
  // If ptr is 1 page after the chunk start (so right after the header),
  // we must have missed adding the guard page.
  ASSERT_NE((uintptr_t)ptr, (uintptr_t)chunk_start + stats.page_size);
  // The actual start depends on the amount of metadata versus the page
  // size, so we can't check equality without pulling in too many
  // implementation details.

  // Guard page should be right before data area
  void* guard_page = (void*)(((uintptr_t)ptr) - sizeof(void*));
  jemalloc_ptr_info_t info;
  jemalloc_ptr_info(guard_page, &info);
  ASSERT_TRUE(info.tag == TagUnknown);
  ASSERT_DEATH_WRAP(*(char*)guard_page = 0, "");

  moz_arena_free(arena, ptr);
  moz_dispose_arena(arena);

  RESTORE_GDB_SLEEP_LOCAL();
}
    721 
// Verify the crash semantics of disposing non-empty arenas for small, large
// and huge live allocations. Behavior intentionally differs between debug
// and release builds for huge allocations (see below).
TEST(Jemalloc, DisposeArena)
{
  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  // Avoid death tests adding some unnecessary (long) delays.
  SAVE_GDB_SLEEP_LOCAL();

  // Small allocation still live: disposal must crash.
  arena_id_t arena = moz_create_arena();
  void* ptr = moz_arena_malloc(arena, 42);
  // Disposing of the arena when it's not empty is a MOZ_CRASH-worthy error.
  ASSERT_DEATH_WRAP(moz_dispose_arena(arena), "");
  moz_arena_free(arena, ptr);
  moz_dispose_arena(arena);

  // Same with a large (multi-page) allocation.
  arena = moz_create_arena();
  ptr = moz_arena_malloc(arena, stats.page_size * 2);
  // Disposing of the arena when it's not empty is a MOZ_CRASH-worthy error.
  ASSERT_DEATH_WRAP(moz_dispose_arena(arena), "");
  moz_arena_free(arena, ptr);
  moz_dispose_arena(arena);

  arena = moz_create_arena();
  ptr = moz_arena_malloc(arena, stats.chunksize * 2);
#ifdef MOZ_DEBUG
  // On debug builds, we do the expensive check that arenas are empty.
  ASSERT_DEATH_WRAP(moz_dispose_arena(arena), "");
  moz_arena_free(arena, ptr);
  moz_dispose_arena(arena);
#else
  // Currently, the allocator can't trivially check whether the arena is empty
  // of huge allocations, so disposing of it works.
  moz_dispose_arena(arena);
  // But trying to free a pointer that belongs to it will MOZ_CRASH.
  ASSERT_DEATH_WRAP(free(ptr), "");
  // Likewise for realloc
  ASSERT_DEATH_WRAP(ptr = realloc(ptr, stats.chunksize * 3), "");
#endif

  // Using the arena after it's been disposed of is MOZ_CRASH-worthy.
  ASSERT_DEATH_WRAP(moz_arena_malloc(arena, 42), "");

  RESTORE_GDB_SLEEP_LOCAL();
}
    766 
    767 static void CheckPtr(void* ptr, size_t size) {
    768  EXPECT_TRUE(ptr);
    769  jemalloc_ptr_info_t info;
    770  jemalloc_ptr_info(ptr, &info);
    771  EXPECT_EQ(info.tag, TagLiveAlloc);
    772  EXPECT_EQ(info.size, malloc_good_size(size));
    773 }
    774 
    775 static void CheckStats(const char* operation, unsigned iteration,
    776                       jemalloc_stats_lite_t& baseline,
    777                       jemalloc_stats_lite_t& stats, size_t num_ops,
    778                       ptrdiff_t bytes_diff) {
    779  if ((baseline.allocated_bytes + bytes_diff != stats.allocated_bytes
    780 
    781       || baseline.num_operations + num_ops != stats.num_operations)) {
    782    // All the tests that check stats, perform some operation, then check stats
    783    // again can race with other threads.  But the test can't be made thread
    784    // safe without a sagnificant amount of work.  However this IS a problem
    785    // when stepping through the test using a debugger, since other threads are
    786    // likely to run while the current thread is paused.  Instead of neading a
    787    // debugger our printf here can help understand a failing test.
    788    fprintf(stderr, "Check stats failed after iteration %u operation %s\n",
    789            iteration, operation);
    790 
    791    EXPECT_EQ(baseline.allocated_bytes + bytes_diff, stats.allocated_bytes);
    792    EXPECT_EQ(baseline.num_operations + num_ops, stats.num_operations);
    793  }
    794 }
    795 
    796 TEST(Jemalloc, StatsLite)
    797 {
    798  // Disable PHC allocations for this test, because even a single PHC
    799  // allocation occurring can throw it off.
    800  AutoDisablePHCOnCurrentThread disable;
    801 
    802  // Use this data to make an allocation, resize it twice, then free it.  Some
    803  // The data uses a few size classes and does a combination of in-place and
    804  // moving reallocations.
    805  struct {
    806    // The initial allocation size.
    807    size_t initial;
    808    // The first reallocation size and number of operations of the reallocation.
    809    size_t next;
    810    size_t next_ops;
    811    // The final reallocation size and number of operations of the reallocation.
    812    size_t last;
    813    size_t last_ops;
    814  } TestData[] = {
    815      /* clang-format off */
    816      { 16,      15,     0, 256,     2},
    817      {128_KiB,  64_KiB, 1,  68_KiB, 1},
    818      {  4_MiB,  16_MiB, 2,   3_MiB, 2},
    819      { 16_KiB, 512,     2,  32_MiB, 2},
    820      /* clang-format on */
    821  };
    822 
    823  arena_id_t my_arena = moz_create_arena();
    824 
    825  unsigned i = 0;
    826  for (auto data : TestData) {
    827    // Assert that the API returns /something/ a bit sensible.
    828    jemalloc_stats_lite_t baseline;
    829    jemalloc_stats_lite(&baseline);
    830 
    831    // Allocate an object.
    832    void* ptr = moz_arena_malloc(my_arena, data.initial);
    833    CheckPtr(ptr, data.initial);
    834 
    835    jemalloc_stats_lite_t stats1;
    836    jemalloc_stats_lite(&stats1);
    837    CheckStats("malloc()", i, baseline, stats1, 1,
    838               malloc_good_size(data.initial));
    839 
    840    // realloc the item in-place.
    841    ptr = moz_arena_realloc(my_arena, ptr, data.next);
    842    CheckPtr(ptr, data.next);
    843 
    844    jemalloc_stats_lite_t stats2;
    845    jemalloc_stats_lite(&stats2);
    846    CheckStats("realloc() 1", i, stats1, stats2, data.next_ops,
    847               malloc_good_size(data.next) - malloc_good_size(data.initial));
    848 
    849    // realloc so it has to move to a different location
    850    ptr = moz_arena_realloc(my_arena, ptr, data.last);
    851    CheckPtr(ptr, data.last);
    852 
    853    jemalloc_stats_lite_t stats3;
    854    jemalloc_stats_lite(&stats3);
    855    CheckStats("realloc() 2", i, stats2, stats3, data.last_ops,
    856               malloc_good_size(data.last) - malloc_good_size(data.next));
    857 
    858    moz_arena_free(my_arena, ptr);
    859    jemalloc_stats_lite_t stats4;
    860    jemalloc_stats_lite(&stats4);
    861    CheckStats("free()", i, stats3, stats4, 1, -malloc_good_size(data.last));
    862 
    863    i++;
    864  }
    865 
    866  moz_dispose_arena(my_arena);
    867 }