low_level_alloc.cc
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// A low-level allocator that can be used by other low-level
// modules without introducing dependency cycles.
// This allocator is slow and wasteful of memory;
// it should not be used when performance is key.

#include "absl/base/internal/low_level_alloc.h"

#include <type_traits>

#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/internal/direct_mmap.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/macros.h"
#include "absl/base/thread_annotations.h"

// LowLevelAlloc requires that the platform support low-level
// allocation of virtual memory. Platforms lacking this cannot use
// LowLevelAlloc.
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING

#ifndef _WIN32
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>
#else
#include <windows.h>
#endif

#ifdef __linux__
#include <sys/prctl.h>
#endif

#include <string.h>

#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstddef>
#include <new>  // for placement-new

#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"

#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

// A first-fit allocator with amortized logarithmic free() time.

// ---------------------------------------------------------------------------
static const int kMaxLevel = 30;

namespace {
// This struct describes one allocated block, or one free block.
struct AllocList {
  struct Header {
    // Size of entire region, including this field. Must be
    // first. Valid in both allocated and unallocated blocks.
    uintptr_t size;

    // kMagicAllocated or kMagicUnallocated xor this.
    uintptr_t magic;

    // Pointer to parent arena.
    LowLevelAlloc::Arena *arena;

    // Aligns regions to 0 mod 2*sizeof(void*).
    void *dummy_for_alignment;
  } header;

  // Next two fields: in unallocated blocks: freelist skiplist data
  //                  in allocated blocks: overlaps with client data

  // Levels in skiplist used.
  int levels;

  // Actually has levels elements. The AllocList node may not have room
  // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels().
  AllocList *next[kMaxLevel];
};
}  // namespace

// ---------------------------------------------------------------------------
// A trivial skiplist implementation.  This is used to keep the freelist
// in address order while taking only logarithmic time per insert and delete.

// An integer approximation of log2(size/base)
// Requires size >= base.
static int IntLog2(size_t size, size_t base) {
  int result = 0;
  for (size_t i = size; i > base; i >>= 1) {  // i == floor(size/2**result)
    result++;
  }
  // floor(size / 2**result) <= base < floor(size / 2**(result-1))
  // => log2(size/(base+1)) <= result < 1+log2(size/base)
  // => result ~= log2(size/base)
  return result;
}

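// For example, IntLog2(4096, 16) halves 4096 eight times before the value
// stops exceeding 16 (4096 -> 2048 -> ... -> 16), so it returns
// 8 == log2(4096/16).  Sizes that are not an exact power-of-two multiple of
// base get the rounded approximation described above, e.g. IntLog2(48, 16)
// returns 2 while log2(48/16) is about 1.58.
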
// Return a random integer n:  p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
static int Random(uint32_t *state) {
  uint32_t r = *state;
  int result = 1;
  while ((((r = r * 1103515245 + 12345) >> 30) & 1) == 0) {
    result++;
  }
  *state = r;
  return result;
}

// Return a number of skiplist levels for a node of size bytes, where
// base is the minimum node size.  Compute level=log2(size / base)+n
// where n is 1 if random is null and otherwise a random number generated with
// the standard distribution for a skiplist:  See Random() above.
// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
// term, so first-fit searches touch fewer nodes.  "level" is clipped so
// level<kMaxLevel and next[level-1] will fit in the node.
// 0 < LLA_SkiplistLevels(x,y,nullptr) <= LLA_SkiplistLevels(x,y,non-null)
//   < kMaxLevel
static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
  // max_fit is the maximum number of levels that will fit in a node for the
  // given size.  We can't return more than max_fit, no matter what the
  // random number generator says.
  size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
  int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
  if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
  if (level > kMaxLevel - 1) level = kMaxLevel - 1;
  ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
  return level;
}

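// To make the distribution concrete: with a non-null random state, half of
// the calls return IntLog2(size, base) + 1, a quarter return +2, an eighth
// return +3, and so on, so taller towers are exponentially rarer.  max_fit
// exists because the next[] array overlaps the block's own storage; assuming
// a typical LP64 layout (32-byte Header, 8-byte pointers,
// offsetof(AllocList, next) == 40), a 64-byte free block can hold at most
// (64 - 40) / 8 == 3 level pointers regardless of what Random() produces.
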
// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
// points to the last element at level i in the AllocList less than *e, or is
// head if no such element exists.
static AllocList *LLA_SkiplistSearch(AllocList *head, AllocList *e,
                                     AllocList **prev) {
  AllocList *p = head;
  for (int level = head->levels - 1; level >= 0; level--) {
    for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
    }
    prev[level] = p;
  }
  return (head->levels == 0) ? nullptr : prev[0]->next[0];
}

// Insert element *e into AllocList *head.  Set prev[] as LLA_SkiplistSearch.
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
                               AllocList **prev) {
  LLA_SkiplistSearch(head, e, prev);
  for (; head->levels < e->levels; head->levels++) {  // extend prev pointers
    prev[head->levels] = head;                        // to all *e's levels
  }
  for (int i = 0; i != e->levels; i++) {  // add element to list
    e->next[i] = prev[i]->next[i];
    prev[i]->next[i] = e;
  }
}

// Remove element *e from AllocList *head.  Set prev[] as LLA_SkiplistSearch().
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
                               AllocList **prev) {
  AllocList *found = LLA_SkiplistSearch(head, e, prev);
  ABSL_RAW_CHECK(e == found, "element not in freelist");
  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
    prev[i]->next[i] = e->next[i];
  }
  while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
    head->levels--;  // reduce head->levels if level unused
  }
}

// ---------------------------------------------------------------------------
// Arena implementation

// Metadata for a LowLevelAlloc arena instance.
struct LowLevelAlloc::Arena {
  // Constructs an arena with the given LowLevelAlloc flags.
  explicit Arena(uint32_t flags_value);

  base_internal::SpinLock mu;
  // Head of free list, sorted by address
  AllocList freelist ABSL_GUARDED_BY(mu);
  // Count of allocated blocks
  int32_t allocation_count ABSL_GUARDED_BY(mu);
  // flags passed to NewArena
  const uint32_t flags;
  // Result of sysconf(_SC_PAGESIZE)
  const size_t pagesize;
  // Lowest power of two >= max(16, sizeof(AllocList::Header))
  const size_t round_up;
  // Smallest allocation block size
  const size_t min_size;
  // PRNG state
  uint32_t random ABSL_GUARDED_BY(mu);
};

namespace {
// Static storage space for the lazily-constructed, default global arena
// instances.  We require this space because the whole point of LowLevelAlloc
// is to avoid relying on malloc/new.
alignas(LowLevelAlloc::Arena) unsigned char default_arena_storage[sizeof(
    LowLevelAlloc::Arena)];
alignas(LowLevelAlloc::Arena) unsigned char unhooked_arena_storage[sizeof(
    LowLevelAlloc::Arena)];
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
alignas(
    LowLevelAlloc::Arena) unsigned char unhooked_async_sig_safe_arena_storage
    [sizeof(LowLevelAlloc::Arena)];
#endif

// We must use LowLevelCallOnce here to construct the global arenas, rather
// than using function-level statics, to avoid recursively invoking the
// scheduler.
absl::once_flag create_globals_once;

void CreateGlobalArenas() {
  new (&default_arena_storage)
      LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
  new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  new (&unhooked_async_sig_safe_arena_storage)
      LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
#endif
}

// Returns a global arena that does not call into hooks.  Used by NewArena()
// when kCallMallocHook is not set.
LowLevelAlloc::Arena *UnhookedArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena *>(&unhooked_arena_storage);
}

#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
// Returns a global arena that is async-signal safe.  Used by NewArena() when
// kAsyncSignalSafe is set.
LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena *>(
      &unhooked_async_sig_safe_arena_storage);
}
#endif

}  // namespace

// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena *>(&default_arena_storage);
}

// magic numbers to identify allocated and unallocated blocks
static const uintptr_t kMagicAllocated = 0x4c833e95U;
static const uintptr_t kMagicUnallocated = ~kMagicAllocated;

namespace {
class ABSL_SCOPED_LOCKABLE ArenaLock {
 public:
  explicit ArenaLock(LowLevelAlloc::Arena *arena)
      ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu)
      : arena_(arena) {
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
      sigset_t all;
      sigfillset(&all);
      mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
    }
#endif
    arena_->mu.Lock();
  }
  ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
  void Leave() ABSL_UNLOCK_FUNCTION() {
    arena_->mu.Unlock();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if (mask_valid_) {
      const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
      if (err != 0) {
        ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err);
      }
    }
#endif
    left_ = true;
  }

 private:
  bool left_ = false;  // whether left region
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  bool mask_valid_ = false;
  sigset_t mask_;  // old mask of blocked signals
#endif
  LowLevelAlloc::Arena *arena_;
  ArenaLock(const ArenaLock &) = delete;
  ArenaLock &operator=(const ArenaLock &) = delete;
};
}  // namespace

// create an appropriate magic number for an object at "ptr"
// "magic" should be kMagicAllocated or kMagicUnallocated
inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
  return magic ^ reinterpret_cast<uintptr_t>(ptr);
}

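// Because the magic value is xor'ed with the header's own address, the stored
// value differs for every block, so a stale or misdirected pointer is unlikely
// to carry a value that validates.  For example (illustrative numbers only), a
// header at address 0x7f0000001000 stores kMagicAllocated ^ 0x7f0000001000
// while allocated and flips to kMagicUnallocated ^ 0x7f0000001000 once it is
// on the freelist; this is how AddToFreelist() detects double frees and how
// Next() and DeleteArena() detect freelist corruption.
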
namespace {
size_t GetPageSize() {
#ifdef _WIN32
  SYSTEM_INFO system_info;
  GetSystemInfo(&system_info);
  return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
#elif defined(__wasm__) || defined(__asmjs__) || defined(__hexagon__)
  return getpagesize();
#else
  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
#endif
}

size_t RoundedUpBlockSize() {
  // Round up block sizes to a power of two close to the header size.
  size_t round_up = 16;
  while (round_up < sizeof(AllocList::Header)) {
    round_up += round_up;
  }
  return round_up;
}

}  // namespace

LowLevelAlloc::Arena::Arena(uint32_t flags_value)
    : mu(base_internal::SCHEDULE_KERNEL_ONLY),
      allocation_count(0),
      flags(flags_value),
      pagesize(GetPageSize()),
      round_up(RoundedUpBlockSize()),
      min_size(2 * round_up),
      random(0) {
  freelist.header.size = 0;
  freelist.header.magic = Magic(kMagicUnallocated, &freelist.header);
  freelist.header.arena = this;
  freelist.levels = 0;
  memset(freelist.next, 0, sizeof(freelist.next));
}

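// On a typical LP64 Linux build (an assumption; the real values come from
// GetPageSize() and sizeof(AllocList::Header) at run time), pagesize is 4096
// and sizeof(AllocList::Header) is 32, so round_up is 32 and min_size is 64:
// every request is rounded up to a multiple of 32 bytes after the 32-byte
// header is added, and no free block smaller than 64 bytes is ever created.
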
// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(uint32_t flags) {
  Arena *meta_data_arena = DefaultArena();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
    meta_data_arena = UnhookedAsyncSigSafeArena();
  } else  // NOLINT(readability/braces)
#endif
      if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
    meta_data_arena = UnhookedArena();
  }
  Arena *result =
      new (AllocWithArena(sizeof(*result), meta_data_arena)) Arena(flags);
  return result;
}

// L < arena->mu, L < arena->arena->mu
bool LowLevelAlloc::DeleteArena(Arena *arena) {
  ABSL_RAW_CHECK(
      arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
      "may not delete default arena");
  ArenaLock section(arena);
  if (arena->allocation_count != 0) {
    section.Leave();
    return false;
  }
  while (arena->freelist.next[0] != nullptr) {
    AllocList *region = arena->freelist.next[0];
    size_t size = region->header.size;
    arena->freelist.next[0] = region->next[0];
    ABSL_RAW_CHECK(
        region->header.magic == Magic(kMagicUnallocated, &region->header),
        "bad magic number in DeleteArena()");
    ABSL_RAW_CHECK(region->header.arena == arena,
                   "bad arena pointer in DeleteArena()");
    ABSL_RAW_CHECK(size % arena->pagesize == 0,
                   "empty arena has non-page-aligned block size");
    ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
                   "empty arena has non-page-aligned block");
    int munmap_result;
#ifdef _WIN32
    munmap_result = VirtualFree(region, 0, MEM_RELEASE);
    ABSL_RAW_CHECK(munmap_result != 0,
                   "LowLevelAlloc::DeleteArena: VirtualFree failed");
#else
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
      munmap_result = munmap(region, size);
    } else {
      munmap_result = base_internal::DirectMunmap(region, size);
    }
#else
    munmap_result = munmap(region, size);
#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if (munmap_result != 0) {
      ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
                   errno);
    }
#endif  // _WIN32
  }
  section.Leave();
  arena->~Arena();
  Free(arena);
  return true;
}

// ---------------------------------------------------------------------------

// Addition, checking for overflow.  The intent is to die if an external client
// manages to push through a request that would cause arithmetic to fail.
static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
  uintptr_t sum = a + b;
  ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
  return sum;
}

// Return value rounded up to next multiple of align.
// align must be a power of two.
static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
  return CheckedAdd(addr, align - 1) & ~(align - 1);
}

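// For example, RoundUp(37, 16) == 48 and RoundUp(48, 16) == 48: adding
// align - 1 and masking off the low bits rounds up to the next multiple
// without branching, and CheckedAdd() makes the computation die rather than
// wrap if addr is within align - 1 of the top of the address space.
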
// Equivalent to "return prev->next[i]" but with sanity checking
// that the freelist is in the correct order, that it
// consists of regions marked "unallocated", and that no two regions
// are adjacent in memory (they should have been coalesced).
static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena)
    ABSL_EXCLUSIVE_LOCKS_REQUIRED(arena->mu) {
  ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
  AllocList *next = prev->next[i];
  if (next != nullptr) {
    ABSL_RAW_CHECK(
        next->header.magic == Magic(kMagicUnallocated, &next->header),
        "bad magic number in Next()");
    ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
    if (prev != &arena->freelist) {
      ABSL_RAW_CHECK(prev < next, "unordered freelist");
      ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
                         reinterpret_cast<char *>(next),
                     "malformed freelist");
    }
  }
  return next;
}

// Coalesce list item "a" with its successor if they are adjacent.
static void Coalesce(AllocList *a) {
  AllocList *n = a->next[0];
  if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
                          reinterpret_cast<char *>(n)) {
    LowLevelAlloc::Arena *arena = a->header.arena;
    arena->mu.AssertHeld();
    a->header.size += n->header.size;
    n->header.magic = 0;
    n->header.arena = nullptr;
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, n, prev);
    LLA_SkiplistDelete(&arena->freelist, a, prev);
    a->levels =
        LLA_SkiplistLevels(a->header.size, arena->min_size, &arena->random);
    LLA_SkiplistInsert(&arena->freelist, a, prev);
  }
}

// Adds block at location "v" to the free list
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena)
    ABSL_EXCLUSIVE_LOCKS_REQUIRED(arena->mu) {
  AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
                                               sizeof(f->header));
  ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
                 "bad magic number in AddToFreelist()");
  ABSL_RAW_CHECK(f->header.arena == arena,
                 "bad arena pointer in AddToFreelist()");
  f->levels =
      LLA_SkiplistLevels(f->header.size, arena->min_size, &arena->random);
  AllocList *prev[kMaxLevel];
  LLA_SkiplistInsert(&arena->freelist, f, prev);
  f->header.magic = Magic(kMagicUnallocated, &f->header);
  Coalesce(f);        // maybe coalesce with successor
  Coalesce(prev[0]);  // maybe coalesce with predecessor
}

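// Block layout note: an allocated block is laid out as
//
//   [ Header (size, magic, arena, padding) | client data ... ]
//                                          ^-- pointer handed to the caller
//
// so Free() and AddToFreelist() recover the Header by subtracting
// sizeof(f->header) from the client pointer, and a freed block reuses the
// client-data area to hold the "levels" count and the skiplist next[]
// pointers.
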
// Frees storage allocated by LowLevelAlloc::Alloc().
// L < arena->mu
void LowLevelAlloc::Free(void *v) {
  if (v != nullptr) {
    AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
                                                 sizeof(f->header));
    LowLevelAlloc::Arena *arena = f->header.arena;
    ArenaLock section(arena);
    AddToFreelist(v, arena);
    ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
    arena->allocation_count--;
    section.Leave();
  }
}

// allocates and returns a block of size bytes, to be freed with Free()
// L < arena->mu
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
  void *result = nullptr;
  if (request != 0) {
    AllocList *s;  // will point to region that satisfies request
    ArenaLock section(arena);
    // round up with header
    size_t req_rnd =
        RoundUp(CheckedAdd(request, sizeof(s->header)), arena->round_up);
    for (;;) {  // loop until we find a suitable region
      // find the minimum levels that a block of this size must have
      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
      if (i < arena->freelist.levels) {        // potential blocks exist
        AllocList *before = &arena->freelist;  // predecessor of s
        while ((s = Next(i, before, arena)) != nullptr &&
               s->header.size < req_rnd) {
          before = s;
        }
        if (s != nullptr) {  // we found a region
          break;
        }
      }
      // we unlock before mmap() both because mmap() may call a callback hook,
      // and because it may be slow.
      arena->mu.Unlock();
      // mmap generous 64K chunks to decrease
      // the chances/impact of fragmentation:
      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
      void *new_pages;
#ifdef _WIN32
      new_pages = VirtualAlloc(nullptr, new_pages_size,
                               MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
      ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
#else
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
        new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
                                              PROT_WRITE | PROT_READ,
                                              MAP_ANONYMOUS | MAP_PRIVATE, -1,
                                              0);
      } else {
        new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      }
#else
      new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      if (new_pages == MAP_FAILED) {
        ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
      }

#ifdef __linux__
#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
      // Attempt to name the allocated address range in /proc/$PID/smaps on
      // Linux.
      //
      // This invocation of prctl() may fail if the Linux kernel was not
      // configured with the CONFIG_ANON_VMA_NAME option.  This is OK since
      // the naming of arenas is primarily a debugging aid.
      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, new_pages, new_pages_size,
            "absl");
#endif
#endif  // __linux__
#endif  // _WIN32
      arena->mu.Lock();
      s = reinterpret_cast<AllocList *>(new_pages);
      s->header.size = new_pages_size;
      // Pretend the block is allocated; call AddToFreelist() to free it.
      s->header.magic = Magic(kMagicAllocated, &s->header);
      s->header.arena = arena;
      AddToFreelist(&s->levels, arena);  // insert new region into free list
    }
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, s, prev);  // remove from free list
    // s points to the first free region that's big enough
    if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
      // big enough to split
      AllocList *n =
          reinterpret_cast<AllocList *>(req_rnd + reinterpret_cast<char *>(s));
      n->header.size = s->header.size - req_rnd;
      n->header.magic = Magic(kMagicAllocated, &n->header);
      n->header.arena = arena;
      s->header.size = req_rnd;
      AddToFreelist(&n->levels, arena);
    }
    s->header.magic = Magic(kMagicAllocated, &s->header);
    ABSL_RAW_CHECK(s->header.arena == arena, "");
    arena->allocation_count++;
    section.Leave();
    result = &s->levels;
  }
  ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
  return result;
}

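// Allocation walk-through with illustrative numbers (assuming the LP64 values
// sketched earlier: 32-byte header, round_up == 32, min_size == 64,
// pagesize == 4096): a request for 100 bytes becomes
// req_rnd == RoundUp(100 + 32, 32) == 160.  The first-fit scan starts at the
// lowest skiplist level that any block of at least 160 bytes must have, so it
// walks only blocks with towers that tall.  If nothing fits, the arena
// mmap()s RoundUp(160, 65536) == 65536 fresh bytes, adds them to the
// freelist, and retries; a 65536-byte block found this way is split into a
// 160-byte allocated block and a 65376-byte remainder returned to the
// freelist.
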
void *LowLevelAlloc::Alloc(size_t request) {
  void *result = DoAllocWithArena(request, DefaultArena());
  return result;
}

void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
  ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
  void *result = DoAllocWithArena(request, arena);
  return result;
}

}  // namespace base_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING

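// Usage sketch (illustrative only; the client code below is not part of this
// file).  A low-level module that must not touch malloc/new might use the
// API like this:
//
//   using absl::base_internal::LowLevelAlloc;
//   LowLevelAlloc::Arena *arena =
//       LowLevelAlloc::NewArena(LowLevelAlloc::kAsyncSignalSafe);
//   void *p = LowLevelAlloc::AllocWithArena(128, arena);
//   // ... use the 128 bytes at p ...
//   LowLevelAlloc::Free(p);             // finds the arena via the header
//   LowLevelAlloc::DeleteArena(arena);  // succeeds only once all blocks are
//                                       // freed
//
// Alloc() and Free() on the default arena work the same way, but the default
// arena is created with the kCallMallocHook flag and is not async-signal
// safe.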