tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

create_thread_identity.cc (5895B)


      1 // Copyright 2017 The Abseil Authors.
      2 //
      3 // Licensed under the Apache License, Version 2.0 (the "License");
      4 // you may not use this file except in compliance with the License.
      5 // You may obtain a copy of the License at
      6 //
      7 //      https://www.apache.org/licenses/LICENSE-2.0
      8 //
      9 // Unless required by applicable law or agreed to in writing, software
     10 // distributed under the License is distributed on an "AS IS" BASIS,
     11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 // See the License for the specific language governing permissions and
     13 // limitations under the License.
     14 
     15 #include <stdint.h>
     16 
     17 #include <new>
     18 
     19 // This file is a no-op if the required LowLevelAlloc support is missing.
     20 #include "absl/base/internal/low_level_alloc.h"
     21 #include "absl/synchronization/internal/waiter.h"
     22 #ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
     23 
     24 #include <string.h>
     25 
     26 #include "absl/base/attributes.h"
     27 #include "absl/base/internal/spinlock.h"
     28 #include "absl/base/internal/thread_identity.h"
     29 #include "absl/synchronization/internal/per_thread_sem.h"
     30 
     31 namespace absl {
     32 ABSL_NAMESPACE_BEGIN
     33 namespace synchronization_internal {
     34 
// ThreadIdentity storage is persistent: once allocated, an object is never
// returned to the allocator.  Instead, identities released by exiting
// threads are kept on a free-list (below) for reuse by NewThreadIdentity().
ABSL_CONST_INIT static base_internal::SpinLock freelist_lock(
   absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
// Head of the singly-linked free-list of released ThreadIdentity objects,
// chained through ThreadIdentity::next.  Guarded by freelist_lock.
ABSL_CONST_INIT static base_internal::ThreadIdentity* thread_identity_freelist;
     40 
     41 // A per-thread destructor for reclaiming associated ThreadIdentity objects.
     42 // Since we must preserve their storage, we cache them for re-use instead of
     43 // truly destructing the object.
     44 static void ReclaimThreadIdentity(void* v) {
     45  base_internal::ThreadIdentity* identity =
     46      static_cast<base_internal::ThreadIdentity*>(v);
     47 
     48  // all_locks might have been allocated by the Mutex implementation.
     49  // We free it here when we are notified that our thread is dying.
     50  if (identity->per_thread_synch.all_locks != nullptr) {
     51    base_internal::LowLevelAlloc::Free(identity->per_thread_synch.all_locks);
     52  }
     53 
     54  // We must explicitly clear the current thread's identity:
     55  // (a) Subsequent (unrelated) per-thread destructors may require an identity.
     56  //     We must guarantee a new identity is used in this case (this instructor
     57  //     will be reinvoked up to PTHREAD_DESTRUCTOR_ITERATIONS in this case).
     58  // (b) ThreadIdentity implementations may depend on memory that is not
     59  //     reinitialized before reuse.  We must allow explicit clearing of the
     60  //     association state in this case.
     61  base_internal::ClearCurrentThreadIdentity();
     62  {
     63    base_internal::SpinLockHolder l(&freelist_lock);
     64    identity->next = thread_identity_freelist;
     65    thread_identity_freelist = identity;
     66  }
     67 }
     68 
     69 // Return value rounded up to next multiple of align.
     70 // Align must be a power of two.
     71 static intptr_t RoundUp(intptr_t addr, intptr_t align) {
     72  return (addr + align - 1) & ~(align - 1);
     73 }
     74 
     75 void OneTimeInitThreadIdentity(base_internal::ThreadIdentity* identity) {
     76  PerThreadSem::Init(identity);
     77  identity->ticker.store(0, std::memory_order_relaxed);
     78  identity->wait_start.store(0, std::memory_order_relaxed);
     79  identity->is_idle.store(false, std::memory_order_relaxed);
     80 }
     81 
     82 static void ResetThreadIdentityBetweenReuse(
     83    base_internal::ThreadIdentity* identity) {
     84  base_internal::PerThreadSynch* pts = &identity->per_thread_synch;
     85  pts->next = nullptr;
     86  pts->skip = nullptr;
     87  pts->may_skip = false;
     88  pts->waitp = nullptr;
     89  pts->suppress_fatal_errors = false;
     90  pts->readers = 0;
     91  pts->priority = 0;
     92  pts->next_priority_read_cycles = 0;
     93  pts->state.store(base_internal::PerThreadSynch::State::kAvailable,
     94                   std::memory_order_relaxed);
     95  pts->maybe_unlocking = false;
     96  pts->wake = false;
     97  pts->cond_waiter = false;
     98  pts->all_locks = nullptr;
     99  identity->blocked_count_ptr = nullptr;
    100  identity->ticker.store(0, std::memory_order_relaxed);
    101  identity->wait_start.store(0, std::memory_order_relaxed);
    102  identity->is_idle.store(false, std::memory_order_relaxed);
    103  identity->next = nullptr;
    104 }
    105 
    106 static base_internal::ThreadIdentity* NewThreadIdentity() {
    107  base_internal::ThreadIdentity* identity = nullptr;
    108 
    109  {
    110    // Re-use a previously released object if possible.
    111    base_internal::SpinLockHolder l(&freelist_lock);
    112    if (thread_identity_freelist) {
    113      identity = thread_identity_freelist;  // Take list-head.
    114      thread_identity_freelist = thread_identity_freelist->next;
    115    }
    116  }
    117 
    118  if (identity == nullptr) {
    119    // Allocate enough space to align ThreadIdentity to a multiple of
    120    // PerThreadSynch::kAlignment. This space is never released (it is
    121    // added to a freelist by ReclaimThreadIdentity instead).
    122    void* allocation = base_internal::LowLevelAlloc::Alloc(
    123        sizeof(*identity) + base_internal::PerThreadSynch::kAlignment - 1);
    124    // Round up the address to the required alignment.
    125    identity = reinterpret_cast<base_internal::ThreadIdentity*>(
    126        RoundUp(reinterpret_cast<intptr_t>(allocation),
    127                base_internal::PerThreadSynch::kAlignment));
    128    // Note that *identity is never constructed.
    129    // TODO(b/357097463): change this "one time init" to be a proper
    130    // constructor.
    131    OneTimeInitThreadIdentity(identity);
    132  }
    133  ResetThreadIdentityBetweenReuse(identity);
    134 
    135  return identity;
    136 }
    137 
    138 // Allocates and attaches ThreadIdentity object for the calling thread.  Returns
    139 // the new identity.
    140 // REQUIRES: CurrentThreadIdentity(false) == nullptr
    141 base_internal::ThreadIdentity* CreateThreadIdentity() {
    142  base_internal::ThreadIdentity* identity = NewThreadIdentity();
    143  // Associate the value with the current thread, and attach our destructor.
    144  base_internal::SetCurrentThreadIdentity(identity, ReclaimThreadIdentity);
    145  return identity;
    146 }
    147 
    148 }  // namespace synchronization_internal
    149 ABSL_NAMESPACE_END
    150 }  // namespace absl
    151 
    152 #endif  // ABSL_LOW_LEVEL_ALLOC_MISSING