// spinlock.h
//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

// Most users requiring mutual exclusion should use Mutex.
// SpinLock is provided for use in two situations:
//  - for use by Abseil internal code that Mutex itself depends on
//  - for async signal safety (see below)

// SpinLock with a base_internal::SchedulingMode::SCHEDULE_KERNEL_ONLY is async
// signal safe. If a spinlock is used within a signal handler, all code that
// acquires the lock must ensure that the signal cannot arrive while they are
// holding the lock. Typically, this is done by blocking the signal.
//
// Threads waiting on a SpinLock may be woken in an arbitrary order.
#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
#define ABSL_BASE_INTERNAL_SPINLOCK_H_

#include <atomic>
#include <cstdint>

#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/tsan_mutex_interface.h"
#include "absl/base/thread_annotations.h"

// Forward declaration so tcmalloc's holder type can be befriended inside
// SpinLock below without pulling tcmalloc headers into this file.
namespace tcmalloc {
namespace tcmalloc_internal {

class AllocationGuardSpinLockHolder;

}  // namespace tcmalloc_internal
}  // namespace tcmalloc

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
 public:
  // Default constructor creates a cooperative spinlock (see the
  // kSpinLockCooperative bit documented on lockword_ below).
  SpinLock() : lockword_(kSpinLockCooperative) {
    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
  }

  // Constructors that allow non-cooperative spinlocks to be created for use
  // inside thread schedulers. Normal clients should not use these.
  explicit SpinLock(base_internal::SchedulingMode mode);

  // Constructor for global SpinLock instances. See absl/base/const_init.h.
  constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode)
      : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}

  // For global SpinLock instances prefer trivial destructor when possible.
  // Default but non-trivial destructor in some build configurations causes an
  // extra static initializer.
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
#else
  ~SpinLock() = default;
#endif

  // Acquire this SpinLock.
  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
    // Fast path is a single uncontended attempt; contention is handled by
    // the out-of-line (cold) SlowLock.
    if (!TryLockImpl()) {
      SlowLock();
    }
    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
  }

  // Try to acquire this SpinLock without blocking and return true if the
  // acquisition was successful. If the lock was not acquired, false is
  // returned. If this SpinLock is free at the time of the call, TryLock
  // will return true with high probability.
  [[nodiscard]] inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
    bool res = TryLockImpl();
    ABSL_TSAN_MUTEX_POST_LOCK(
        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
        0);
    return res;
  }

  // Release this SpinLock, which must be held by the calling thread.
  inline void Unlock() ABSL_UNLOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    // Clear every bit except kSpinLockCooperative. The release ordering on
    // the exchange publishes this thread's critical-section writes to the
    // next acquirer.
    lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
                                    std::memory_order_release);

    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
      base_internal::SchedulingGuard::EnableRescheduling(true);
    }
    if ((lock_value & kWaitTimeMask) != 0) {
      // Collect contentionz profile info, and speed the wakeup of any waiter.
      // The wait_cycles value indicates how long this thread spent waiting
      // for the lock.
      SlowUnlock(lock_value);
    }
    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
  }

  // Determine if the lock is held. When the lock is held by the invoking
  // thread, true will always be returned. Intended to be used as
  // CHECK(lock.IsHeld()).
  [[nodiscard]] inline bool IsHeld() const {
    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
  }

  // Return immediately if this thread holds the SpinLock exclusively.
  // Otherwise, report an error by crashing with a diagnostic.
  inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() {
    if (!IsHeld()) {
      ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock");
    }
  }

 protected:
  // These should not be exported except for testing.

  // Store number of cycles between wait_start_time and wait_end_time in a
  // lock value.
  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
                                   int64_t wait_end_time);

  // Extract number of wait cycles in a lock value.
  static int64_t DecodeWaitCycles(uint32_t lock_value);

  // Provide access to protected method above. Use for testing only.
  friend struct SpinLockTest;
  friend class tcmalloc::tcmalloc_internal::AllocationGuardSpinLockHolder;

 private:
  // lockword_ is used to store the following:
  //
  // bit[0] encodes whether a lock is being held.
  // bit[1] encodes whether a lock uses cooperative scheduling.
  // bit[2] encodes whether the current lock holder disabled scheduling when
  //        acquiring the lock. Only set when kSpinLockHeld is also set.
  // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
  //        This is set by the lock holder to indicate how long it waited on
  //        the lock before eventually acquiring it. The number of cycles is
  //        encoded as a 29-bit unsigned int, or in the case that the current
  //        holder did not wait but another waiter is queued, the LSB
  //        (kSpinLockSleeper) is set. The implementation does not explicitly
  //        track the number of queued waiters beyond this. It must always be
  //        assumed that waiters may exist if the current holder was required
  //        to queue.
  //
  // Invariant: if the lock is not held, the value is either 0 or
  // kSpinLockCooperative.
  static constexpr uint32_t kSpinLockHeld = 1;
  static constexpr uint32_t kSpinLockCooperative = 2;
  static constexpr uint32_t kSpinLockDisabledScheduling = 4;
  static constexpr uint32_t kSpinLockSleeper = 8;
  // Includes kSpinLockSleeper.
  static constexpr uint32_t kWaitTimeMask =
      ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);

  // Returns true if the provided scheduling mode is cooperative.
  static constexpr bool IsCooperative(
      base_internal::SchedulingMode scheduling_mode) {
    return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
  }

  // Returns true if this instance was created cooperative (bit[1] is set).
  bool IsCooperative() const {
    return lockword_.load(std::memory_order_relaxed) & kSpinLockCooperative;
  }

  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
  void SlowLock() ABSL_ATTRIBUTE_COLD;
  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
  uint32_t SpinLoop();

  // Single acquisition attempt; returns true on success. The relaxed load is
  // only a hint — acquire ordering is supplied by TryLockInternal's CAS.
  inline bool TryLockImpl() {
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
  }

  std::atomic<uint32_t> lockword_;

  SpinLock(const SpinLock&) = delete;
  SpinLock& operator=(const SpinLock&) = delete;
};

// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
class ABSL_SCOPED_LOCKABLE [[nodiscard]] SpinLockHolder {
 public:
  // Acquires `*l` for the lifetime of this holder. `l` must be non-null.
  inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
      : lock_(l) {
    l->Lock();
  }
  inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }

  SpinLockHolder(const SpinLockHolder&) = delete;
  SpinLockHolder& operator=(const SpinLockHolder&) = delete;

 private:
  SpinLock* lock_;
};

// Register a hook for profiling support.
//
// The function pointer registered here will be called whenever a spinlock is
// contended.
// The callback is given an opaque handle to the contended spinlock
// and the number of wait cycles. This is thread-safe, but only a single
// profiler can be registered. It is an error to call this function multiple
// times with different arguments.
void RegisterSpinLockProfiler(void (*fn)(const void* lock,
                                         int64_t wait_cycles));

//------------------------------------------------------------------------------
// Public interface ends here.
//------------------------------------------------------------------------------

// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
// Otherwise, returns last observed value for lockword_.
inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
                                          uint32_t wait_cycles) {
  // Someone else holds the lock: report the observed word unchanged.
  if ((lock_value & kSpinLockHeld) != 0) {
    return lock_value;
  }

  uint32_t sched_disabled_bit = 0;
  if ((lock_value & kSpinLockCooperative) == 0) {
    // For non-cooperative locks we must make sure we mark ourselves as
    // non-reschedulable before we attempt to CompareAndSwap.
    if (base_internal::SchedulingGuard::DisableRescheduling()) {
      sched_disabled_bit = kSpinLockDisabledScheduling;
    }
  }

  // On success the CAS installs the held bit (plus wait_cycles and the
  // scheduling-disabled bit) with acquire ordering. On failure it undoes the
  // rescheduling change made above; note compare_exchange_strong also
  // refreshes lock_value with the word actually observed, which is what we
  // return to the caller.
  if (!lockword_.compare_exchange_strong(
          lock_value,
          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
          std::memory_order_acquire, std::memory_order_relaxed)) {
    base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
  }

  return lock_value;
}

}  // namespace base_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_