umutex.h (9216B)
1 // © 2016 and later: Unicode, Inc. and others. 2 // License & terms of use: http://www.unicode.org/copyright.html 3 /* 4 ********************************************************************** 5 * Copyright (C) 1997-2015, International Business Machines 6 * Corporation and others. All Rights Reserved. 7 ********************************************************************** 8 * 9 * File UMUTEX.H 10 * 11 * Modification History: 12 * 13 * Date Name Description 14 * 04/02/97 aliu Creation. 15 * 04/07/99 srl rewrite - C interface, multiple mutices 16 * 05/13/99 stephen Changed to umutex (from cmutex) 17 ****************************************************************************** 18 */ 19 20 #ifndef UMUTEX_H 21 #define UMUTEX_H 22 23 #ifndef __wasi__ 24 #include <atomic> 25 #include <condition_variable> 26 #include <mutex> 27 #endif 28 29 #include <type_traits> 30 31 #include "unicode/utypes.h" 32 #include "unicode/uclean.h" 33 #include "unicode/uobject.h" 34 35 #include "putilimp.h" 36 37 #if defined(U_USER_ATOMICS_H) || defined(U_USER_MUTEX_H) 38 // Support for including an alternate implementation of atomic & mutex operations has been withdrawn. 39 // See issue ICU-20185. 40 #error U_USER_ATOMICS and U_USER_MUTEX_H are not supported 41 #endif 42 43 U_NAMESPACE_BEGIN 44 45 /**************************************************************************** 46 * 47 * Low Level Atomic Operations, ICU wrappers for. 
/****************************************************************************
 *
 * Low Level Atomic Operations, ICU wrappers for.
 *
 ****************************************************************************/

#ifndef __wasi__

typedef std::atomic<int32_t> u_atomic_int32_t;

// Read the variable with acquire ordering: memory operations that follow
// this load (in program order) cannot be moved ahead of it.
inline int32_t umtx_loadAcquire(u_atomic_int32_t &var) {
    return var.load(std::memory_order_acquire);
}

// Write the variable with release ordering: memory operations that precede
// this store (in program order) cannot be moved after it.
inline void umtx_storeRelease(u_atomic_int32_t &var, int32_t val) {
    var.store(val, std::memory_order_release);
}

// Atomically increment, returning the updated (post-increment) value.
inline int32_t umtx_atomic_inc(u_atomic_int32_t *var) {
    int32_t previous = var->fetch_add(1);
    return previous + 1;
}

// Atomically decrement, returning the updated (post-decrement) value.
inline int32_t umtx_atomic_dec(u_atomic_int32_t *var) {
    int32_t previous = var->fetch_sub(1);
    return previous - 1;
}

#else

// WASI build: plain (non-atomic) integer operations stand in for the
// atomics above. Presumably safe only because no concurrent threads are
// expected under __wasi__ — confirm against the build configuration.
typedef int32_t u_atomic_int32_t;

inline int32_t umtx_loadAcquire(u_atomic_int32_t &var) {
    return var;
}

inline void umtx_storeRelease(u_atomic_int32_t &var, int32_t val) {
    var = val;
}

inline int32_t umtx_atomic_inc(u_atomic_int32_t *var) {
    return ++(*var);
}

inline int32_t umtx_atomic_dec(u_atomic_int32_t *var) {
    return --(*var);
}

#endif

/*************************************************************************************************
 *
 * UInitOnce Definitions.
 *
 *************************************************************************************************/
96 * 97 *************************************************************************************************/ 98 99 struct U_COMMON_API_CLASS UInitOnce { 100 private: 101 friend U_COMMON_API UBool U_EXPORT2 umtx_initImplPreInit(UInitOnce&); 102 friend U_COMMON_API void U_EXPORT2 umtx_initImplPostInit(UInitOnce&); 103 template <typename T> friend void umtx_initOnce(UInitOnce&, T*, void (T::*)()); 104 friend void umtx_initOnce(UInitOnce&, void (*)()); 105 friend void umtx_initOnce(UInitOnce&, void (*)(UErrorCode&), UErrorCode&); 106 template <typename T> friend void umtx_initOnce(UInitOnce&, void (*)(T), T); 107 template <typename T> friend void umtx_initOnce(UInitOnce&, void (*)(T, UErrorCode&), T, UErrorCode&); 108 109 u_atomic_int32_t fState{0}; 110 UErrorCode fErrCode{U_ZERO_ERROR}; 111 112 public: 113 U_COMMON_API void reset() { fState = 0; } 114 U_COMMON_API UBool isReset() { return umtx_loadAcquire(fState) == 0; } 115 // Note: isReset() is used by service registration code. 116 // Thread safety of this usage needs review. 117 }; 118 119 U_COMMON_API UBool U_EXPORT2 umtx_initImplPreInit(UInitOnce &); 120 U_COMMON_API void U_EXPORT2 umtx_initImplPostInit(UInitOnce &); 121 122 template<class T> void umtx_initOnce(UInitOnce &uio, T *obj, void (U_CALLCONV T::*fp)()) { 123 if (umtx_loadAcquire(uio.fState) == 2) { 124 return; 125 } 126 if (umtx_initImplPreInit(uio)) { 127 (obj->*fp)(); 128 umtx_initImplPostInit(uio); 129 } 130 } 131 132 133 // umtx_initOnce variant for plain functions, or static class functions. 134 // No context parameter. 135 inline void umtx_initOnce(UInitOnce &uio, void (U_CALLCONV *fp)()) { 136 if (umtx_loadAcquire(uio.fState) == 2) { 137 return; 138 } 139 if (umtx_initImplPreInit(uio)) { 140 (*fp)(); 141 umtx_initImplPostInit(uio); 142 } 143 } 144 145 // umtx_initOnce variant for plain functions, or static class functions. 146 // With ErrorCode, No context parameter. 
147 inline void umtx_initOnce(UInitOnce &uio, void (U_CALLCONV *fp)(UErrorCode &), UErrorCode &errCode) { 148 if (U_FAILURE(errCode)) { 149 return; 150 } 151 if (umtx_loadAcquire(uio.fState) != 2 && umtx_initImplPreInit(uio)) { 152 // We run the initialization. 153 (*fp)(errCode); 154 uio.fErrCode = errCode; 155 umtx_initImplPostInit(uio); 156 } else { 157 // Someone else already ran the initialization. 158 if (U_FAILURE(uio.fErrCode)) { 159 errCode = uio.fErrCode; 160 } 161 } 162 } 163 164 // umtx_initOnce variant for plain functions, or static class functions, 165 // with a context parameter. 166 template<class T> void umtx_initOnce(UInitOnce &uio, void (U_CALLCONV *fp)(T), T context) { 167 if (umtx_loadAcquire(uio.fState) == 2) { 168 return; 169 } 170 if (umtx_initImplPreInit(uio)) { 171 (*fp)(context); 172 umtx_initImplPostInit(uio); 173 } 174 } 175 176 // umtx_initOnce variant for plain functions, or static class functions, 177 // with a context parameter and an error code. 178 template<class T> void umtx_initOnce(UInitOnce &uio, void (U_CALLCONV *fp)(T, UErrorCode &), T context, UErrorCode &errCode) { 179 if (U_FAILURE(errCode)) { 180 return; 181 } 182 if (umtx_loadAcquire(uio.fState) != 2 && umtx_initImplPreInit(uio)) { 183 // We run the initialization. 184 (*fp)(context, errCode); 185 uio.fErrCode = errCode; 186 umtx_initImplPostInit(uio); 187 } else { 188 // Someone else already ran the initialization. 189 if (U_FAILURE(uio.fErrCode)) { 190 errCode = uio.fErrCode; 191 } 192 } 193 } 194 195 // UMutex should be constexpr-constructible, so that no initialization code 196 // is run during startup. 197 // This works on all C++ libraries except MS VS before VS2019. 
198 #if (defined(_CPPLIB_VER) && !defined(_MSVC_STL_VERSION)) || \ 199 (defined(_MSVC_STL_VERSION) && _MSVC_STL_VERSION < 142) 200 // (VS std lib older than VS2017) || (VS std lib version < VS2019) 201 # define UMUTEX_CONSTEXPR 202 #else 203 # define UMUTEX_CONSTEXPR constexpr 204 #endif 205 206 /** 207 * UMutex - ICU Mutex class. 208 * 209 * This is the preferred Mutex class for use within ICU implementation code. 210 * It is a thin wrapper over C++ std::mutex, with these additions: 211 * - Static instances are safe, not triggering static construction or destruction, 212 * and the associated order of construction or destruction issues. 213 * - Plumbed into u_cleanup() for destructing the underlying std::mutex, 214 * which frees any OS level resources they may be holding. 215 * 216 * Limitations: 217 * - Static or global instances only. Cannot be heap allocated. Cannot appear as a 218 * member of another class. 219 * - No condition variables or other advanced features. If needed, you will need to use 220 * std::mutex and std::condition_variable directly. For an example, see unifiedcache.cpp 221 * 222 * Typical Usage: 223 * static UMutex myMutex; 224 * 225 * { 226 * Mutex lock(myMutex); 227 * ... // Do stuff that is protected by myMutex; 228 * } // myMutex is released when lock goes out of scope. 
229 */ 230 231 class U_COMMON_API_CLASS UMutex { 232 public: 233 U_COMMON_API UMUTEX_CONSTEXPR UMutex() {} 234 U_COMMON_API ~UMutex() = default; 235 236 U_COMMON_API UMutex(const UMutex& other) = delete; 237 U_COMMON_API UMutex& operator=(const UMutex& other) = delete; 238 U_COMMON_API void* operator new(size_t) = delete; 239 240 // requirements for C++ BasicLockable, allows UMutex to work with std::lock_guard 241 U_COMMON_API void lock() { 242 #ifndef __wasi__ 243 std::mutex *m = fMutex.load(std::memory_order_acquire); 244 if (m == nullptr) { m = getMutex(); } 245 m->lock(); 246 #endif 247 } 248 U_COMMON_API void unlock() { 249 #ifndef __wasi__ 250 fMutex.load(std::memory_order_relaxed)->unlock(); 251 #endif 252 } 253 254 U_COMMON_API static void cleanup(); 255 256 private: 257 #ifndef __wasi__ 258 alignas(std::mutex) char fStorage[sizeof(std::mutex)] {}; 259 std::atomic<std::mutex *> fMutex { nullptr }; 260 #endif 261 262 /** All initialized UMutexes are kept in a linked list, so that they can be found, 263 * and the underlying std::mutex destructed, by u_cleanup(). 264 */ 265 UMutex *fListLink { nullptr }; 266 static UMutex *gListHead; 267 268 /** Out-of-line function to lazily initialize a UMutex on first use. 269 * Initial fast check is inline, in lock(). The returned value may never 270 * be nullptr. 271 */ 272 #ifndef __wasi__ 273 std::mutex *getMutex(); 274 #endif 275 }; 276 277 278 /* Lock a mutex. 279 * @param mutex The given mutex to be locked. Pass NULL to specify 280 * the global ICU mutex. Recursive locks are an error 281 * and may cause a deadlock on some platforms. 282 */ 283 U_CAPI void U_EXPORT2 umtx_lock(UMutex* mutex); 284 285 /* Unlock a mutex. 286 * @param mutex The given mutex to be unlocked. Pass NULL to specify 287 * the global ICU mutex. 288 */ 289 U_CAPI void U_EXPORT2 umtx_unlock (UMutex* mutex); 290 291 292 U_NAMESPACE_END 293 294 #endif /* UMUTEX_H */ 295 /*eof*/