tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

sctp_process_lock.h (27606B)


      1 /*-
      2 * SPDX-License-Identifier: BSD-3-Clause
      3 *
      4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
      5 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
      6 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
      7 *
      8 * Redistribution and use in source and binary forms, with or without
      9 * modification, are permitted provided that the following conditions are met:
     10 *
     11 * a) Redistributions of source code must retain the above copyright notice,
     12 *   this list of conditions and the following disclaimer.
     13 *
     14 * b) Redistributions in binary form must reproduce the above copyright
     15 *    notice, this list of conditions and the following disclaimer in
     16 *   the documentation and/or other materials provided with the distribution.
     17 *
     18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
     19 *    contributors may be used to endorse or promote products derived
     20 *    from this software without specific prior written permission.
     21 *
     22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
     24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     32 * THE POSSIBILITY OF SUCH DAMAGE.
     33 */
     34 #ifndef __sctp_process_lock_h__
     35 #define __sctp_process_lock_h__
     36 
/*
 * We still need to define five atomic functions or
 * their equivalent.
 * - atomic_add_int(&foo, val) - add atomically the value
 * - atomic_fetchadd_int(&foo, val) - does same as atomic_add_int
 *				      but the value it was is returned.
 * - atomic_subtract_int(&foo, val) - can be made from atomic_add_int()
 *
 * - atomic_cmpset_int(&foo, value, newvalue) - Does a set of newvalue
 *					        in foo if and only if
 *					        foo is value. Returns 0
 *					        on success.
 */
     50 
     51 #ifdef SCTP_PER_SOCKET_LOCKING
     52 /*
     53 * per socket level locking
     54 */
     55 
     56 #if defined(_WIN32)
     57 /* Lock for INFO stuff */
     58 #define SCTP_INP_INFO_LOCK_INIT()
     59 #define SCTP_INP_INFO_RLOCK()
     60 #define SCTP_INP_INFO_RUNLOCK()
     61 #define SCTP_INP_INFO_WLOCK()
     62 #define SCTP_INP_INFO_WUNLOCK()
     63 #define SCTP_INP_INFO_LOCK_ASSERT()
     64 #define SCTP_INP_INFO_RLOCK_ASSERT()
     65 #define SCTP_INP_INFO_WLOCK_ASSERT()
     66 #define SCTP_INP_INFO_LOCK_DESTROY()
     67 #define SCTP_IPI_COUNT_INIT()
     68 #define SCTP_IPI_COUNT_DESTROY()
     69 #else
     70 #define SCTP_INP_INFO_LOCK_INIT()
     71 #define SCTP_INP_INFO_RLOCK()
     72 #define SCTP_INP_INFO_RUNLOCK()
     73 #define SCTP_INP_INFO_WLOCK()
     74 #define SCTP_INP_INFO_WUNLOCK()
     75 #define SCTP_INP_INFO_LOCK_ASSERT()
     76 #define SCTP_INP_INFO_RLOCK_ASSERT()
     77 #define SCTP_INP_INFO_WLOCK_ASSERT()
     78 #define SCTP_INP_INFO_LOCK_DESTROY()
     79 #define SCTP_IPI_COUNT_INIT()
     80 #define SCTP_IPI_COUNT_DESTROY()
     81 #endif
     82 
     83 /* Lock for INP */
     84 #define SCTP_INP_LOCK_INIT(_inp)
     85 #define SCTP_INP_LOCK_DESTROY(_inp)
     86 
     87 #define SCTP_INP_RLOCK(_inp)
     88 #define SCTP_INP_RUNLOCK(_inp)
     89 #define SCTP_INP_WLOCK(_inp)
     90 #define SCTP_INP_WUNLOCK(_inp)
     91 #define SCTP_INP_RLOCK_ASSERT(_inp)
     92 #define SCTP_INP_WLOCK_ASSERT(_inp)
     93 #define SCTP_INP_INCR_REF(_inp)
     94 #define SCTP_INP_DECR_REF(_inp)
     95 
     96 #define SCTP_ASOC_CREATE_LOCK_INIT(_inp)
     97 #define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp)
     98 #define SCTP_ASOC_CREATE_LOCK(_inp)
     99 #define SCTP_ASOC_CREATE_UNLOCK(_inp)
    100 
    101 #define SCTP_INP_READ_LOCK_INIT(_inp)
    102 #define SCTP_INP_READ_LOCK_DESTROY(_inp)
    103 #define SCTP_INP_READ_LOCK(_inp)
    104 #define SCTP_INP_READ_UNLOCK(_inp)
    105 #define SCTP_INP_READ_LOCK_ASSERT(_inp)
    106 
    107 /* Lock for TCB */
    108 #define SCTP_TCB_LOCK_INIT(_tcb)
    109 #define SCTP_TCB_LOCK_DESTROY(_tcb)
    110 #define SCTP_TCB_LOCK(_tcb)
    111 #define SCTP_TCB_TRYLOCK(_tcb) 1
    112 #define SCTP_TCB_UNLOCK(_tcb)
    113 #define SCTP_TCB_UNLOCK_IFOWNED(_tcb)
    114 #define SCTP_TCB_LOCK_ASSERT(_tcb)
    115 
    116 #else
/*
 * per tcb level locking
 */
/* Global counters are plain atomics in this mode: nothing to initialize. */
#define SCTP_IPI_COUNT_INIT()
    121 
    122 #if defined(_WIN32)
    123 #define SCTP_WQ_ADDR_INIT() \
    124 InitializeCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
    125 #define SCTP_WQ_ADDR_DESTROY() \
    126 DeleteCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
    127 #define SCTP_WQ_ADDR_LOCK() \
    128 EnterCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
    129 #define SCTP_WQ_ADDR_UNLOCK() \
    130 LeaveCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
    131 #define SCTP_WQ_ADDR_LOCK_ASSERT()
    132 
    133 #if WINVER < 0x0600
    134 #define SCTP_INP_INFO_LOCK_INIT() \
    135 InitializeCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
    136 #define SCTP_INP_INFO_LOCK_DESTROY() \
    137 DeleteCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
    138 #define SCTP_INP_INFO_RLOCK() \
    139 EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
    140 #define SCTP_INP_INFO_TRYLOCK()	\
    141 TryEnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
    142 #define SCTP_INP_INFO_WLOCK() \
    143 EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
    144 #define SCTP_INP_INFO_RUNLOCK() \
    145 LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
    146 #define SCTP_INP_INFO_WUNLOCK()	\
    147 LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
    148 #define SCTP_INP_INFO_LOCK_ASSERT()
    149 #define SCTP_INP_INFO_RLOCK_ASSERT()
    150 #define SCTP_INP_INFO_WLOCK_ASSERT()
    151 #else
    152 #define SCTP_INP_INFO_LOCK_INIT() \
    153 InitializeSRWLock(&SCTP_BASE_INFO(ipi_ep_mtx))
    154 #define SCTP_INP_INFO_LOCK_DESTROY()
    155 #define SCTP_INP_INFO_RLOCK() \
    156 AcquireSRWLockShared(&SCTP_BASE_INFO(ipi_ep_mtx))
    157 #define SCTP_INP_INFO_TRYLOCK() \
    158 TryAcquireSRWLockShared(&SCTP_BASE_INFO(ipi_ep_mtx))
    159 #define SCTP_INP_INFO_WLOCK() \
    160 AcquireSRWLockExclusive(&SCTP_BASE_INFO(ipi_ep_mtx))
    161 #define SCTP_INP_INFO_RUNLOCK() \
    162 ReleaseSRWLockShared(&SCTP_BASE_INFO(ipi_ep_mtx))
    163 #define SCTP_INP_INFO_WUNLOCK() \
    164 ReleaseSRWLockExclusive(&SCTP_BASE_INFO(ipi_ep_mtx))
    165 #define SCTP_INP_INFO_LOCK_ASSERT()
    166 #define SCTP_INP_INFO_RLOCK_ASSERT()
    167 #define SCTP_INP_INFO_WLOCK_ASSERT()
    168 #endif
    169 
    170 #define SCTP_IP_PKTLOG_INIT() \
    171 InitializeCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
    172 #define SCTP_IP_PKTLOG_DESTROY () \
    173 DeleteCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
    174 #define SCTP_IP_PKTLOG_LOCK() \
    175 EnterCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
    176 #define SCTP_IP_PKTLOG_UNLOCK() \
    177 LeaveCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
    178 
/*
 * The INP locks we will use for locking an SCTP endpoint, so for example if
 * we want to change something at the endpoint level for example random_store
 * or cookie secrets we lock the INP level.
 */
/* Endpoint read-queue (inp_rdata) protection. */
#define SCTP_INP_READ_LOCK_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_LOCK_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_LOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_UNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_LOCK_ASSERT(_inp)

#define SCTP_INP_LOCK_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_LOCK_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_mtx)
/*
 * CRITICAL_SECTIONs are exclusive-only, so the "read" and "write" lock
 * macros below enter the very same section; there is no shared mode.
 */
#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do { 						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);			\
	EnterCriticalSection(&(_inp)->inp_mtx);					\
} while (0)
#define SCTP_INP_WLOCK(_inp) do { 						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);			\
	EnterCriticalSection(&(_inp)->inp_mtx);					\
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_WLOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_mtx)
#endif
/* (Parameter is an endpoint despite the historical "_tcb" name.) */
#define SCTP_INP_RLOCK_ASSERT(_tcb)
#define SCTP_INP_WLOCK_ASSERT(_tcb)

/* Endpoint reference counting: atomic, no lock held. */
#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_subtract_int(&((_inp)->refcount), 1)

/* Serializes association creation on an endpoint. */
#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_create_mtx)
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_create_mtx)
#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE);		\
	EnterCriticalSection(&(_inp)->inp_create_mtx);				\
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_create_mtx)
#endif

#define SCTP_INP_RUNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_WUNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_mtx)
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_create_mtx)

/*
 * For the majority of things (once we have found the association) we will
 * lock the actual association mutex. This will protect all the association
 * level queues and streams and such. We will need to lock the socket layer
 * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
 * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
 */

#define SCTP_TCB_LOCK_INIT(_tcb) \
	InitializeCriticalSection(&(_tcb)->tcb_mtx)
#define SCTP_TCB_LOCK_DESTROY(_tcb) \
	DeleteCriticalSection(&(_tcb)->tcb_mtx)
#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB);		\
	EnterCriticalSection(&(_tcb)->tcb_mtx);					\
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) \
	EnterCriticalSection(&(_tcb)->tcb_mtx)
#endif
/* Nonzero iff the section was entered. */
#define SCTP_TCB_TRYLOCK(_tcb) 	((TryEnterCriticalSection(&(_tcb)->tcb_mtx)))
#define SCTP_TCB_UNLOCK(_tcb) \
	LeaveCriticalSection(&(_tcb)->tcb_mtx)
#define SCTP_TCB_LOCK_ASSERT(_tcb)
    269 
    270 #else /* all Userspaces except Windows */
    271 #define SCTP_WQ_ADDR_INIT() \
    272 (void)pthread_mutex_init(&SCTP_BASE_INFO(wq_addr_mtx), &SCTP_BASE_VAR(mtx_attr))
    273 #define SCTP_WQ_ADDR_DESTROY() \
    274 (void)pthread_mutex_destroy(&SCTP_BASE_INFO(wq_addr_mtx))
    275 #ifdef INVARIANTS
    276 #define SCTP_WQ_ADDR_LOCK() \
    277 KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(wq_addr_mtx)) == 0, ("%s:%d: wq_addr_mtx already locked", __FILE__, __LINE__))
    278 #define SCTP_WQ_ADDR_UNLOCK() \
    279 KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(wq_addr_mtx)) == 0, ("%s:%d: wq_addr_mtx not locked", __FILE__, __LINE__))
    280 #else
    281 #define SCTP_WQ_ADDR_LOCK() \
    282 (void)pthread_mutex_lock(&SCTP_BASE_INFO(wq_addr_mtx))
    283 #define SCTP_WQ_ADDR_UNLOCK() \
    284 (void)pthread_mutex_unlock(&SCTP_BASE_INFO(wq_addr_mtx))
    285 #endif
    286 #define SCTP_WQ_ADDR_LOCK_ASSERT() \
    287 KASSERT(pthread_mutex_trylock(&SCTP_BASE_INFO(wq_addr_mtx)) == EBUSY, ("%s:%d: wq_addr_mtx not locked", __FILE__, __LINE__))
    288 
    289 #define SCTP_INP_INFO_LOCK_INIT() \
    290 (void)pthread_rwlock_init(&SCTP_BASE_INFO(ipi_ep_mtx), &SCTP_BASE_VAR(rwlock_attr))
    291 #define SCTP_INP_INFO_LOCK_DESTROY() \
    292 (void)pthread_rwlock_destroy(&SCTP_BASE_INFO(ipi_ep_mtx))
    293 #ifdef INVARIANTS
    294 #define SCTP_INP_INFO_RLOCK() \
    295 KASSERT(pthread_rwlock_rdlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s%d: ipi_ep_mtx already locked", __FILE__, __LINE__))
    296 #define SCTP_INP_INFO_WLOCK() \
    297 KASSERT(pthread_rwlock_wrlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s:%d: ipi_ep_mtx already locked", __FILE__, __LINE__))
    298 #define SCTP_INP_INFO_RUNLOCK() \
    299 KASSERT(pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s:%d: ipi_ep_mtx not locked", __FILE__, __LINE__))
    300 #define SCTP_INP_INFO_WUNLOCK() \
    301 KASSERT(pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s:%d: ipi_ep_mtx not locked", __FILE__, __LINE__))
    302 #else
    303 #define SCTP_INP_INFO_RLOCK() \
    304 (void)pthread_rwlock_rdlock(&SCTP_BASE_INFO(ipi_ep_mtx))
    305 #define SCTP_INP_INFO_WLOCK() \
    306 (void)pthread_rwlock_wrlock(&SCTP_BASE_INFO(ipi_ep_mtx))
    307 #define SCTP_INP_INFO_RUNLOCK() \
    308 (void)pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))
    309 #define SCTP_INP_INFO_WUNLOCK() \
    310 (void)pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))
    311 #endif
    312 #define SCTP_INP_INFO_LOCK_ASSERT()
    313 #define SCTP_INP_INFO_RLOCK_ASSERT()
    314 #define SCTP_INP_INFO_WLOCK_ASSERT()
    315 #define SCTP_INP_INFO_TRYLOCK() \
    316 (!(pthread_rwlock_tryrdlock(&SCTP_BASE_INFO(ipi_ep_mtx))))
    317 
    318 #define SCTP_IP_PKTLOG_INIT() \
    319 (void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_pktlog_mtx), &SCTP_BASE_VAR(mtx_attr))
    320 #define SCTP_IP_PKTLOG_DESTROY() \
    321 (void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_pktlog_mtx))
    322 #ifdef INVARIANTS
    323 #define SCTP_IP_PKTLOG_LOCK() \
    324 KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx)) == 0, ("%s:%d: ipi_pktlog_mtx already locked", __FILE__, __LINE__))
    325 #define SCTP_IP_PKTLOG_UNLOCK() \
    326 KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx)) == 0, ("%s:%d: ipi_pktlog_mtx not locked", __FILE__, __LINE__))
    327 #else
    328 #define SCTP_IP_PKTLOG_LOCK() \
    329 (void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
    330 #define SCTP_IP_PKTLOG_UNLOCK() \
    331 (void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
    332 #endif
    333 
    334 
/*
 * The INP locks we will use for locking an SCTP endpoint, so for example if
 * we want to change something at the endpoint level for example random_store
 * or cookie secrets we lock the INP level.
 */
/* Endpoint read-queue (inp_rdata) protection: a plain mutex. */
#define SCTP_INP_READ_LOCK_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_rdata_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_INP_READ_LOCK_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_rdata_mtx)
#ifdef INVARIANTS
/* Under INVARIANTS every pthread call is wrapped in KASSERT so a failing
 * lock/unlock is reported instead of silently ignored. */
#define SCTP_INP_READ_LOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_rdata_mtx) == 0, ("%s:%d: inp_rdata_mtx already locked", __FILE__, __LINE__))
#define SCTP_INP_READ_UNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_rdata_mtx) == 0, ("%s:%d: inp_rdata_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_INP_READ_LOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_UNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_rdata_mtx)
#endif
/* NOTE(review): asserts by expecting trylock to report EBUSY for a held
 * mutex — assumes mtx_attr selects a mutex type that does so; confirm. */
#define SCTP_INP_READ_LOCK_ASSERT(_inp) \
	KASSERT(pthread_mutex_trylock(&(_inp)->inp_rdata_mtx) == EBUSY, ("%s:%d: inp_rdata_mtx not locked", __FILE__, __LINE__))

/* Endpoint lock: a plain mutex, so SCTP_INP_RLOCK and SCTP_INP_WLOCK both
 * take the same exclusive lock (no shared/reader mode). */
#define SCTP_INP_LOCK_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_INP_LOCK_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_mtx)
#ifdef INVARIANTS
#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do {											\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)						\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);								\
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx already locked", __FILE__, __LINE__));	\
} while (0)
#define SCTP_INP_WLOCK(_inp) do {											\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)						\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);								\
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx already locked", __FILE__, __LINE__));	\
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx already locked", __FILE__, __LINE__))
#define SCTP_INP_WLOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx already locked", __FILE__, __LINE__))
#endif
#define SCTP_INP_RUNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx not locked", __FILE__, __LINE__))
#define SCTP_INP_WUNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx not locked", __FILE__, __LINE__))
#else
#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);			\
	(void)pthread_mutex_lock(&(_inp)->inp_mtx);				\
} while (0)
#define SCTP_INP_WLOCK(_inp) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);			\
	(void)pthread_mutex_lock(&(_inp)->inp_mtx);				\
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx)
#define SCTP_INP_WLOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx)
#endif
#define SCTP_INP_RUNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_mtx)
#define SCTP_INP_WUNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_mtx)
#endif
#define SCTP_INP_RLOCK_ASSERT(_inp) \
	KASSERT(pthread_mutex_trylock(&(_inp)->inp_mtx) == EBUSY, ("%s:%d: inp_mtx not locked", __FILE__, __LINE__))
#define SCTP_INP_WLOCK_ASSERT(_inp) \
	KASSERT(pthread_mutex_trylock(&(_inp)->inp_mtx) == EBUSY, ("%s:%d: inp_mtx not locked", __FILE__, __LINE__))
/* Endpoint reference counting: atomic, no lock held. */
#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_subtract_int(&((_inp)->refcount), 1)

/* Serializes association creation on an endpoint. */
#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_create_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_create_mtx)
#ifdef INVARIANTS
#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do {											\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)							\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE);								\
	KASSERT(pthread_mutex_lock(&(_inp)->inp_create_mtx) == 0, ("%s:%d: inp_create_mtx already locked", __FILE__, __LINE__));	\
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_create_mtx) == 0, ("%s:%d: inp_create_mtx already locked", __FILE__, __LINE__))
#endif
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_create_mtx) == 0, ("%s:%d: inp_create_mtx not locked", __FILE__, __LINE__))
#else
#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE);		\
	(void)pthread_mutex_lock(&(_inp)->inp_create_mtx);			\
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_create_mtx)
#endif
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_create_mtx)
#endif
/*
 * For the majority of things (once we have found the association) we will
 * lock the actual association mutex. This will protect all the association
 * level queues and streams and such. We will need to lock the socket layer
 * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
 * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
 */
    452 
    453 #define SCTP_TCB_LOCK_INIT(_tcb) \
    454 (void)pthread_mutex_init(&(_tcb)->tcb_mtx, &SCTP_BASE_VAR(mtx_attr))
    455 #define SCTP_TCB_LOCK_DESTROY(_tcb) \
    456 (void)pthread_mutex_destroy(&(_tcb)->tcb_mtx)
    457 #ifdef INVARIANTS
    458 #ifdef SCTP_LOCK_LOGGING
    459 #define SCTP_TCB_LOCK(_tcb) do {											\
    460 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) 						\
    461 	sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB);							\
    462 KASSERT(pthread_mutex_lock(&(_tcb)->tcb_mtx) == 0, ("%s:%d: tcb_mtx already locked", __FILE__, __LINE__))	\
    463 } while (0)
    464 #else
    465 #define SCTP_TCB_LOCK(_tcb) \
    466 KASSERT(pthread_mutex_lock(&(_tcb)->tcb_mtx) == 0, ("%s:%d: tcb_mtx already locked", __FILE__, __LINE__))
    467 #endif
    468 #define SCTP_TCB_UNLOCK(_tcb) \
    469 KASSERT(pthread_mutex_unlock(&(_tcb)->tcb_mtx) == 0, ("%s:%d: tcb_mtx not locked", __FILE__, __LINE__))
    470 #else
    471 #ifdef SCTP_LOCK_LOGGING
    472 #define SCTP_TCB_LOCK(_tcb) do {						\
    473 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) 	\
    474 	sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB);		\
    475 (void)pthread_mutex_lock(&(_tcb)->tcb_mtx);				\
    476 } while (0)
    477 #else
    478 #define SCTP_TCB_LOCK(_tcb) \
    479 (void)pthread_mutex_lock(&(_tcb)->tcb_mtx)
    480 #endif
    481 #define SCTP_TCB_UNLOCK(_tcb) (void)pthread_mutex_unlock(&(_tcb)->tcb_mtx)
    482 #endif
    483 #define SCTP_TCB_LOCK_ASSERT(_tcb) \
    484 KASSERT(pthread_mutex_trylock(&(_tcb)->tcb_mtx) == EBUSY, ("%s:%d: tcb_mtx not locked", __FILE__, __LINE__))
    485 #define SCTP_TCB_TRYLOCK(_tcb) (!(pthread_mutex_trylock(&(_tcb)->tcb_mtx)))
    486 #endif
    487 
    488 #endif /* SCTP_PER_SOCKET_LOCKING */
    489 
    490 
/*
 * common locks
 */

/* copied over to compile */
/* Lock-contention probes: always report "not contended" (expand to 0). */
#define SCTP_INP_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
#define SCTP_INP_READ_CONTENDED(_inp) (0) /* Don't know if this is possible */
#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
    499 
    500 /* socket locks */
    501 
    502 #if defined(_WIN32)
    503 #define SOCKBUF_LOCK_ASSERT(_so_buf)
    504 #define SOCKBUF_LOCK(_so_buf) \
    505 EnterCriticalSection(&(_so_buf)->sb_mtx)
    506 #define SOCKBUF_UNLOCK(_so_buf) \
    507 LeaveCriticalSection(&(_so_buf)->sb_mtx)
    508 #define SOCK_LOCK(_so) \
    509 SOCKBUF_LOCK(&(_so)->so_rcv)
    510 #define SOCK_UNLOCK(_so) \
    511 SOCKBUF_UNLOCK(&(_so)->so_rcv)
    512 #else
    513 #define SOCKBUF_LOCK_ASSERT(_so_buf) \
    514 KASSERT(pthread_mutex_trylock(SOCKBUF_MTX(_so_buf)) == EBUSY, ("%s:%d: socket buffer not locked", __FILE__, __LINE__))
    515 #ifdef INVARIANTS
    516 #define SOCKBUF_LOCK(_so_buf) \
    517 KASSERT(pthread_mutex_lock(SOCKBUF_MTX(_so_buf)) == 0, ("%s:%d: sockbuf_mtx already locked", __FILE__, __LINE__))
    518 #define SOCKBUF_UNLOCK(_so_buf) \
    519 KASSERT(pthread_mutex_unlock(SOCKBUF_MTX(_so_buf)) == 0, ("%s:%d: sockbuf_mtx not locked", __FILE__, __LINE__))
    520 #else
    521 #define SOCKBUF_LOCK(_so_buf) \
    522 pthread_mutex_lock(SOCKBUF_MTX(_so_buf))
    523 #define SOCKBUF_UNLOCK(_so_buf) \
    524 pthread_mutex_unlock(SOCKBUF_MTX(_so_buf))
    525 #endif
    526 #define SOCK_LOCK(_so) \
    527 SOCKBUF_LOCK(&(_so)->so_rcv)
    528 #define SOCK_UNLOCK(_so) \
    529 SOCKBUF_UNLOCK(&(_so)->so_rcv)
    530 #endif
    531 
/* Statistics-logging locks: these all expand to nothing. */
#define SCTP_STATLOG_INIT_LOCK()
#define SCTP_STATLOG_LOCK()
#define SCTP_STATLOG_UNLOCK()
#define SCTP_STATLOG_DESTROY()
    536 
#if defined(_WIN32)
/* address list locks */
#if WINVER < 0x0600
/* Pre-Vista: no SRW locks, so read/write both use one CRITICAL_SECTION. */
#define SCTP_IPI_ADDR_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_LOCK_ASSERT()
#define SCTP_IPI_ADDR_WLOCK_ASSERT()
#else
/* Vista and later: slim reader/writer lock (needs no destruction). */
#define SCTP_IPI_ADDR_INIT() \
	InitializeSRWLock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_DESTROY()
#define SCTP_IPI_ADDR_RLOCK() \
	AcquireSRWLockShared(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RUNLOCK() \
	ReleaseSRWLockShared(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WLOCK() \
	AcquireSRWLockExclusive(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK() \
	ReleaseSRWLockExclusive(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_LOCK_ASSERT()
#define SCTP_IPI_ADDR_WLOCK_ASSERT()
#endif

/* iterator locks */
#define SCTP_ITERATOR_LOCK_INIT() \
	InitializeCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_LOCK_DESTROY() \
	DeleteCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_LOCK() \
	EnterCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_UNLOCK() \
	LeaveCriticalSection(&sctp_it_ctl.it_mtx)

#define SCTP_IPI_ITERATOR_WQ_INIT() \
	InitializeCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
	DeleteCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	EnterCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	LeaveCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)

#else
/* address list locks */
#define SCTP_IPI_ADDR_INIT() \
	(void)pthread_rwlock_init(&SCTP_BASE_INFO(ipi_addr_mtx), &SCTP_BASE_VAR(rwlock_attr))
#define SCTP_IPI_ADDR_DESTROY() \
	(void)pthread_rwlock_destroy(&SCTP_BASE_INFO(ipi_addr_mtx))
#ifdef INVARIANTS
/* Under INVARIANTS the rwlock calls are KASSERT-checked. */
#define SCTP_IPI_ADDR_RLOCK() \
	KASSERT(pthread_rwlock_rdlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s:%d: ipi_addr_mtx already locked", __FILE__, __LINE__))
#define SCTP_IPI_ADDR_RUNLOCK() \
	KASSERT(pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s:%d: ipi_addr_mtx not locked", __FILE__, __LINE__))
#define SCTP_IPI_ADDR_WLOCK() \
	KASSERT(pthread_rwlock_wrlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s:%d: ipi_addr_mtx already locked", __FILE__, __LINE__))
#define SCTP_IPI_ADDR_WUNLOCK() \
	KASSERT(pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s:%d: ipi_addr_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_IPI_ADDR_RLOCK() \
	(void)pthread_rwlock_rdlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RUNLOCK() \
	(void)pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WLOCK() \
	(void)pthread_rwlock_wrlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK() \
	(void)pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#endif
#define SCTP_IPI_ADDR_LOCK_ASSERT()
#define SCTP_IPI_ADDR_WLOCK_ASSERT()

/* iterator locks */
#define SCTP_ITERATOR_LOCK_INIT() \
	(void)pthread_mutex_init(&sctp_it_ctl.it_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_ITERATOR_LOCK_DESTROY() \
	(void)pthread_mutex_destroy(&sctp_it_ctl.it_mtx)
#ifdef INVARIANTS
#define SCTP_ITERATOR_LOCK() \
	KASSERT(pthread_mutex_lock(&sctp_it_ctl.it_mtx) == 0, ("%s:%d: it_mtx already locked", __FILE__, __LINE__))
#define SCTP_ITERATOR_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&sctp_it_ctl.it_mtx) == 0, ("%s:%d: it_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_ITERATOR_LOCK() \
	(void)pthread_mutex_lock(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_UNLOCK() \
	(void)pthread_mutex_unlock(&sctp_it_ctl.it_mtx)
#endif

#define SCTP_IPI_ITERATOR_WQ_INIT() \
	(void)pthread_mutex_init(&sctp_it_ctl.ipi_iterator_wq_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
	(void)pthread_mutex_destroy(&sctp_it_ctl.ipi_iterator_wq_mtx)
#ifdef INVARIANTS
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	KASSERT(pthread_mutex_lock(&sctp_it_ctl.ipi_iterator_wq_mtx) == 0, ("%s:%d: ipi_iterator_wq_mtx already locked", __FILE__, __LINE__))
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx) == 0, ("%s:%d: ipi_iterator_wq_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	(void)pthread_mutex_lock(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	(void)pthread_mutex_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx)
#endif
#endif
    650 
/*
 * Global resource counters (endpoints, associations, local/remote
 * addresses, chunks, read-queue and stream-out-queue entries).  They are
 * maintained with atomic operations, so no lock needs to be held.
 */
#define SCTP_INCR_EP_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_ep), 1)

#define SCTP_DECR_EP_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ep), 1)

#define SCTP_INCR_ASOC_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_asoc), 1)

#define SCTP_DECR_ASOC_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_asoc), 1)

#define SCTP_INCR_LADDR_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_laddr), 1)

#define SCTP_DECR_LADDR_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_laddr), 1)

#define SCTP_INCR_RADDR_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_raddr), 1)

#define SCTP_DECR_RADDR_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_raddr), 1)

#define SCTP_INCR_CHK_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_chunk), 1)

#define SCTP_DECR_CHK_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk), 1)

#define SCTP_INCR_READQ_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_readq), 1)

#define SCTP_DECR_READQ_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_readq), 1)

#define SCTP_INCR_STRMOQ_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1)

#define SCTP_DECR_STRMOQ_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1)
    692 
    693 #endif