sctputil.c (252186B)
1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * a) Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 14 * b) Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the distribution. 17 * 18 * c) Neither the name of Cisco Systems, Inc. nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 */ 34 35 #include <netinet/sctp_os.h> 36 #include <netinet/sctp_pcb.h> 37 #include <netinet/sctputil.h> 38 #include <netinet/sctp_var.h> 39 #include <netinet/sctp_sysctl.h> 40 #ifdef INET6 41 #if defined(__Userspace__) || defined(__FreeBSD__) 42 #include <netinet6/sctp6_var.h> 43 #endif 44 #endif 45 #include <netinet/sctp_header.h> 46 #include <netinet/sctp_output.h> 47 #include <netinet/sctp_uio.h> 48 #include <netinet/sctp_timer.h> 49 #include <netinet/sctp_indata.h> 50 #include <netinet/sctp_auth.h> 51 #include <netinet/sctp_asconf.h> 52 #include <netinet/sctp_bsd_addr.h> 53 #if defined(__Userspace__) 54 #include <netinet/sctp_constants.h> 55 #endif 56 #if defined(__FreeBSD__) && !defined(__Userspace__) 57 #include <netinet/sctp_kdtrace.h> 58 #if defined(INET6) || defined(INET) 59 #include <netinet/tcp_var.h> 60 #endif 61 #include <netinet/udp.h> 62 #include <netinet/udp_var.h> 63 #include <sys/proc.h> 64 #ifdef INET6 65 #include <netinet/icmp6.h> 66 #endif 67 #endif 68 69 #if defined(_WIN32) && !defined(__Userspace__) 70 #if !defined(SCTP_LOCAL_TRACE_BUF) 71 #include "eventrace_netinet.h" 72 #include "sctputil.tmh" /* this is the file that will be auto generated */ 73 #endif 74 #else 75 #ifndef KTR_SCTP 76 #define KTR_SCTP KTR_SUBSYS 77 #endif 78 #endif 79 80 #if defined(_WIN32) && !defined(_MSC_VER) 81 #include <minmax.h> 82 #endif 83 84 extern const struct sctp_cc_functions sctp_cc_functions[]; 85 extern const struct sctp_ss_functions sctp_ss_functions[]; 86 87 void 88 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr) 89 { 90 #if defined(SCTP_LOCAL_TRACE_BUF) 91 struct sctp_cwnd_log sctp_clog; 92 93 sctp_clog.x.sb.stcb = stcb; 94 sctp_clog.x.sb.so_sbcc = SCTP_SBAVAIL(sb); 95 if (stcb) 96 sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc; 97 else 98 sctp_clog.x.sb.stcb_sbcc = 0; 99 sctp_clog.x.sb.incr = incr; 100 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 101 SCTP_LOG_EVENT_SB, 102 from, 103 sctp_clog.x.misc.log1, 104 
sctp_clog.x.misc.log2, 105 sctp_clog.x.misc.log3, 106 sctp_clog.x.misc.log4); 107 #endif 108 } 109 110 void 111 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc) 112 { 113 #if defined(SCTP_LOCAL_TRACE_BUF) 114 struct sctp_cwnd_log sctp_clog; 115 116 sctp_clog.x.close.inp = (void *)inp; 117 sctp_clog.x.close.sctp_flags = inp->sctp_flags; 118 if (stcb) { 119 sctp_clog.x.close.stcb = (void *)stcb; 120 sctp_clog.x.close.state = (uint16_t)stcb->asoc.state; 121 } else { 122 sctp_clog.x.close.stcb = 0; 123 sctp_clog.x.close.state = 0; 124 } 125 sctp_clog.x.close.loc = loc; 126 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 127 SCTP_LOG_EVENT_CLOSE, 128 0, 129 sctp_clog.x.misc.log1, 130 sctp_clog.x.misc.log2, 131 sctp_clog.x.misc.log3, 132 sctp_clog.x.misc.log4); 133 #endif 134 } 135 136 void 137 rto_logging(struct sctp_nets *net, int from) 138 { 139 #if defined(SCTP_LOCAL_TRACE_BUF) 140 struct sctp_cwnd_log sctp_clog; 141 142 memset(&sctp_clog, 0, sizeof(sctp_clog)); 143 sctp_clog.x.rto.net = (void *) net; 144 sctp_clog.x.rto.rtt = net->rtt / 1000; 145 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 146 SCTP_LOG_EVENT_RTT, 147 from, 148 sctp_clog.x.misc.log1, 149 sctp_clog.x.misc.log2, 150 sctp_clog.x.misc.log3, 151 sctp_clog.x.misc.log4); 152 #endif 153 } 154 155 void 156 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from) 157 { 158 #if defined(SCTP_LOCAL_TRACE_BUF) 159 struct sctp_cwnd_log sctp_clog; 160 161 sctp_clog.x.strlog.stcb = stcb; 162 sctp_clog.x.strlog.n_tsn = tsn; 163 sctp_clog.x.strlog.n_sseq = sseq; 164 sctp_clog.x.strlog.e_tsn = 0; 165 sctp_clog.x.strlog.e_sseq = 0; 166 sctp_clog.x.strlog.strm = stream; 167 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 168 SCTP_LOG_EVENT_STRM, 169 from, 170 sctp_clog.x.misc.log1, 171 sctp_clog.x.misc.log2, 172 sctp_clog.x.misc.log3, 173 sctp_clog.x.misc.log4); 174 #endif 175 } 176 177 void 178 sctp_log_nagle_event(struct sctp_tcb *stcb, int action) 
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace a SACK event: old/new cumulative ACK, highest TSN, and the
 * number of gap-ack and duplicate-TSN blocks reported.
 */
void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace the state of the receive mapping array: base TSN of the map,
 * cumulative-ack point, and highest TSN seen.
 */
void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace a fast-retransmit related event (biggest TSN seen, biggest
 * newly-acked TSN, and the TSN being acted on).
 */
void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
/*
 * Trace one mbuf: flags, length, data pointer and, for clusters, the
 * external buffer base and (except on Apple) its reference count.
 */
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
#if defined(__APPLE__) && !defined(__Userspace__)
		/* APPLE does not use a ref_cnt, but a forward/backward ref queue */
#else
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
#endif
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace every mbuf in a chain by walking the m_next linkage.
 */
void
sctp_log_mbc(struct mbuf *m, int from)
{
	struct mbuf *mat;

	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
		sctp_log_mb(mat, from);
	}
}
#endif

/*
 * Trace a stream-delivery event taken from a queued control chunk;
 * poschk, when non-NULL, supplies the "expected next" TSN/SSN pair.
 */
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace a congestion-window change.  Queue counts are clamped to 255
 * to fit the 8-bit log fields.  net may be NULL (e.g. pre-send checks);
 * for SCTP_CWNDLOG_PRESEND the pseudo-cumack slot carries the peer's
 * receive window instead.
 */
void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#if !defined(__APPLE__) && !defined(__Userspace__)
/*
 * Snapshot lock-ownership state (tcb/inp/create/info/socket locks) for
 * debugging lock-order issues.  The mtx_owned()/rw_wowned() probes are
 * FreeBSD-only; SCTP_LOCK_UNKNOWN marks locks we cannot query.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *) inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *) NULL;
	}
	sctp_clog.x.lock.inp = (void *) inp;
#if defined(__FreeBSD__)
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		sctp_clog.x.lock.sock_lock = mtx_owned(SOCK_MTX(inp->sctp_socket));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_rcv));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_snd));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
#endif
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
#endif

/*
 * Trace a max-burst limit event.  NOTE(review): net is dereferenced
 * unconditionally here (unlike sctp_log_cwnd) -- callers presumably
 * always pass a valid net; confirm against call sites.
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace the peer's receive window against a pending send size plus
 * per-packet overhead (new_rwnd unused here, logged as 0).
 */
void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * As sctp_log_rwnd(), but also records the advertised rwnd value
 * (a_rwndval) being set.
 */
void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
/*
 * Trace output-queue byte/mbuf accounting changes.
 */
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
#endif

/*
 * Trace four arbitrary 32-bit values under SCTP_LOG_MISC_EVENT.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}

/*
 * Trace a socket-wakeup event: wakeup count, queue occupancy (clamped
 * to 8 bits), deferred-wakeup PCB flags, and the send buffer's sb_flags
 * (0xff when no socket is attached).
 */
void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace a sender-block event: queued bytes, queue counts, peer rwnd,
 * flight size (in KB) and the length of the send that blocked.
 */
void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight/1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Stub: stat-log extraction is done via ktrdump, nothing to copy out.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
/* Ring buffer of two-byte audit records; sctp_audit_indx is the write cursor. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

/*
 * Dump the audit ring buffer, oldest entries first (from the cursor to
 * the end, then from the start up to the cursor).  Marker records
 * (0xe0/01, 0xf0/xx, 0xc0/01) force a line break; otherwise 14 records
 * are printed per line.
 */
static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
		    (uint32_t) sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
		    (uint32_t) sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}

/*
 * Cross-check the association's retransmit count, total flight and
 * flight count against the sent queue, recording discrepancies in the
 * audit ring (0xAF records), repairing the counters in place, and
 * printing a full report if anything was off.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Entry marker: 0xAA plus the caller's location code. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: called without an endpoint. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: called without an association. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recount retransmits and in-flight bytes/chunks from the sent queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Now verify the per-net flight sizes sum to the association total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

/*
 * Append one two-byte record to the audit ring buffer.
 */
void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code the time to be positive and
 * know that this corresponds to a positive number of ticks.
 */

/* Convert milliseconds to kernel ticks, rounding up and saturating at UINT32_MAX. */
uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
	uint64_t temp;
	uint32_t ticks;

	if (hz == 1000) {
		/* Fast path: one tick per millisecond. */
		ticks = msecs;
	} else {
		temp = (((uint64_t)msecs * hz) + 999) / 1000;
		if (temp > UINT32_MAX) {
			ticks = UINT32_MAX;
		} else {
			ticks = (uint32_t)temp;
		}
	}
	return (ticks);
}

/* Convert kernel ticks to milliseconds, rounding up and saturating at UINT32_MAX. */
uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t msecs;

	if (hz == 1000) {
		msecs = ticks;
	} else {
		temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
		if (temp > UINT32_MAX) {
			msecs = UINT32_MAX;
		} else {
			msecs = (uint32_t)temp;
		}
	}
	return (msecs);
}

/* Convert seconds to kernel ticks, saturating at UINT32_MAX. */
uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
	uint64_t temp;
	uint32_t ticks;

	temp = (uint64_t)secs * hz;
	if (temp > UINT32_MAX) {
		ticks = UINT32_MAX;
	} else {
		ticks = (uint32_t)temp;
	}
	return (ticks);
}

/* Convert kernel ticks to seconds, rounding up and saturating at UINT32_MAX. */
uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t secs;

	temp = ((uint64_t)ticks + (hz - 1)) / hz;
	if (temp > UINT32_MAX) {
		secs = UINT32_MAX;
	} else {
		secs = (uint32_t)temp;
	}
	return (secs);
}

/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	/* Association-wide timers first, then the per-destination ones. */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}

/*
 * Stop every timer associated with the association (used on teardown).
 * The ASOCKILL timer is only stopped when stop_assoc_kill_timer is set,
 * since teardown paths may still need it to fire.
 */
void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};

/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	/* Round val down to a multiple of 4 before searching. */
	val &= 0xfffffffc;
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	/* i - 1 is valid: the loop started at 1 and the table is ordered. */
	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4 smaller
 * or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	/* Round val down to a multiple of 4 before searching. */
	val &= 0xfffffffc;
	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}

/*
 * Refill the endpoint's random store and reset the consumption cursor.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
#if defined(__Userspace__) && defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION)
	/* Deterministic-ish bytes for fuzzing builds; never for production. */
	for (int i = 0; i < (int) (sizeof(m->random_store) / sizeof(m->random_store[0])); i++) {
		m->random_store[i] = (uint8_t) rand();
	}
#else
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
#endif
	m->random_counter++;
}

/*
 * Return a 32-bit value from the endpoint's random store, advancing the
 * cursor with a CAS loop so concurrent callers get distinct slots; the
 * store is refilled when the cursor wraps.  If initial_sequence_debug is
 * set, return a simple incrementing counter instead (debug aid).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
 retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE-3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}

/*
 * Pick a verification tag: a non-zero random value which, when check is
 * set, must also pass sctp_is_vtag_good() for the given port pair.
 */
uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}

/*
 * Map the kernel association state bits to the user-visible SCTP state
 * constants (ABORTED and SHUTDOWN_PENDING flags take precedence over
 * the masked base state).
 */
int32_t
sctp_map_assoc_state(int kernel_state)
{
	int32_t user_state;

	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
		user_state = SCTP_CLOSED;
	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
		user_state = SCTP_SHUTDOWN_PENDING;
	} else {
		switch (kernel_state & SCTP_STATE_MASK) {
		case SCTP_STATE_EMPTY:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_INUSE:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_COOKIE_WAIT:
			user_state = SCTP_COOKIE_WAIT;
			break;
		case SCTP_STATE_COOKIE_ECHOED:
			user_state = SCTP_COOKIE_ECHOED;
			break;
		case SCTP_STATE_OPEN:
			user_state = SCTP_ESTABLISHED;
			break;
		case SCTP_STATE_SHUTDOWN_SENT:
			user_state = SCTP_SHUTDOWN_SENT;
			break;
		case SCTP_STATE_SHUTDOWN_RECEIVED:
			user_state = SCTP_SHUTDOWN_RECEIVED;
			break;
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			user_state = SCTP_SHUTDOWN_ACK_SENT;
			break;
		default:
			user_state = SCTP_CLOSED;
			break;
		}
	}
	return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t initial_tsn, uint32_t vrf_id,
    uint16_t o_strms)
{
	struct sctp_association *asoc;
	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value.
*/
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	/* Inherit the endpoint's defaults into the new association. */
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->rcv_edmid = inp->rcv_edmid;
	asoc->snd_edmid = SCTP_EDMID_NONE;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			/*
			 * Auto-generate a flow label: random low 20 bits,
			 * high bit set to mark it as auto-generated.
			 */
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	/* Verification tag: use the caller's (e.g. from a cookie) or draw one. */
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	/* Initial TSN: caller-supplied when the tag was overridden, else random. */
	if (override_tag) {
		asoc->init_seq_number = initial_tsn;
	} else {
		asoc->init_seq_number = sctp_select_initial_TSN(&inp->sctp_ep);
	}
	/* All outbound sequence spaces start at the initial TSN. */
	asoc->asconf_seq_out = asoc->init_seq_number;
	asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->sending_seq = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->init_seq_number - 1;
	/* we are optimistic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	/* "Nothing acked yet" is expressed as init_seq_number - 1. */
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->init_seq_number - 1;
	asoc->asconf_seq_in = asoc->init_seq_number - 1;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	/* Address scoping follows how the endpoint was bound. */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
#if defined(__Userspace__)
		asoc->scope.conn_addr_legal = 0;
#endif
	} else {
		asoc->scope.ipv6_addr_legal = 0;
#if defined(__Userspace__)
		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
			asoc->scope.conn_addr_legal = 1;
			asoc->scope.ipv4_addr_legal = 0;
		} else {
			asoc->scope.conn_addr_legal = 0;
			asoc->scope.ipv4_addr_legal = 1;
		}
#else
		asoc->scope.ipv4_addr_legal = 1;
#endif
	}

	/* Never advertise a receive window below the configured minimum. */
	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = 0;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	/* Plug in the endpoint's default congestion-control and scheduler. */
	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	SCTP_TCB_LOCK(stcb);
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc);
	SCTP_TCB_UNLOCK(stcb);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		/* Unwind the stream allocation above. */
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		/* Unwind both prior allocations. */
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
		asoc->abandoned_unsent[i] = 0;
		asoc->abandoned_sent[i] = 0;
	}
	/* sa_ignore MEMLEAK {memory is put in the assoc mapping array and freed later when
	 * the association is freed.
*/
	return (0);
}

/*
 * Dump both TSN mapping arrays (renegable and non-renegable) to the
 * console for debugging, trimming trailing all-zero bytes from the output.
 */
void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	/*
	 * Find the last non-zero byte. The scan stops at index 1, so byte 0
	 * is never examined and at least one byte is always printed.
	 */
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	/* 16 bytes per output line. */
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	/* Same trimming scan for the non-renegable array. */
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

/*
 * Grow both mapping arrays so they can hold at least `needed` more bits.
 * Returns 0 on success, -1 if either allocation fails (in which case the
 * existing arrays are left untouched).
 */
int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	/* Convert the needed bit count to bytes and add slack. */
	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	/* Zero the new arrays, copy old contents in, then swap pointers. */
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}

/*
 * Walk endpoints/associations on behalf of a queued iterator request,
 * invoking the iterator's callbacks. Entered holding no SCTP locks;
 * acquires the INP-INFO read lock and the iterator lock for the walk.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
#if defined(__FreeBSD__) && !defined(__Userspace__)
	struct epoch_tracker et;
#endif
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

#if defined(__FreeBSD__) && !defined(__Userspace__)
	NET_EPOCH_ENTER(et);
#endif
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		/* Fire the completion callback and free the request. */
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
#if defined(__FreeBSD__) && !defined(__Userspace__)
		NET_EPOCH_EXIT(et);
#endif
		return;
	}
select_a_new_ep:
	/* On the first pass it->inp is already read-locked (see above). */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	        ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	       ((it->pcb_features) &&
	        ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* Advance to the next endpoint; unlock the old one after. */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp)(it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end)(it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb != NULL) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/*
			 * Pause to let others grab the lock: park a refcount
			 * on the current stcb so it survives, drop every lock
			 * back to front, then reacquire them in order.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/* Someone may have asked us to stop while unlocked. */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_subtract_int(&it->stcb->asoc.refcnt, 1);
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_MUST_EXIT) {
					goto done_with_iterator;
				}
#endif
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-lock inp and stcb, then drop the parking refs. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_subtract_int(&it->stcb->asoc.refcnt, 1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		    ("%s: stcb %p does not belong to inp %p, but inp %p",
		    __func__, it->stcb, it->inp, it->stcb->sctp_ep));
		SCTP_INP_RLOCK_ASSERT(it->inp);
		SCTP_TCB_LOCK_ASSERT(it->stcb);

		/* run function on this one */
		(*it->function_assoc)(it->inp, it->stcb, it->pointer, it->val);
		SCTP_INP_RLOCK_ASSERT(it->inp);
		SCTP_TCB_LOCK_ASSERT(it->stcb);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0) {
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
			SCTP_INP_RLOCK_ASSERT(it->inp);
			SCTP_TCB_LOCK_ASSERT(it->stcb);
		}

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end)(it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

/*
 * Drain the queued iterator requests one at a time.
 * Called (and returns) with the iterator work-queue lock held; the lock
 * is dropped around each sctp_iterator_work() call.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it;

	/* This function is called with the WQ lock in place */
	sctp_it_ctl.iterator_running = 1;
	while ((it
= TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) { 1677 /* now lets work on this one */ 1678 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr); 1679 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1680 #if defined(__FreeBSD__) && !defined(__Userspace__) 1681 CURVNET_SET(it->vn); 1682 #endif 1683 sctp_iterator_work(it); 1684 #if defined(__FreeBSD__) && !defined(__Userspace__) 1685 CURVNET_RESTORE(); 1686 #endif 1687 SCTP_IPI_ITERATOR_WQ_LOCK(); 1688 #if !defined(__FreeBSD__) && !defined(__Userspace__) 1689 if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) { 1690 break; 1691 } 1692 #endif 1693 /*sa_ignore FREED_MEMORY*/ 1694 } 1695 sctp_it_ctl.iterator_running = 0; 1696 return; 1697 } 1698 1699 static void 1700 sctp_handle_addr_wq(void) 1701 { 1702 /* deal with the ADDR wq from the rtsock calls */ 1703 struct sctp_laddr *wi, *nwi; 1704 struct sctp_asconf_iterator *asc; 1705 1706 SCTP_MALLOC(asc, struct sctp_asconf_iterator *, 1707 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT); 1708 if (asc == NULL) { 1709 /* Try later, no memory */ 1710 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 1711 (struct sctp_inpcb *)NULL, 1712 (struct sctp_tcb *)NULL, 1713 (struct sctp_nets *)NULL); 1714 return; 1715 } 1716 LIST_INIT(&asc->list_of_work); 1717 asc->cnt = 0; 1718 1719 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) { 1720 LIST_REMOVE(wi, sctp_nxt_addr); 1721 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr); 1722 asc->cnt++; 1723 } 1724 1725 if (asc->cnt == 0) { 1726 SCTP_FREE(asc, SCTP_M_ASC_IT); 1727 } else { 1728 int ret; 1729 1730 ret = sctp_initiate_iterator(sctp_asconf_iterator_ep, 1731 sctp_asconf_iterator_stcb, 1732 NULL, /* No ep end for boundall */ 1733 SCTP_PCB_FLAGS_BOUNDALL, 1734 SCTP_PCB_ANY_FEATURES, 1735 SCTP_ASOC_ANY_STATE, 1736 (void *)asc, 0, 1737 sctp_asconf_iterator_end, NULL, 0); 1738 if (ret) { 1739 SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n"); 1740 /* Freeing if we are stopping or put back on the addr_wq. 
*/ 1741 if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) { 1742 sctp_asconf_iterator_end(asc, 0); 1743 } else { 1744 LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) { 1745 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 1746 } 1747 SCTP_FREE(asc, SCTP_M_ASC_IT); 1748 } 1749 } 1750 } 1751 } 1752 1753 /*- 1754 * The following table shows which pointers for the inp, stcb, or net are 1755 * stored for each timer after it was started. 1756 * 1757 *|Name |Timer |inp |stcb|net | 1758 *|-----------------------------|-----------------------------|----|----|----| 1759 *|SCTP_TIMER_TYPE_SEND |net->rxt_timer |Yes |Yes |Yes | 1760 *|SCTP_TIMER_TYPE_INIT |net->rxt_timer |Yes |Yes |Yes | 1761 *|SCTP_TIMER_TYPE_RECV |stcb->asoc.dack_timer |Yes |Yes |No | 1762 *|SCTP_TIMER_TYPE_SHUTDOWN |net->rxt_timer |Yes |Yes |Yes | 1763 *|SCTP_TIMER_TYPE_HEARTBEAT |net->hb_timer |Yes |Yes |Yes | 1764 *|SCTP_TIMER_TYPE_COOKIE |net->rxt_timer |Yes |Yes |Yes | 1765 *|SCTP_TIMER_TYPE_NEWCOOKIE |inp->sctp_ep.signature_change|Yes |No |No | 1766 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer |Yes |Yes |Yes | 1767 *|SCTP_TIMER_TYPE_SHUTDOWNACK |net->rxt_timer |Yes |Yes |Yes | 1768 *|SCTP_TIMER_TYPE_ASCONF |stcb->asoc.asconf_timer |Yes |Yes |Yes | 1769 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer |Yes |Yes |No | 1770 *|SCTP_TIMER_TYPE_AUTOCLOSE |stcb->asoc.autoclose_timer |Yes |Yes |No | 1771 *|SCTP_TIMER_TYPE_STRRESET |stcb->asoc.strreset_timer |Yes |Yes |No | 1772 *|SCTP_TIMER_TYPE_INPKILL |inp->sctp_ep.signature_change|Yes |No |No | 1773 *|SCTP_TIMER_TYPE_ASOCKILL |stcb->asoc.strreset_timer |Yes |Yes |No | 1774 *|SCTP_TIMER_TYPE_ADDR_WQ |SCTP_BASE_INFO(addr_wq_timer)|No |No |No | 1775 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No | 1776 */ 1777 1778 void 1779 sctp_timeout_handler(void *t) 1780 { 1781 #if defined(__FreeBSD__) && !defined(__Userspace__) 1782 struct epoch_tracker et; 1783 #endif 1784 struct timeval tv; 1785 struct sctp_inpcb 
*inp; 1786 struct sctp_tcb *stcb; 1787 struct sctp_nets *net; 1788 struct sctp_timer *tmr; 1789 struct mbuf *op_err; 1790 #if defined(__APPLE__) && !defined(__Userspace__) 1791 struct socket *so; 1792 #endif 1793 #if defined(__Userspace__) 1794 struct socket *upcall_socket = NULL; 1795 #endif 1796 int type; 1797 int i, secret; 1798 bool did_output, released_asoc_reference; 1799 1800 /* 1801 * If inp, stcb or net are not NULL, then references to these were 1802 * added when the timer was started, and must be released before this 1803 * function returns. 1804 */ 1805 tmr = (struct sctp_timer *)t; 1806 inp = (struct sctp_inpcb *)tmr->ep; 1807 stcb = (struct sctp_tcb *)tmr->tcb; 1808 net = (struct sctp_nets *)tmr->net; 1809 #if defined(__FreeBSD__) && !defined(__Userspace__) 1810 CURVNET_SET((struct vnet *)tmr->vnet); 1811 NET_EPOCH_ENTER(et); 1812 #endif 1813 released_asoc_reference = false; 1814 1815 #ifdef SCTP_AUDITING_ENABLED 1816 sctp_audit_log(0xF0, (uint8_t) tmr->type); 1817 sctp_auditing(3, inp, stcb, net); 1818 #endif 1819 1820 /* sanity checks... 
*/ 1821 KASSERT(tmr->self == NULL || tmr->self == tmr, 1822 ("sctp_timeout_handler: tmr->self corrupted")); 1823 KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type), 1824 ("sctp_timeout_handler: invalid timer type %d", tmr->type)); 1825 type = tmr->type; 1826 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 1827 ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p", 1828 type, stcb, stcb->sctp_ep)); 1829 tmr->stopped_from = 0xa001; 1830 if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) { 1831 SCTPDBG(SCTP_DEBUG_TIMER2, 1832 "Timer type %d handler exiting due to CLOSED association.\n", 1833 type); 1834 goto out_decr; 1835 } 1836 tmr->stopped_from = 0xa002; 1837 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type); 1838 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1839 SCTPDBG(SCTP_DEBUG_TIMER2, 1840 "Timer type %d handler exiting due to not being active.\n", 1841 type); 1842 goto out_decr; 1843 } 1844 1845 tmr->stopped_from = 0xa003; 1846 if (stcb) { 1847 SCTP_TCB_LOCK(stcb); 1848 /* 1849 * Release reference so that association can be freed if 1850 * necessary below. 1851 * This is safe now that we have acquired the lock. 1852 */ 1853 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1854 released_asoc_reference = true; 1855 if ((type != SCTP_TIMER_TYPE_ASOCKILL) && 1856 ((stcb->asoc.state == SCTP_STATE_EMPTY) || 1857 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) { 1858 SCTPDBG(SCTP_DEBUG_TIMER2, 1859 "Timer type %d handler exiting due to CLOSED association.\n", 1860 type); 1861 goto out; 1862 } 1863 } else if (inp != NULL) { 1864 SCTP_INP_WLOCK(inp); 1865 } else { 1866 SCTP_WQ_ADDR_LOCK(); 1867 } 1868 1869 /* Record in stopped_from which timeout occurred. */ 1870 tmr->stopped_from = type; 1871 /* mark as being serviced now */ 1872 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 1873 /* 1874 * Callout has been rescheduled. 1875 */ 1876 goto out; 1877 } 1878 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1879 /* 1880 * Not active, so no action. 
1881 */ 1882 goto out; 1883 } 1884 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer); 1885 1886 #if defined(__Userspace__) 1887 if ((stcb != NULL) && 1888 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 1889 (stcb->sctp_socket != NULL)) { 1890 upcall_socket = stcb->sctp_socket; 1891 SOCK_LOCK(upcall_socket); 1892 soref(upcall_socket); 1893 SOCK_UNLOCK(upcall_socket); 1894 } 1895 #endif 1896 /* call the handler for the appropriate timer type */ 1897 switch (type) { 1898 case SCTP_TIMER_TYPE_SEND: 1899 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1900 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1901 type, inp, stcb, net)); 1902 SCTP_STAT_INCR(sctps_timodata); 1903 stcb->asoc.timodata++; 1904 stcb->asoc.num_send_timers_up--; 1905 if (stcb->asoc.num_send_timers_up < 0) { 1906 stcb->asoc.num_send_timers_up = 0; 1907 } 1908 SCTP_TCB_LOCK_ASSERT(stcb); 1909 if (sctp_t3rxt_timer(inp, stcb, net)) { 1910 /* no need to unlock on tcb its gone */ 1911 1912 goto out_decr; 1913 } 1914 SCTP_TCB_LOCK_ASSERT(stcb); 1915 #ifdef SCTP_AUDITING_ENABLED 1916 sctp_auditing(4, inp, stcb, net); 1917 #endif 1918 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1919 did_output = true; 1920 if ((stcb->asoc.num_send_timers_up == 0) && 1921 (stcb->asoc.sent_queue_cnt > 0)) { 1922 struct sctp_tmit_chunk *chk; 1923 1924 /* 1925 * Safeguard. If there on some on the sent queue 1926 * somewhere but no timers running something is 1927 * wrong... so we start a timer on the first chunk 1928 * on the send queue on whatever net it is sent to. 
1929 */ 1930 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1931 if (chk->whoTo != NULL) { 1932 break; 1933 } 1934 } 1935 if (chk != NULL) { 1936 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1937 } 1938 } 1939 break; 1940 case SCTP_TIMER_TYPE_INIT: 1941 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1942 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1943 type, inp, stcb, net)); 1944 SCTP_STAT_INCR(sctps_timoinit); 1945 stcb->asoc.timoinit++; 1946 if (sctp_t1init_timer(inp, stcb, net)) { 1947 /* no need to unlock on tcb its gone */ 1948 goto out_decr; 1949 } 1950 did_output = false; 1951 break; 1952 case SCTP_TIMER_TYPE_RECV: 1953 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1954 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1955 type, inp, stcb, net)); 1956 SCTP_STAT_INCR(sctps_timosack); 1957 stcb->asoc.timosack++; 1958 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1959 #ifdef SCTP_AUDITING_ENABLED 1960 sctp_auditing(4, inp, stcb, NULL); 1961 #endif 1962 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1963 did_output = true; 1964 break; 1965 case SCTP_TIMER_TYPE_SHUTDOWN: 1966 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1967 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1968 type, inp, stcb, net)); 1969 SCTP_STAT_INCR(sctps_timoshutdown); 1970 stcb->asoc.timoshutdown++; 1971 if (sctp_shutdown_timer(inp, stcb, net)) { 1972 /* no need to unlock on tcb its gone */ 1973 goto out_decr; 1974 } 1975 #ifdef SCTP_AUDITING_ENABLED 1976 sctp_auditing(4, inp, stcb, net); 1977 #endif 1978 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1979 did_output = true; 1980 break; 1981 case SCTP_TIMER_TYPE_HEARTBEAT: 1982 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1983 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1984 type, inp, stcb, net)); 1985 SCTP_STAT_INCR(sctps_timoheartbeat); 1986 stcb->asoc.timoheartbeat++; 1987 if 
(sctp_heartbeat_timer(inp, stcb, net)) { 1988 /* no need to unlock on tcb its gone */ 1989 goto out_decr; 1990 } 1991 #ifdef SCTP_AUDITING_ENABLED 1992 sctp_auditing(4, inp, stcb, net); 1993 #endif 1994 if ((net->dest_state & SCTP_ADDR_NOHB) == 0) { 1995 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1996 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1997 did_output = true; 1998 } else { 1999 did_output = false; 2000 } 2001 break; 2002 case SCTP_TIMER_TYPE_COOKIE: 2003 KASSERT(inp != NULL && stcb != NULL && net != NULL, 2004 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2005 type, inp, stcb, net)); 2006 SCTP_STAT_INCR(sctps_timocookie); 2007 stcb->asoc.timocookie++; 2008 if (sctp_cookie_timer(inp, stcb, net)) { 2009 /* no need to unlock on tcb its gone */ 2010 goto out_decr; 2011 } 2012 #ifdef SCTP_AUDITING_ENABLED 2013 sctp_auditing(4, inp, stcb, net); 2014 #endif 2015 /* 2016 * We consider T3 and Cookie timer pretty much the same with 2017 * respect to where from in chunk_output. 
2018 */ 2019 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 2020 did_output = true; 2021 break; 2022 case SCTP_TIMER_TYPE_NEWCOOKIE: 2023 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2024 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2025 type, inp, stcb, net)); 2026 SCTP_STAT_INCR(sctps_timosecret); 2027 (void)SCTP_GETTIME_TIMEVAL(&tv); 2028 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 2029 inp->sctp_ep.last_secret_number = 2030 inp->sctp_ep.current_secret_number; 2031 inp->sctp_ep.current_secret_number++; 2032 if (inp->sctp_ep.current_secret_number >= 2033 SCTP_HOW_MANY_SECRETS) { 2034 inp->sctp_ep.current_secret_number = 0; 2035 } 2036 secret = (int)inp->sctp_ep.current_secret_number; 2037 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 2038 inp->sctp_ep.secret_key[secret][i] = 2039 sctp_select_initial_TSN(&inp->sctp_ep); 2040 } 2041 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 2042 did_output = false; 2043 break; 2044 case SCTP_TIMER_TYPE_PATHMTURAISE: 2045 KASSERT(inp != NULL && stcb != NULL && net != NULL, 2046 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2047 type, inp, stcb, net)); 2048 SCTP_STAT_INCR(sctps_timopathmtu); 2049 sctp_pathmtu_timer(inp, stcb, net); 2050 did_output = false; 2051 break; 2052 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2053 KASSERT(inp != NULL && stcb != NULL && net != NULL, 2054 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2055 type, inp, stcb, net)); 2056 if (sctp_shutdownack_timer(inp, stcb, net)) { 2057 /* no need to unlock on tcb its gone */ 2058 goto out_decr; 2059 } 2060 SCTP_STAT_INCR(sctps_timoshutdownack); 2061 stcb->asoc.timoshutdownack++; 2062 #ifdef SCTP_AUDITING_ENABLED 2063 sctp_auditing(4, inp, stcb, net); 2064 #endif 2065 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 2066 did_output = true; 2067 break; 2068 case SCTP_TIMER_TYPE_ASCONF: 2069 KASSERT(inp != NULL && stcb != NULL && net != NULL, 2070 ("timeout of type 
%d: inp = %p, stcb = %p, net = %p", 2071 type, inp, stcb, net)); 2072 SCTP_STAT_INCR(sctps_timoasconf); 2073 if (sctp_asconf_timer(inp, stcb, net)) { 2074 /* no need to unlock on tcb its gone */ 2075 goto out_decr; 2076 } 2077 #ifdef SCTP_AUDITING_ENABLED 2078 sctp_auditing(4, inp, stcb, net); 2079 #endif 2080 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 2081 did_output = true; 2082 break; 2083 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2084 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2085 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2086 type, inp, stcb, net)); 2087 SCTP_STAT_INCR(sctps_timoshutdownguard); 2088 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 2089 "Shutdown guard timer expired"); 2090 sctp_abort_an_association(inp, stcb, op_err, true, SCTP_SO_NOT_LOCKED); 2091 /* no need to unlock on tcb its gone */ 2092 goto out_decr; 2093 case SCTP_TIMER_TYPE_AUTOCLOSE: 2094 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2095 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2096 type, inp, stcb, net)); 2097 SCTP_STAT_INCR(sctps_timoautoclose); 2098 sctp_autoclose_timer(inp, stcb); 2099 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2100 did_output = true; 2101 break; 2102 case SCTP_TIMER_TYPE_STRRESET: 2103 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2104 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2105 type, inp, stcb, net)); 2106 SCTP_STAT_INCR(sctps_timostrmrst); 2107 if (sctp_strreset_timer(inp, stcb)) { 2108 /* no need to unlock on tcb its gone */ 2109 goto out_decr; 2110 } 2111 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2112 did_output = true; 2113 break; 2114 case SCTP_TIMER_TYPE_INPKILL: 2115 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2116 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2117 type, inp, stcb, net)); 2118 SCTP_STAT_INCR(sctps_timoinpkill); 2119 /* 2120 * special 
case, take away our increment since WE are the 2121 * killer 2122 */ 2123 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2124 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2125 #if defined(__APPLE__) && !defined(__Userspace__) 2126 SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1); 2127 #endif 2128 SCTP_INP_DECR_REF(inp); 2129 SCTP_INP_WUNLOCK(inp); 2130 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2131 SCTP_CALLED_FROM_INPKILL_TIMER); 2132 #if defined(__APPLE__) && !defined(__Userspace__) 2133 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1); 2134 #endif 2135 inp = NULL; 2136 goto out_decr; 2137 case SCTP_TIMER_TYPE_ASOCKILL: 2138 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2139 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2140 type, inp, stcb, net)); 2141 SCTP_STAT_INCR(sctps_timoassockill); 2142 /* Can we free it yet? */ 2143 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2144 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2145 #if defined(__APPLE__) && !defined(__Userspace__) 2146 so = SCTP_INP_SO(inp); 2147 atomic_add_int(&stcb->asoc.refcnt, 1); 2148 SCTP_TCB_UNLOCK(stcb); 2149 SCTP_SOCKET_LOCK(so, 1); 2150 SCTP_TCB_LOCK(stcb); 2151 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2152 #endif 2153 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2154 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2155 #if defined(__APPLE__) && !defined(__Userspace__) 2156 SCTP_SOCKET_UNLOCK(so, 1); 2157 #endif 2158 /* 2159 * free asoc, always unlocks (or destroy's) so prevent 2160 * duplicate unlock or unlock of a free mtx :-0 2161 */ 2162 stcb = NULL; 2163 goto out_decr; 2164 case SCTP_TIMER_TYPE_ADDR_WQ: 2165 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2166 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2167 type, inp, stcb, net)); 2168 sctp_handle_addr_wq(); 2169 did_output = true; 2170 break; 2171 case SCTP_TIMER_TYPE_PRIM_DELETED: 2172 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2173 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2174 type, inp, stcb, net)); 
2175 SCTP_STAT_INCR(sctps_timodelprim); 2176 sctp_delete_prim_timer(inp, stcb); 2177 did_output = false; 2178 break; 2179 default: 2180 #ifdef INVARIANTS 2181 panic("Unknown timer type %d", type); 2182 #else 2183 goto out; 2184 #endif 2185 } 2186 #ifdef SCTP_AUDITING_ENABLED 2187 sctp_audit_log(0xF1, (uint8_t) type); 2188 if (inp != NULL) 2189 sctp_auditing(5, inp, stcb, net); 2190 #endif 2191 if (did_output && (stcb != NULL)) { 2192 /* 2193 * Now we need to clean up the control chunk chain if an 2194 * ECNE is on it. It must be marked as UNSENT again so next 2195 * call will continue to send it until such time that we get 2196 * a CWR, to remove it. It is, however, less likely that we 2197 * will find a ecn echo on the chain though. 2198 */ 2199 sctp_fix_ecn_echo(&stcb->asoc); 2200 } 2201 out: 2202 if (stcb != NULL) { 2203 SCTP_TCB_UNLOCK(stcb); 2204 } else if (inp != NULL) { 2205 SCTP_INP_WUNLOCK(inp); 2206 } else { 2207 SCTP_WQ_ADDR_UNLOCK(); 2208 } 2209 2210 out_decr: 2211 #if defined(__Userspace__) 2212 if (upcall_socket != NULL) { 2213 if ((upcall_socket->so_upcall != NULL) && 2214 (upcall_socket->so_error != 0)) { 2215 (*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT); 2216 } 2217 ACCEPT_LOCK(); 2218 SOCK_LOCK(upcall_socket); 2219 sorele(upcall_socket); 2220 } 2221 #endif 2222 /* These reference counts were incremented in sctp_timer_start(). */ 2223 if (inp != NULL) { 2224 SCTP_INP_DECR_REF(inp); 2225 } 2226 if ((stcb != NULL) && !released_asoc_reference) { 2227 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2228 } 2229 if (net != NULL) { 2230 sctp_free_remote_addr(net); 2231 } 2232 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2233 #if defined(__FreeBSD__) && !defined(__Userspace__) 2234 CURVNET_RESTORE(); 2235 NET_EPOCH_EXIT(et); 2236 #endif 2237 } 2238 2239 /*- 2240 * The following table shows which parameters must be provided 2241 * when calling sctp_timer_start(). 
 * For parameters not being
 * provided, NULL must be used.
 *
 * |Name                         |inp |stcb|net |
 * |-----------------------------|----|----|----|
 * |SCTP_TIMER_TYPE_SEND         |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_INIT         |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_RECV         |Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_SHUTDOWN     |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_HEARTBEAT    |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_COOKIE       |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_NEWCOOKIE    |Yes |No  |No  |
 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_SHUTDOWNACK  |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_ASCONF       |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_AUTOCLOSE    |Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_STRRESET     |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_INPKILL      |Yes |No  |No  |
 * |SCTP_TIMER_TYPE_ASOCKILL     |Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_ADDR_WQ      |No  |No  |No  |
 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No  |
 *
 */

/*
 * Start the timer of kind t_type for the inp/stcb/net combination given
 * in the table above.  The caller must hold the lock matching the
 * most specific non-NULL argument (TCB lock, INP write lock, or the
 * address work-queue lock).  If the underlying callout is newly
 * scheduled, a reference is taken on each of inp, stcb and net that is
 * recorded in the timer; those references are dropped by the timeout
 * handler or by sctp_timer_stop().  If the timer is already pending it
 * is left running unchanged.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_timer *tmr;
	uint32_t to_ticks;
	uint32_t rndval, jitter;

	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p",
	    t_type, stcb, stcb->sctp_ep));
	tmr = NULL;
	/* Assert that the caller holds the lock matching the arguments. */
	if (stcb != NULL) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	} else if (inp != NULL) {
		SCTP_INP_WLOCK_ASSERT(inp);
	} else {
		SCTP_WQ_ADDR_LOCK_ASSERT();
	}
	if (stcb != NULL) {
		/* Don't restart timer on association that's about to be killed. */
		if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
		    (t_type != SCTP_TIMER_TYPE_ASOCKILL)) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n",
			    t_type, inp, stcb, net);
			return;
		}
		/* Don't restart timer on net that's been removed. */
		if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n",
			    t_type, inp, stcb, net);
			return;
		}
	}
	/*
	 * Select the timer object and its duration; each case also
	 * validates that exactly the required pointers are non-NULL.
	 */
	switch (t_type) {
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer. */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * second.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp,
		 * usually about 200ms.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * The net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		if ((net->dest_state & SCTP_ADDR_NOHB) &&
		    ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
			    t_type, inp, stcb, net);
			return;
		}
		tmr = &net->hb_timer;
		/* NOTE: to_ticks holds milliseconds until converted below. */
		if (net->RTO == 0) {
			to_ticks = stcb->asoc.initial_rto;
		} else {
			to_ticks = net->RTO;
		}
		if (!((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
		      (net->dest_state & SCTP_ADDR_REACHABLE)) &&
		    ((net->dest_state & SCTP_ADDR_PF) == 0)) {
			/*
			 * Jitter: RTO/2 + random(0, RTO) + HB delay,
			 * saturating at UINT32_MAX instead of wrapping.
			 */
			if (to_ticks > 1) {
				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
				jitter = rndval % to_ticks;
				to_ticks >>= 1;
				if (jitter < (UINT32_MAX - to_ticks)) {
					to_ticks += jitter;
				} else {
					to_ticks = UINT32_MAX;
				}
			}
			if (net->heart_beat_delay < (UINT32_MAX - to_ticks)) {
				to_ticks += net->heart_beat_delay;
			} else {
				to_ticks = UINT32_MAX;
			}
		}
		/*
		 * Now we must convert the to_ticks that are now in
		 * ms to ticks.
		 */
		to_ticks = sctp_msecs_to_ticks(to_ticks);
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was complete. If a retransmission happened then we will
		 * be using the RTO initial value.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * Nothing needed but the endpoint here usually about 60
		 * minutes.
		 */
		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTUD, usually
		 * about 10 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
			    t_type, inp, stcb, net);
			return;
		}
		tmr = &net->pmtu_timer;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination. */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.asconf_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.shut_guard_timer;
		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
			/* Default: 5 * max RTO, saturating at UINT32_MAX ms. */
			if (stcb->asoc.maxrto < UINT32_MAX / 5) {
				to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto);
			} else {
				to_ticks = sctp_msecs_to_ticks(UINT32_MAX);
			}
		} else {
			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		}
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.autoclose_timer;
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.strreset_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		/* ASOCKILL re-uses the stream-reset timer object. */
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.delete_prim_timer;
		to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		break;
	default:
#ifdef INVARIANTS
		panic("Unknown timer type %d", t_type);
#else
		return;
#endif
	}
	KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
	KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type));
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * We do NOT allow you to have it already running. If it is,
		 * we leave the current one up unchanged.
		 */
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n",
		    t_type, inp, stcb, net);
		return;
	}
	/* At this point we can proceed. */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	if (t_type == SCTP_TIMER_TYPE_STRRESET) {
		/* The stream-reset timer does not record (or reference) a net. */
		tmr->net = NULL;
	} else {
		tmr->net = (void *)net;
	}
	tmr->self = (void *)tmr;
#if defined(__FreeBSD__) && !defined(__Userspace__)
	tmr->vnet = (void *)curvnet;
#endif
	tmr->ticks = sctp_get_tick_count();
	if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
		    t_type, to_ticks, inp, stcb, net);
		/*
		 * If this is a newly scheduled callout, as opposed to a
		 * rescheduled one, increment relevant reference counts.
		 */
		if (tmr->ep != NULL) {
			SCTP_INP_INCR_REF(inp);
		}
		if (tmr->tcb != NULL) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
		}
		if (tmr->net != NULL) {
			atomic_add_int(&net->ref_count, 1);
		}
	} else {
		/*
		 * This should not happen, since we checked for pending
		 * above.
		 */
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
		    t_type, to_ticks, inp, stcb, net);
	}
	return;
}

/*-
 * The following table shows which parameters must be provided
 * when calling sctp_timer_stop(). For parameters not being
 * provided, NULL must be used.
 *
 * |Name                         |inp |stcb|net |
 * |-----------------------------|----|----|----|
 * |SCTP_TIMER_TYPE_SEND         |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_INIT         |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_RECV         |Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_SHUTDOWN     |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_HEARTBEAT    |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_COOKIE       |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_NEWCOOKIE    |Yes |No  |No  |
 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_SHUTDOWNACK  |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_ASCONF       |Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_AUTOCLOSE    |Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_STRRESET     |Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_INPKILL      |Yes |No  |No  |
 * |SCTP_TIMER_TYPE_ASOCKILL     |Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_ADDR_WQ      |No  |No  |No  |
 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No  |
 *
 */

/*
 * Stop the timer of kind t_type for the inp/stcb/net combination given
 * in the table above.  'from' records the caller's location for
 * debugging (stored in tmr->stopped_from).  The caller must hold the
 * lock matching the arguments.  If the callout was actually stopped,
 * the references taken by sctp_timer_start() on inp, stcb and net are
 * dropped here.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p",
	    t_type, stcb, stcb->sctp_ep));
	/* Assert that the caller holds the lock matching the arguments. */
	if (stcb != NULL) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	} else if (inp != NULL) {
		SCTP_INP_WLOCK_ASSERT(inp);
	} else {
		SCTP_WQ_ADDR_LOCK_ASSERT();
	}
	tmr = NULL;
	/* Select the timer object; each case validates the argument set. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_SEND:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/* Unlike sctp_timer_start(), ASCONF stop takes no net. */
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		/* ASOCKILL shares the stream-reset timer object. */
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	default:
#ifdef INVARIANTS
		panic("Unknown timer type %d", t_type);
#else
		return;
#endif
	}
	KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
	if ((tmr->type != SCTP_TIMER_TYPE_NONE) &&
	    (tmr->type != t_type)) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped. So just
		 * return.
		 */
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n",
		    t_type, inp, stcb, net);
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) {
		KASSERT(tmr->ep == inp,
		    ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p",
		    t_type, inp, tmr->ep));
		KASSERT(tmr->tcb == stcb,
		    ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p",
		    t_type, stcb, tmr->tcb));
		KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) ||
		    ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)),
		    ("sctp_timer_stop of type %d: net = %p, tmr->net = %p",
		    t_type, net, tmr->net));
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n",
		    t_type, inp, stcb, net);
		/*
		 * If the timer was actually stopped, decrement reference counts
		 * that were incremented in sctp_timer_start().
		 */
		if (tmr->ep != NULL) {
			tmr->ep = NULL;
			SCTP_INP_DECR_REF(inp);
		}
		if (tmr->tcb != NULL) {
			tmr->tcb = NULL;
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
		}
		if (tmr->net != NULL) {
			struct sctp_nets *tmr_net;

			/*
			 * Can't use net, since it doesn't work for
			 * SCTP_TIMER_TYPE_ASCONF.
			 */
			tmr_net = tmr->net;
			tmr->net = NULL;
			sctp_free_remote_addr(tmr_net);
		}
	} else {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n",
		    t_type, inp, stcb, net);
	}
	return;
}

/*
 * Return the total number of bytes in the mbuf chain 'm'
 * (sum of SCTP_BUF_LEN over all mbufs in the chain).
 */
uint32_t
sctp_calculate_len(struct mbuf *m)
{
	struct mbuf *at;
	uint32_t tlen;

	tlen = 0;
	for (at = m; at != NULL; at = SCTP_BUF_NEXT(at)) {
		tlen += SCTP_BUF_LEN(at);
	}
	return (tlen);
}

/*
 * Given an association and starting time of the current RTT period, update
 * RTO in number of msecs. net should point to the current network.
 * Return 1, if an RTO update was performed, return 0 if no update was
 * performed due to invalid starting point.
 */

int
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *old,
    int rtt_from_sack)
{
	struct timeval now;
	uint64_t rtt_us;	/* RTT in us */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;

	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	if ((old->tv_sec > now.tv_sec) ||
	    ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) {
		/* The starting point is in the future. */
		return (0);
	}
	timevalsub(&now, old);
	rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
	if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
		/* The RTT is larger than a sane value. */
		return (0);
	}
	/* store the current RTT in us */
	net->rtt = rtt_us;
	/* compute rtt in ms */
	rtt = (int32_t)(net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/* Tell the CC module that a new update has just occurred from a sack */
		(*asoc->cc_functions.sctp_rtt_calculated)(stcb, net, &now);
	}
	/* Do we need to determine the lan? We do this only
	 * on sacks i.e. RTT being determined from data not
	 * non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}

	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* 'rtt' is reused below as the error term (rtt - srtt). */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	net->RTO = new_rto;
	return (1);
}

/*
 * return a pointer to a contiguous piece of data from the given mbuf chain
 * starting at 'off' for 'len' bytes. If the desired piece spans more than
 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
 * is >= 'len' returns NULL if there isn't 'len' bytes in the chain.
 */
caddr_t
sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
{
	uint32_t count;
	uint8_t *ptr;

	ptr = in_ptr;
	if ((off < 0) || (len <= 0))
		return (NULL);

	/* find the desired start location */
	while ((m != NULL) && (off > 0)) {
		if (off < SCTP_BUF_LEN(m))
			break;
		off -= SCTP_BUF_LEN(m);
		m = SCTP_BUF_NEXT(m);
	}
	if (m == NULL)
		return (NULL);

	/* is the current mbuf large enough (eg. contiguous)? */
	if ((SCTP_BUF_LEN(m) - off) >= len) {
		return (mtod(m, caddr_t) + off);
	} else {
		/* else, it spans more than one mbuf, so save a temp copy... */
		while ((m != NULL) && (len > 0)) {
			count = min(SCTP_BUF_LEN(m) - off, len);
			memcpy(ptr, mtod(m, caddr_t) + off, count);
			len -= count;
			ptr += count;
			off = 0;
			m = SCTP_BUF_NEXT(m);
		}
		if ((m == NULL) && (len > 0))
			return (NULL);
		else
			return ((caddr_t)in_ptr);
	}
}

struct sctp_paramhdr *
sctp_get_next_param(struct mbuf *m,
                    int offset,
                    struct sctp_paramhdr *pull,
                    int pull_limit)
{
	/* This just provides a typed signature to Peter's Pull routine */
	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
	                                              (uint8_t *) pull));
}

/*
 * Append padlen (at most 3) bytes of zero padding to mbuf m, growing the
 * chain with a fresh mbuf if m has no trailing space. Returns the mbuf
 * holding the padding, or NULL on failure.
 */
struct mbuf *
sctp_add_pad_tombuf(struct mbuf *m, int padlen)
{
	struct mbuf *m_last;
	caddr_t dp;

	if (padlen > 3) {
		return (NULL);
	}
	if (padlen <= M_TRAILINGSPACE(m)) {
		/*
		 * The easy way. We hope the majority of the time we hit
		 * here :)
		 */
		m_last = m;
	} else {
		/* Hard way we must grow the mbuf chain */
		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
		if (m_last == NULL) {
			return (NULL);
		}
		SCTP_BUF_LEN(m_last) = 0;
		SCTP_BUF_NEXT(m_last) = NULL;
		SCTP_BUF_NEXT(m) = m_last;
	}
	dp = mtod(m_last, caddr_t) + SCTP_BUF_LEN(m_last);
	SCTP_BUF_LEN(m_last) += padlen;
	memset(dp, 0, padlen);
	return (m_last);
}

struct mbuf *
sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
{
	/* find the last mbuf in chain and pad it */
	struct mbuf *m_at;

	if (last_mbuf != NULL) {
		return (sctp_add_pad_tombuf(last_mbuf, padval));
	} else {
		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
			if (SCTP_BUF_NEXT(m_at) == NULL) {
				return (sctp_add_pad_tombuf(m_at, padval));
			}
		}
	}
	return (NULL);
}

/*
 * Deliver an SCTP_ASSOC_CHANGE notification (and, for 1-to-1 style
 * sockets, set so_error / wake sleepers on COMM_LOST or CANT_STR_ASSOC).
 * Caller holds the TCB lock and the INP read lock (asserted below).
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort,
    bool from_peer, bool timedout, int so_locked)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	struct sctp_inpcb *inp;
	unsigned int notif_len;
	unsigned int i;
	uint16_t abort_len;
#if defined(__APPLE__) && !defined(__Userspace__)
	struct socket *so;
#endif

	KASSERT(abort == NULL || from_peer,
	    ("sctp_notify_assoc_change: ABORT chunk provided for local termination"));
	KASSERT(!from_peer || !timedout,
	    ("sctp_notify_assoc_change: timeouts can only be local"));
	KASSERT(stcb != NULL, ("stcb == NULL"));
	SCTP_TCB_LOCK_ASSERT(stcb);
	inp = stcb->sctp_ep;
	SCTP_INP_READ_LOCK_ASSERT(inp);

	if (sctp_stcb_is_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct
sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
			/*
			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
			 * contiguous.
			 */
			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
				abort_len = SCTP_CHUNK_BUFFER_SIZE;
			}
		} else {
			abort_len = 0;
		}
		/* Up events carry the feature list; down events carry the ABORT. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		if (state == SCTP_CANT_STR_ASSOC) {
			sac->sac_outbound_streams = 0;
			sac->sac_inbound_streams = 0;
		} else {
			sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
			sac->sac_inbound_streams = stcb->asoc.streamincnt;
		}
		sac->sac_assoc_id = sctp_get_associd(stcb);
		/* Only fill in sac_info if we allocated room for it above. */
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(inp, stcb, control,
			    &stcb->sctp_socket->so_rcv, 1,
			    SCTP_READ_LOCK_HELD, so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	     (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if (timedout) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
		SOCK_UNLOCK(stcb->sctp_socket);
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) && !defined(__Userspace__)
	so = SCTP_INP_SO(inp);
	if (!so_locked) {
		/* Drop the TCB lock to honor socket-before-TCB lock order. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	     (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}

/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for address sa.
 * Caller holds the TCB lock and the INP read lock (asserted below).
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked)
{
	struct mbuf
 *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	KASSERT(stcb != NULL, ("stcb == NULL"));
	SCTP_TCB_LOCK_ASSERT(stcb);
	SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep);

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address into spc_aaddr in the form the user expects. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
	{
#ifdef SCTP_EMBEDDED_V6_SCOPE
		struct sockaddr_in6 *sin6;
#endif /* SCTP_EMBEDDED_V6_SCOPE */
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

#ifdef SCTP_EMBEDDED_V6_SCOPE
		sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
			if (sin6->sin6_scope_id == 0) {
				/* recover scope_id for user */
#ifdef SCTP_KAME
				(void)sa6_recoverscope(sin6);
#else
				(void)in6_recoverscope(sin6, &sin6->sin6_addr,
				    NULL);
#endif
			} else {
				/* clear embedded scope_id for user */
				in6_clearscope(&sin6->sin6_addr);
			}
		}
#endif /* SCTP_EMBEDDED_V6_SCOPE */
		break;
	}
#endif
#if defined(__Userspace__)
	case AF_CONN:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_conn));
		break;
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_HELD, so_locked);
}

/*
 * Deliver an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for a
 * chunk that was (sent != 0) or was not yet put on the wire; the chunk's
 * data mbufs are stolen and handed to the reader. Caller holds the TCB
 * lock and the INP read lock (asserted below).
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	KASSERT(stcb != NULL, ("stcb == NULL"));
	SCTP_TCB_LOCK_ASSERT(stcb);
	SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep);

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify =
sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			/* Sanity-check the header before trusting its length. */
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			m_adj(chk->data, chkhdr_len);
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_HELD, so_locked);
}

/*
 * As sctp_notify_send_failed(), but for a message still on the stream
 * queue (never fragmented into chunks); the pending data is stolen from sp.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t
 error,
    struct sctp_stream_queue_pending *sp, int so_locked)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int notifhdr_len;

	KASSERT(stcb != NULL, ("stcb == NULL"));
	SCTP_TCB_LOCK_ASSERT(stcb);
	SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep);

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->sid;
		if (sp->some_taken) {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->sid;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_HELD, so_locked);
}

/*
 * Deliver an SCTP_ADAPTATION_INDICATION notification carrying the peer's
 * adaptation layer indication. Caller holds the TCB lock and the INP read
 * lock (asserted below).
 */
static void
sctp_notify_adaptation_layer(struct sctp_tcb *stcb, int so_locked)
{
	struct mbuf *m_notify;
	struct sctp_adaptation_event *sai;
	struct sctp_queued_to_read *control;

	KASSERT(stcb != NULL, ("stcb == NULL"));
	SCTP_TCB_LOCK_ASSERT(stcb);
	SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep);

	if
(sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3756 /* event not enabled */ 3757 return; 3758 } 3759 3760 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3761 if (m_notify == NULL) 3762 /* no space left */ 3763 return; 3764 SCTP_BUF_LEN(m_notify) = 0; 3765 sai = mtod(m_notify, struct sctp_adaptation_event *); 3766 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3767 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3768 sai->sai_flags = 0; 3769 sai->sai_length = sizeof(struct sctp_adaptation_event); 3770 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3771 sai->sai_assoc_id = sctp_get_associd(stcb); 3772 3773 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3774 SCTP_BUF_NEXT(m_notify) = NULL; 3775 3776 /* append to socket */ 3777 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3778 0, 0, stcb->asoc.context, 0, 0, 0, 3779 m_notify); 3780 if (control == NULL) { 3781 /* no memory */ 3782 sctp_m_freem(m_notify); 3783 return; 3784 } 3785 control->length = SCTP_BUF_LEN(m_notify); 3786 control->spec_flags = M_NOTIFICATION; 3787 /* not that we need this */ 3788 control->tail_mbuf = m_notify; 3789 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3790 &stcb->sctp_socket->so_rcv, 1, 3791 SCTP_READ_LOCK_HELD, so_locked); 3792 } 3793 3794 static void 3795 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3796 struct sctp_queued_to_read *aborted_control, 3797 int so_locked) 3798 { 3799 struct mbuf *m_notify; 3800 struct sctp_pdapi_event *pdapi; 3801 struct sctp_queued_to_read *control; 3802 struct sockbuf *sb; 3803 3804 KASSERT(aborted_control != NULL, ("aborted_control is NULL")); 3805 KASSERT(stcb != NULL, ("stcb == NULL")); 3806 SCTP_TCB_LOCK_ASSERT(stcb); 3807 SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); 3808 3809 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3810 /* event not enabled */ 3811 
return; 3812 } 3813 3814 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3815 if (m_notify == NULL) 3816 /* no space left */ 3817 return; 3818 SCTP_BUF_LEN(m_notify) = 0; 3819 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3820 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3821 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3822 pdapi->pdapi_flags = 0; 3823 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3824 pdapi->pdapi_indication = error; 3825 pdapi->pdapi_stream = aborted_control->sinfo_stream; 3826 pdapi->pdapi_seq = (uint16_t)aborted_control->mid; 3827 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3828 3829 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3830 SCTP_BUF_NEXT(m_notify) = NULL; 3831 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3832 0, 0, stcb->asoc.context, 0, 0, 0, 3833 m_notify); 3834 if (control == NULL) { 3835 /* no memory */ 3836 sctp_m_freem(m_notify); 3837 return; 3838 } 3839 control->length = SCTP_BUF_LEN(m_notify); 3840 control->spec_flags = M_NOTIFICATION; 3841 /* not that we need this */ 3842 control->tail_mbuf = m_notify; 3843 sb = &stcb->sctp_socket->so_rcv; 3844 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3845 sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3846 } 3847 sctp_sballoc(stcb, sb, m_notify); 3848 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3849 sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0); 3850 } 3851 control->end_added = 1; 3852 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, aborted_control, control, next); 3853 if (stcb->sctp_ep && stcb->sctp_socket) { 3854 /* This should always be the case */ 3855 #if defined(__APPLE__) && !defined(__Userspace__) 3856 struct socket *so; 3857 3858 so = SCTP_INP_SO(stcb->sctp_ep); 3859 if (!so_locked) { 3860 atomic_add_int(&stcb->asoc.refcnt, 1); 3861 
SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}

/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification; for 1-to-1 style sockets
 * also mark the socket as unable to send. Caller holds the TCB lock and
 * the INP read lock (asserted below).
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb, int so_locked)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	KASSERT(stcb != NULL, ("stcb == NULL"));
	SCTP_TCB_LOCK_ASSERT(stcb);
	SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep);

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) && !defined(__Userspace__)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_HELD, so_locked);
}

/*
 * Deliver an SCTP_SENDER_DRY_EVENT notification. Caller holds the TCB
 * lock and the INP read lock (asserted below).
 */
static void
sctp_notify_sender_dry_event(struct sctp_tcb *stcb, int so_locked)
{
	struct mbuf *m_notify;
	struct sctp_sender_dry_event
 *event;
	struct sctp_queued_to_read *control;

	KASSERT(stcb != NULL, ("stcb == NULL"));
	SCTP_TCB_LOCK_ASSERT(stcb);
	SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep);

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
		/* event not enabled */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	event = mtod(m_notify, struct sctp_sender_dry_event *);
	memset(event, 0, sizeof(struct sctp_sender_dry_event));
	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
	event->sender_dry_flags = 0;
	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
	event->sender_dry_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_HELD, so_locked);
}

/*
 * Deliver an SCTP_STREAM_CHANGE_EVENT notification with the current
 * in/out stream counts. Suppressed when the peer made the request
 * (asoc.peer_req_out). Caller holds the TCB lock and the INP read lock.
 */
static void
sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int flag, int so_locked)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_change_event *stradd;

	KASSERT(stcb != NULL, ("stcb == NULL"));
	SCTP_TCB_LOCK_ASSERT(stcb);
	SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep);

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT)) {
		/* event not enabled */
		return;
	}

	if ((stcb->asoc.peer_req_out) && flag) {
		/* Peer made the request, don't tell the local user */
		stcb->asoc.peer_req_out = 0;
		return;
	}
	stcb->asoc.peer_req_out = 0;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	stradd = mtod(m_notify, struct sctp_stream_change_event *);
	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
	stradd->strchange_flags = flag;
	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
	stradd->strchange_assoc_id = sctp_get_associd(stcb);
	stradd->strchange_instrms = stcb->asoc.streamincnt;
	stradd->strchange_outstrms = stcb->asoc.streamoutcnt;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_HELD, so_locked);
}

/*
 * Deliver an SCTP_ASSOC_RESET_EVENT notification with the local and
 * remote TSNs after a TSN reset. Caller holds the TCB lock and the INP
 * read lock.
 */
static void
sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, int flag, int so_locked)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_assoc_reset_event *strasoc;

	KASSERT(stcb != NULL, ("stcb == 
NULL")); 4073 SCTP_TCB_LOCK_ASSERT(stcb); 4074 SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); 4075 4076 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT)) { 4077 /* event not enabled */ 4078 return; 4079 } 4080 4081 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 4082 if (m_notify == NULL) 4083 /* no space left */ 4084 return; 4085 SCTP_BUF_LEN(m_notify) = 0; 4086 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 4087 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 4088 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 4089 strasoc->assocreset_flags = flag; 4090 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 4091 strasoc->assocreset_assoc_id= sctp_get_associd(stcb); 4092 strasoc->assocreset_local_tsn = stcb->asoc.sending_seq; 4093 strasoc->assocreset_remote_tsn = stcb->asoc.mapping_array_base_tsn + 1; 4094 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 4095 SCTP_BUF_NEXT(m_notify) = NULL; 4096 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 4097 /* no space */ 4098 sctp_m_freem(m_notify); 4099 return; 4100 } 4101 /* append to socket */ 4102 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4103 0, 0, stcb->asoc.context, 0, 0, 0, 4104 m_notify); 4105 if (control == NULL) { 4106 /* no memory */ 4107 sctp_m_freem(m_notify); 4108 return; 4109 } 4110 control->length = SCTP_BUF_LEN(m_notify); 4111 control->spec_flags = M_NOTIFICATION; 4112 /* not that we need this */ 4113 control->tail_mbuf = m_notify; 4114 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 4115 &stcb->sctp_socket->so_rcv, 1, 4116 SCTP_READ_LOCK_HELD, so_locked); 4117 } 4118 4119 static void 4120 sctp_notify_stream_reset(struct sctp_tcb *stcb, 4121 int number_entries, uint16_t *list, int flag, int so_locked) 4122 { 4123 struct mbuf *m_notify; 4124 struct sctp_queued_to_read *control; 4125 struct 
sctp_stream_reset_event *strreset; 4126 int len; 4127 4128 KASSERT(stcb != NULL, ("stcb == NULL")); 4129 SCTP_TCB_LOCK_ASSERT(stcb); 4130 SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); 4131 4132 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) { 4133 /* event not enabled */ 4134 return; 4135 } 4136 4137 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 4138 if (m_notify == NULL) 4139 /* no space left */ 4140 return; 4141 SCTP_BUF_LEN(m_notify) = 0; 4142 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 4143 if (len > M_TRAILINGSPACE(m_notify)) { 4144 /* never enough room */ 4145 sctp_m_freem(m_notify); 4146 return; 4147 } 4148 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 4149 memset(strreset, 0, len); 4150 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 4151 strreset->strreset_flags = flag; 4152 strreset->strreset_length = len; 4153 strreset->strreset_assoc_id = sctp_get_associd(stcb); 4154 if (number_entries) { 4155 int i; 4156 4157 for (i = 0; i < number_entries; i++) { 4158 strreset->strreset_stream_list[i] = ntohs(list[i]); 4159 } 4160 } 4161 SCTP_BUF_LEN(m_notify) = len; 4162 SCTP_BUF_NEXT(m_notify) = NULL; 4163 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 4164 /* no space */ 4165 sctp_m_freem(m_notify); 4166 return; 4167 } 4168 /* append to socket */ 4169 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4170 0, 0, stcb->asoc.context, 0, 0, 0, 4171 m_notify); 4172 if (control == NULL) { 4173 /* no memory */ 4174 sctp_m_freem(m_notify); 4175 return; 4176 } 4177 control->length = SCTP_BUF_LEN(m_notify); 4178 control->spec_flags = M_NOTIFICATION; 4179 /* not that we need this */ 4180 control->tail_mbuf = m_notify; 4181 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 4182 &stcb->sctp_socket->so_rcv, 1, 4183 SCTP_READ_LOCK_HELD, so_locked); 4184 } 4185 4186 static void 4187 
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error,
    struct sctp_error_chunk *chunk, int so_locked)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t chunk_len;

	KASSERT(stcb != NULL, ("stcb == NULL"));
	SCTP_TCB_LOCK_ASSERT(stcb);
	SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep);

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		return;
	}

	/* Clamp how much of the peer's ERROR chunk we copy into the event. */
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
		/*
		 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
		 * contiguous.
		 */
		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
		}
	} else {
		chunk_len = 0;
	}
	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/* Only append chunk data if the larger allocation succeeded. */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		control->spec_flags = M_NOTIFICATION;
		/* not that we need this */
		control->tail_mbuf = m_notify;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_HELD, so_locked);
	} else {
		sctp_m_freem(m_notify);
	}
}

/*
 * Central dispatcher: translate an SCTP_NOTIFY_* code into the matching
 * socket-level notification.  Takes the INP read lock itself, except for
 * the partial delivery indication, whose caller already holds it (see the
 * lock/assert pairing below).  Meaning of 'error' and 'data' depends on
 * the notification code.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	KASSERT(stcb != NULL, ("stcb == NULL"));
	SCTP_TCB_LOCK_ASSERT(stcb);

	inp = stcb->sctp_ep;
#if defined(__APPLE__) && !defined(__Userspace__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(inp));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(inp));
	}
#endif
	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
		return;
	}
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	if (notification != SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION) {
		SCTP_INP_READ_LOCK(inp);
	}
	SCTP_INP_READ_LOCK_ASSERT(inp);

	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		SCTP_INP_READ_UNLOCK(inp);
		return;
	}

	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, false, false, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb, so_locked);
		}
		if (stcb->asoc.auth_supported == 0) {
			sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, false, false, so_locked);
#if defined(__Userspace__)
		if (inp->recv_callback) {
			if (stcb->sctp_socket) {
				union sctp_sockstore addr;
				struct sctp_rcvinfo rcv;

				memset(&addr, 0, sizeof(union sctp_sockstore));
				memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
				/* Hold a reference while the TCB lock is dropped for the upcall. */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				inp->recv_callback(stcb->sctp_socket, addr, NULL, 0, rcv, 0, inp->ulp_info);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
		}
#endif
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		net = (struct sctp_nets *)data;
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
		    &net->ro._l_addr.sa, error, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_UP:
		net = (struct sctp_nets *)data;
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
		    &net->ro._l_addr.sa, error, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		net = (struct sctp_nets *)data;
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
		    &net->ro._l_addr.sa, error, so_locked);
		break;
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	/* Note: "DELVIERY" typo is part of the public constant name. */
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		sctp_notify_partial_delivery_indication(stcb, error,
		    (struct sctp_queued_to_read *)data,
		    so_locked);
		break;
	/*
	 * For the abort/timeout cases below, an association still in a
	 * front state reports SCTP_CANT_STR_ASSOC instead of SCTP_COMM_LOST.
	 */
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, false, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, false, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, true, false, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, true, false, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_TIMEDOUT:
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, true, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, true, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, false, false, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, 0, so_locked);
		}
		break;
	/*
	 * For the stream reset cases, 'error' carries the number of
	 * entries in the uint16_t stream list passed via 'data'.
	 */
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN, so_locked);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING, so_locked);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_FAILED), so_locked);
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_DENIED), so_locked);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_FAILED), so_locked);
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_DENIED), so_locked);
		break;
	case SCTP_NOTIFY_STR_RESET_ADD:
		sctp_notify_stream_reset_add(stcb, error, so_locked);
		break;
	case SCTP_NOTIFY_STR_RESET_TSN:
		sctp_notify_stream_reset_tsn(stcb, error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY,
		    *(uint16_t *)data, so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY,
		    *(uint16_t *)data, so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH,
		    0, so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data, so_locked);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	}
	/* Drop the read lock only if we took it above. */
	if (notification != SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION) {
		SCTP_INP_READ_UNLOCK(inp);
	}
}

/*
 * Report every queued outbound chunk (sent queue, send queue, and each
 * stream's output queue) as failed with 'error', freeing the chunks and
 * their data as we go.  Used when the association is being torn down.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
#if defined(__APPLE__) && !defined(__Userspace__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* Re-check: the notify path may have consumed the data. */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/*sa_ignore FREED_MEMORY*/
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/*sa_ignore FREED_MEMORY*/
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* Re-check: the notify path may have consumed the data. */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/*sa_ignore FREED_MEMORY*/
		}
	}
}

/*
 * Notify the ULP that the association was aborted: mark the PCB/TCB
 * state, fail all outbound data, then raise the appropriate
 * aborted/timed-out notification depending on who caused the abort.
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, bool from_peer, bool timeout,
    uint16_t error, struct sctp_abort_chunk *abort,
    int so_locked)
{
	if (stcb == NULL) {
		return;
	}
#if defined(__APPLE__) && !defined(__Userspace__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	SCTP_TCB_LOCK_ASSERT(stcb);

	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_WAS_ABORTED);
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		if (timeout) {
			sctp_ulp_notify(SCTP_NOTIFY_ASSOC_TIMEDOUT, stcb, error, abort, so_locked);
		} else {
			sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
		}
	}
}

/*
 * Send an ABORT for (and, when a TCB exists, tear down) an association in
 * response to an inbound packet.  'op_err', if given, supplies the error
 * cause that is forwarded to the peer and reported to the ULP.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
    uint8_t mflowtype, uint32_t mflowid,
#endif
    uint32_t vrf_id, uint16_t port)
{
#if defined(__APPLE__) && !defined(__Userspace__)
	struct socket *so;
#endif
	struct sctp_gen_error_cause *cause;
	uint32_t vtag;
	uint16_t cause_code;

	if (stcb != NULL) {
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
		if (op_err != NULL) {
			/* Read the cause code from the error cause. */
			cause = mtod(op_err, struct sctp_gen_error_cause *);
			cause_code = ntohs(cause->code);
		} else {
			cause_code = 0;
		}
	} else {
		/* No TCB: cause_code is not used on this path. */
		vtag = 0;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
	    mflowtype, mflowid, inp->fibnum,
#endif
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, false, false, cause_code, NULL, SCTP_SO_NOT_LOCKED);
		/* Ok, now lets free it */
#if defined(__APPLE__) && !defined(__Userspace__)
		/* Apple: must take the socket lock before freeing the assoc. */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
#ifdef SCTP_ASOCLOG_OF_TSNS
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
/* Debug dump of the in/out TSN logs; only active with NOSIY_PRINTS (sic). */
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* Log wrapped: print the older half first. */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
#endif

/*
 * Locally abort an association: send an ABORT to the peer, notify the
 * ULP (unless the socket is gone), and free the association.  With a
 * NULL stcb only the endpoint cleanup (if pending) is performed.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err, bool timedout, int so_locked)
{
#if defined(__APPLE__) && !defined(__Userspace__)
	struct socket *so;
#endif
	struct sctp_gen_error_cause *cause;
	uint16_t cause_code;

#if defined(__APPLE__) && !defined(__Userspace__)
	so = SCTP_INP_SO(inp);
#endif
#if defined(__APPLE__) && !defined(__Userspace__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(inp));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(inp));
	}
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
#if defined(__APPLE__) && !defined(__Userspace__)
				if (!so_locked) {
					SCTP_SOCKET_LOCK(so, 1);
				}
#endif
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
#if defined(__APPLE__) && !defined(__Userspace__)
				if (!so_locked) {
					SCTP_SOCKET_UNLOCK(so, 1);
				}
#endif
			}
		}
		return;
	}
	if (op_err != NULL) {
		/* Read the cause code from the error cause. */
		cause = mtod(op_err, struct sctp_gen_error_cause *);
		cause_code = ntohs(cause->code);
	} else {
		cause_code = 0;
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, false, timedout, cause_code, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) && !defined(__Userspace__)
	/* Apple: take the socket lock (dropping the TCB lock) before freeing. */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) && !defined(__Userspace__)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}

/*
 * Handle an out-of-the-blue (OOTB) packet: no association matches it.
 * Scan the chunks to decide whether to stay silent, send a
 * SHUTDOWN-COMPLETE, or send an ABORT (subject to the blackhole sysctl).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
#if defined(__FreeBSD__) && !defined(__Userspace__)
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
#endif
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
#if defined(__APPLE__) && !defined(__Userspace__)
			SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
#endif
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
#if defined(__APPLE__) && !defined(__Userspace__)
			SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
#endif
		}
	}
	contains_init_chunk = 0;
	/* Walk the chunk list; some chunk types suppress any response. */
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
#if defined(__FreeBSD__) && !defined(__Userspace__)
			    mflowtype, mflowid, fibnum,
#endif
			    vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/*
	 * blackhole sysctl: 0 = always ABORT, 1 = ABORT unless the packet
	 * contained an INIT, 2 (or more) = never ABORT.
	 */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	     (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		    mflowtype, mflowid, fibnum,
#endif
		    vrf_id, port);
	}
}

/*
 * check the inbound datagram to make sure there is not an abort inside it,
 * if there is return 1, else return 0.
 */
int
sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtag)
{
	struct sctp_chunkhdr *ch;
	struct sctp_init_chunk *init_chk, chunk_buf;
	int offset;
	unsigned int chk_length;

	offset = iphlen + sizeof(struct sctphdr);
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
	    (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* packet is probably corrupt */
			break;
		}
		/* we seem to be ok, is it an abort? */
		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
			/* yep, tell them */
			return (1);
		}
		if ((ch->chunk_type == SCTP_INITIATION) ||
		    (ch->chunk_type == SCTP_INITIATION_ACK)) {
			/* need to update the Vtag */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    offset, sizeof(struct sctp_init_chunk), (uint8_t *) & chunk_buf);
			if (init_chk != NULL) {
				*vtag = ntohl(init_chk->init.initiate_tag);
			}
		}
		/* Nope, move to the next chunk */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	return (0);
}

/*
 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
 * set (i.e. it's 0) so, create this function to compare link local scopes
 */
#ifdef INET6
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
#if defined(__Userspace__)
	/*__Userspace__ Returning 1 here always */
#endif
#if defined(SCTP_EMBEDDED_V6_SCOPE)
	struct sockaddr_in6 a, b;

	/* save copies */
	a = *addr1;
	b = *addr2;

	/*
	 * NOTE: the outer 'if' has no braces; the #ifdef selects which
	 * recover-scope call forms its (braced) body.
	 */
	if (a.sin6_scope_id == 0)
#ifdef SCTP_KAME
		if (sa6_recoverscope(&a)) {
#else
		if (in6_recoverscope(&a, &a.sin6_addr, NULL)) {
#endif /* SCTP_KAME */
			/* can't get scope, so can't match */
			return (0);
		}
	if (b.sin6_scope_id == 0)
#ifdef SCTP_KAME
		if (sa6_recoverscope(&b)) {
#else
		if (in6_recoverscope(&b, &b.sin6_addr, NULL)) {
#endif /* SCTP_KAME */
			/* can't get scope, so can't match */
			return (0);
		}
	if (a.sin6_scope_id != b.sin6_scope_id)
		return (0);
#else
	if (addr1->sin6_scope_id != addr2->sin6_scope_id)
		return (0);
#endif /* SCTP_EMBEDDED_V6_SCOPE */

	return (1);
}

#if defined(SCTP_EMBEDDED_V6_SCOPE)
/*
 * returns a sockaddr_in6 with embedded scope recovered and removed
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				*store = *addr;
#ifdef SCTP_KAME
				if (!sa6_recoverscope(store)) {
#else
				if (!in6_recoverscope(store, &store->sin6_addr,
				    NULL)) {
#endif /* SCTP_KAME */
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
#endif /* SCTP_EMBEDDED_V6_SCOPE */
#endif

/*
 * are the two addresses the same? currently a "scopeless" check returns: 1
 * if same, 0 if not
 */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{

	/* must be valid */
	if (sa1 == NULL || sa2 == NULL)
		return (0);

	/* must be the same family */
	if (sa1->sa_family != sa2->sa_family)
		return (0);

	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
	{
		/* IPv6 addresses */
		struct sockaddr_in6 *sin6_1, *sin6_2;

		sin6_1 = (struct sockaddr_in6 *)sa1;
		sin6_2 = (struct sockaddr_in6 *)sa2;
		return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
		    sin6_2));
	}
#endif
#ifdef INET
	case AF_INET:
	{
		/* IPv4 addresses */
		struct sockaddr_in *sin_1, *sin_2;

		sin_1 = (struct sockaddr_in *)sa1;
		sin_2 = (struct sockaddr_in *)sa2;
		/* Ports are deliberately not compared here. */
		return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
	}
#endif
#if defined(__Userspace__)
	case AF_CONN:
	{
		struct sockaddr_conn *sconn_1, *sconn_2;

		sconn_1 = (struct sockaddr_conn *)sa1;
		sconn_2 = (struct sockaddr_conn *)sa2;
		return (sconn_1->sconn_addr == sconn_2->sconn_addr);
	}
#endif
	default:
		/* we don't do these... */
		return (0);
	}
}

/*
 * Debug helper: print a sockaddr (IPv4, IPv6, or userspace AF_CONN)
 * in human readable form via SCTP_PRINTF.
 */
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
#if defined(__FreeBSD__) && !defined(__Userspace__)
	char ip6buf[INET6_ADDRSTRLEN];
#endif
#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
	{
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sa;
#if defined(__Userspace__)
		SCTP_PRINTF("IPv6 address: %x:%x:%x:%x:%x:%x:%x:%x:port:%d scope:%u\n",
		    ntohs(sin6->sin6_addr.s6_addr16[0]),
		    ntohs(sin6->sin6_addr.s6_addr16[1]),
		    ntohs(sin6->sin6_addr.s6_addr16[2]),
		    ntohs(sin6->sin6_addr.s6_addr16[3]),
		    ntohs(sin6->sin6_addr.s6_addr16[4]),
		    ntohs(sin6->sin6_addr.s6_addr16[5]),
		    ntohs(sin6->sin6_addr.s6_addr16[6]),
		    ntohs(sin6->sin6_addr.s6_addr16[7]),
		    ntohs(sin6->sin6_port),
		    sin6->sin6_scope_id);
#else
#if defined(__FreeBSD__) && !defined(__Userspace__)
		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
		    ip6_sprintf(ip6buf, &sin6->sin6_addr),
		    ntohs(sin6->sin6_port),
		    sin6->sin6_scope_id);
#else
		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
		    ip6_sprintf(&sin6->sin6_addr),
		    ntohs(sin6->sin6_port),
		    sin6->sin6_scope_id);
#endif
#endif
		break;
	}
#endif
#ifdef INET
	case AF_INET:
	{
		struct sockaddr_in *sin;
		unsigned char *p;

		sin = (struct sockaddr_in *)sa;
		p = (unsigned char *)&sin->sin_addr;
		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
		    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
		break;
	}
#endif
#if defined(__Userspace__)
	case AF_CONN:
	{
		struct sockaddr_conn *sconn;

		sconn = (struct sockaddr_conn *)sa;
		SCTP_PRINTF("AF_CONN address: %p\n", sconn->sconn_addr);
		break;
	}
#endif
	default:
		SCTP_PRINTF("?\n");
		break;
	}
}

void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 *
	 * Used on peeloff/accept: read-queue entries for stcb migrate from
	 * old_inp to new_inp, and the socket-buffer byte accounting is
	 * moved from old_so->so_rcv to new_so->so_rcv along the way.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
	int error = 0;
#endif

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
	/* take the receive I/O lock on the old socket so no reader races us */
#if defined(__FreeBSD__)
	error = SOCK_IO_RECV_LOCK(old_so, waitflags);
#else
	error = sblock(&old_so->so_rcv, waitflags);
#endif
	if (error) {
		/* Gak, can't get I/O lock, we have a problem.
		 * data will be left stranded.. and we
		 * don't dare look at it since the
		 * other thread may be reading something.
		 * Oh well, its a screwed up app that does
		 * a peeloff OR a accept while reading
		 * from the main socket... actually its
		 * only the peeloff() case, since I think
		 * read will fail on a listening socket..
		 */
		return;
	}
#endif
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* release each mbuf's bytes from the old rcv buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE,SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the recv-lock on the old socket */
#if defined(__APPLE__) && !defined(__Userspace__)
	sbunlock(&old_so->so_rcv, 1);
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
	SOCK_IO_RECV_UNLOCK(old_so);
#endif
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge each mbuf's bytes to the new rcv buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}

void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !(defined(__APPLE__) && !defined(__Userspace__))
    SCTP_UNUSED
#endif
)
{
	/*
	 * Wake up any reader sleeping on inp's socket, unless the endpoint
	 * is a still-listening TCP-model socket (no reader expected there).
	 * On Apple the socket lock may need to be taken first; the stcb
	 * refcount juggling keeps the assoc alive across the unlock window.
	 */
	if ((inp != NULL) &&
	    (inp->sctp_socket != NULL) &&
	    (((inp->sctp_flags & (SCTP_PCB_FLAGS_TCPTYPE | SCTP_PCB_FLAGS_IN_TCPPOOL)) == 0) ||
	     !SCTP_IS_LISTENING(inp))) {
#if defined(__APPLE__) && !defined(__Userspace__)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
#if defined(__Userspace__)

/*
 * Deliver queued data to the application's registered recv_callback once a
 * complete message (end_added) or at least the partial-delivery threshold
 * worth of bytes is available.  No-op when no callback is registered or no
 * socket/stcb is present.
 */
void
sctp_invoke_recv_callback(struct sctp_inpcb *inp,
                          struct sctp_tcb *stcb,
                          struct sctp_queued_to_read *control,
                          int inp_read_lock_held)
{
	uint32_t pd_point, length;

	if ((inp->recv_callback == NULL) ||
	    (stcb == NULL) ||
	    (stcb->sctp_socket == NULL)) {
		return;
	}

	length = control->length;
	/* NOTE(review): stcb and stcb->sctp_socket were verified non-NULL
	 * above, so this condition is always true and the else branch is
	 * dead code. */
	if (stcb != NULL && stcb->sctp_socket != NULL) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		               stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = inp->partial_delivery_point;
	}
	if ((control->end_added == 1) || (length >= pd_point)) {
		struct socket *so;
		struct mbuf *m;
		char *buffer;
		struct sctp_rcvinfo rcv;
		union sctp_sockstore addr;
		int flags;

		/* flat copy of the mbuf chain handed to the callback;
		 * presumably the callback owns and frees it -- confirm
		 * against the usrsctp callback API contract. */
		if ((buffer = malloc(length)) == NULL) {
			return;
		}
		if (inp_read_lock_held == 0) {
			SCTP_INP_READ_LOCK(inp);
		}
		so = stcb->sctp_socket;
		/* undo the receive-buffer accounting for every mbuf */
		for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
			sctp_sbfree(control, control->stcb, &so->so_rcv, m);
		}
		m_copydata(control->data, 0, length, buffer);
		memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
		rcv.rcv_sid = control->sinfo_stream;
		rcv.rcv_ssn = (uint16_t)control->mid;
		rcv.rcv_flags = control->sinfo_flags;
		rcv.rcv_ppid = control->sinfo_ppid;
		rcv.rcv_tsn = control->sinfo_tsn;
		rcv.rcv_cumtsn = control->sinfo_cumtsn;
		rcv.rcv_context = control->sinfo_context;
		rcv.rcv_assoc_id = control->sinfo_assoc_id;
		memset(&addr, 0, sizeof(union sctp_sockstore));
		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
#ifdef INET
		case AF_INET:
			addr.sin = control->whoFrom->ro._l_addr.sin;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			addr.sin6 = control->whoFrom->ro._l_addr.sin6;
			break;
#endif
		case AF_CONN:
			addr.sconn = control->whoFrom->ro._l_addr.sconn;
			break;
		default:
			addr.sa = control->whoFrom->ro._l_addr.sa;
			break;
		}
		flags = 0;
		if (control->end_added == 1) {
			flags |= MSG_EOR;
		}
		if (control->spec_flags & M_NOTIFICATION) {
			flags |= MSG_NOTIFICATION;
		}
		sctp_m_freem(control->data);
		control->data = NULL;
		control->tail_mbuf = NULL;
		control->length = 0;
		if (control->end_added) {
			/* full message consumed: drop it from the read queue */
			TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next);
			control->on_read_q = 0;
			sctp_free_remote_addr(control->whoFrom);
			control->whoFrom = NULL;
			sctp_free_a_readq(stcb, control);
		}
		/* hold a ref and drop the TCB lock around the user callback */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		if (inp_read_lock_held == 0) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		inp->recv_callback(so, addr, buffer, length, rcv, flags, inp->ulp_info);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
}
#endif

void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 *
	 * "end" marks the control as a complete message (end_added);
	 * zero-length mbufs are stripped from the chain while the bytes of
	 * the remaining ones are charged to sb and summed into
	 * control->length.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
#if defined(__APPLE__) && !defined(__Userspace__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(inp));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(inp));
	}
#endif
	if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nobody can read any more: discard unless a stream queue
		 * still references this control */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			if (control->data) {
				sctp_m_freem(control->data);
				control->data = NULL;
			}
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return;
	}
	if ((control->spec_flags & M_NOTIFICATION) == 0) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m != NULL) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
#if defined(__Userspace__)
	sctp_invoke_recv_callback(inp, stcb, control, SCTP_READ_LOCK_HELD);
#endif
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
	if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) {
		SCTP_INP_READ_UNLOCK(inp);
	}
}

/*************HOLD THIS COMMENT FOR PATCH FILE OF
 *************ALTERNATE ROUTING CODE
 */

/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
 *************ALTERNATE ROUTING CODE
 */

/*
 * Build an SCTP operation-error cause mbuf from "code" and the NUL
 * terminated string "info".  Returns NULL when code is 0, info is NULL,
 * info is too long for a cause, or no mbuf is available.
 */
struct mbuf *
sctp_generate_cause(uint16_t code, char *info)
{
	struct mbuf *m;
	struct sctp_gen_error_cause *cause;
	size_t info_len;
	uint16_t len;

	if ((code == 0) || (info == NULL)) {
		return (NULL);
	}
	info_len = strlen(info);
	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
		return (NULL);
	}
	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (m != NULL) {
		SCTP_BUF_LEN(m) =
len;
		cause = mtod(m, struct sctp_gen_error_cause *);
		/* code and length go out in network byte order */
		cause->code = htons(code);
		cause->length = htons(len);
		memcpy(cause->info, info, info_len);
	}
	return (m);
}

/*
 * Build a "No User Data" error cause (SCTP_CAUSE_NO_USER_DATA) carrying
 * the offending TSN.  Returns NULL if no mbuf is available.
 */
struct mbuf *
sctp_generate_no_user_data_cause(uint32_t tsn)
{
	struct mbuf *m;
	struct sctp_error_no_user_data *no_user_data_cause;
	uint16_t len;

	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (m != NULL) {
		SCTP_BUF_LEN(m) = len;
		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
		no_user_data_cause->cause.length = htons(len);
		no_user_data_cause->tsn = htonl(tsn);
	}
	return (m);
}

/*
 * Release the outbound buffer-space accounting held by chunk tp1:
 * decrement the association's chunk and byte counters and, for TCP-model
 * sockets, the send socket buffer.  No-op when tp1 carries no data.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	atomic_subtract_int(&asoc->chunks_on_out_queue, chk_cnt);
#ifdef SCTP_MBCNT_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
			       asoc->total_output_queue_size,
			       tp1->book_size,
			       0,
			       tp1->mbcnt);
	}
#endif
	/* clamp at zero rather than underflowing the unsigned counter */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_subtract_int(&asoc->total_output_queue_size, tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}
	if ((stcb->sctp_socket != NULL) &&
	    (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	     ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		SCTP_SB_DECR(&stcb->sctp_socket->so_snd, tp1->book_size);
	}
}

/*
 * Abandon the (possibly fragmented) PR-SCTP message that tp1 belongs to:
 * free its data across the sent, send and stream-out queues, notify the
 * ULP, and mark the chunks for FORWARD-TSN.  Returns the number of
 * "book_size" bytes released.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked)
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint32_t mid;
	uint16_t sid;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

#if defined(__APPLE__) && !defined(__Userspace__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	SCTP_TCB_LOCK_ASSERT(stcb);

	sid = tp1->rec.data.sid;
	mid = tp1->rec.data.mid;
	/* account the abandonment against the right stat bucket */
	if (sent || ((tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0)) {
		stcb->asoc.abandoned_sent[0]++;
		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	} else {
		stcb->asoc.abandoned_unsent[0]++;
		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	}
	/* walk forward from tp1 freeing fragments until the message ends */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			/* give the bytes back to the peer's receive window */
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we ae done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.sid != sid) ||
			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
				break;
			}
			/* save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one
			 * we don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue so we can wait for it to be passed by. */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there
		 * is stuff left on the stream out queue.. yuck.
		 */
		strq = &stcb->asoc.strmout[sid];
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp != NULL) {
			sp->discard_rest = 1;
			/*
			 * We may need to put a chunk on the
			 * queue that holds the TSN that
			 * would have been sent with the LAST
			 * bit.
			 */
			if (chk == NULL) {
				/* Yep, we have to */
				sctp_alloc_a_chunk(stcb, chk);
				if (chk == NULL) {
					/* we are hosed. All we can
					 * do is nothing.. which will
					 * cause an abort if the peer is
					 * paying attention.
					 */
					goto oh_well;
				}
				memset(chk, 0, sizeof(*chk));
				chk->rec.data.rcv_flags = 0;
				chk->sent = SCTP_FORWARD_TSN_SKIP;
				chk->asoc = &stcb->asoc;
				if (stcb->asoc.idata_supported == 0) {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = 0;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				} else {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = strq->next_mid_unordered;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				}
				chk->rec.data.sid = sp->sid;
				chk->rec.data.ppid = sp->ppid;
				chk->rec.data.context = sp->context;
				chk->flags = sp->act_flags;
				chk->whoTo = NULL;
#if defined(__FreeBSD__) && !defined(__Userspace__)
				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
#else
				chk->rec.data.tsn = stcb->asoc.sending_seq++;
#endif
				strq->chunks_on_queues++;
				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
				stcb->asoc.sent_queue_cnt++;
				stcb->asoc.pr_sctp_cnt++;
			}
			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
			if (sp->sinfo_flags & SCTP_UNORDERED) {
				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
			}
			if (stcb->asoc.idata_supported == 0) {
				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
					strq->next_mid_ordered++;
				}
			} else {
				if (sp->sinfo_flags & SCTP_UNORDERED) {
					strq->next_mid_unordered++;
				} else {
					strq->next_mid_ordered++;
				}
			}
		oh_well:
			if (sp->data) {
				/* Pull any data to free up the SB and
				 * allow sender to "add more" while we
				 * will throw away :-)
				 */
				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
				ret_sz += sp->length;
				do_wakeup_routine = 1;
				sp->some_taken = 1;
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
				sp->length = 0;
			}
		}
	}
	if (do_wakeup_routine) {
#if defined(__APPLE__) && !defined(__Userspace__)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}

/*
 * checks to see if the given address, sa, is one that is currently known by
 * the kernel note: can't distinguish the same address on multiple interfaces
 * and doesn't handle multiple addresses with different zone/scope id's note:
 * ifa_ifwithaddr() compares the entire sockaddr struct
 */
struct sctp_ifa *
sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
    int holds_lock)
{
	struct sctp_laddr *laddr;

	/* holds_lock != 0 means the caller already owns the INP read lock */
	if (holds_lock == 0) {
		SCTP_INP_RLOCK(inp);
	}

	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == NULL)
			continue;
		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
			continue;
#ifdef INET
		if
(addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    laddr->ifa->address.sin.sin_addr.s_addr) {
				/* found him. */
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &laddr->ifa->address.sin6)) {
				/* found him. */
				break;
			}
		}
#endif
#if defined(__Userspace__)
		if (addr->sa_family == AF_CONN) {
			if (((struct sockaddr_conn *)addr)->sconn_addr == laddr->ifa->address.sconn.sconn_addr) {
				/* found him. */
				break;
			}
		}
#endif
	}
	if (holds_lock == 0) {
		SCTP_INP_RUNLOCK(inp);
	}
	/* laddr is non-NULL only when the loop broke on a match */
	if (laddr != NULL) {
		return (laddr->ifa);
	} else {
		return (NULL);
	}
}

/*
 * Fold a sockaddr into a 32-bit hash value for the VRF address hash table.
 * Unknown families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
	{
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)addr;
		/* mix the high half into the low half */
		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
	}
#endif
#ifdef INET6
	case AF_INET6:
	{
		struct sockaddr_in6 *sin6;
		uint32_t hash_of_addr;

		sin6 = (struct sockaddr_in6 *)addr;
		/* sum the four 32-bit words of the address; some platforms
		 * lack the s6_addr32 accessor, hence the cast variant */
#if !defined(_WIN32) && !(defined(__FreeBSD__) && defined(__Userspace__)) && !defined(__APPLE__)
		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
				sin6->sin6_addr.s6_addr32[1] +
				sin6->sin6_addr.s6_addr32[2] +
				sin6->sin6_addr.s6_addr32[3]);
#else
		hash_of_addr = (((uint32_t *)&sin6->sin6_addr)[0] +
				((uint32_t *)&sin6->sin6_addr)[1] +
				((uint32_t *)&sin6->sin6_addr)[2] +
				((uint32_t *)&sin6->sin6_addr)[3]);
#endif
		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
		return (hash_of_addr);
	}
#endif
#if defined(__Userspace__)
	case AF_CONN:
	{
		struct sockaddr_conn *sconn;
		uintptr_t temp;

		sconn = (struct sockaddr_conn *)addr;
		temp = (uintptr_t)sconn->sconn_addr;
		return ((uint32_t)(temp ^ (temp >> 16)));
	}
#endif
	default:
		break;
	}
	return (0);
}

/*
 * Look up addr in the given VRF's address hash table.  Returns the matching
 * sctp_ifa or NULL.  Takes the global address read lock unless holds_lock
 * says the caller already has it.
 */
struct sctp_ifa *
sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
{
	struct sctp_ifa *sctp_ifap;
	struct sctp_vrf *vrf;
	struct sctp_ifalist *hash_head;
	uint32_t hash_of_addr;

	if (holds_lock == 0) {
		SCTP_IPI_ADDR_RLOCK();
	} else {
		SCTP_IPI_ADDR_LOCK_ASSERT();
	}

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();
		return (NULL);
	}

	hash_of_addr = sctp_get_ifa_hash_val(addr);

	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
	if (hash_head == NULL) {
		/* should not happen; dump diagnostics and bail */
		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
			    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
			    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
		sctp_print_address(addr);
		SCTP_PRINTF("No such bucket for address\n");
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();

		return (NULL);
	}
	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    sctp_ifap->address.sin.sin_addr.s_addr) {
				/* found him. */
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &sctp_ifap->address.sin6)) {
				/* found him. */
				break;
			}
		}
#endif
#if defined(__Userspace__)
		if (addr->sa_family == AF_CONN) {
			if (((struct sockaddr_conn *)addr)->sconn_addr == sctp_ifap->address.sconn.sconn_addr) {
				/* found him.
 */
				break;
			}
		}
#endif
	}
	if (holds_lock == 0)
		SCTP_IPI_ADDR_RUNLOCK();
	/* NULL when the list was exhausted without a match */
	return (sctp_ifap);
}

/*
 * Called after the user pulled *freed_so_far bytes from the receive queue.
 * Decides whether the receive window has opened by at least rwnd_req and,
 * if so, sends a window-update SACK immediately (dropping the INP read lock
 * around the output when hold_rlock says we own it).
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
	       uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
#if defined(__FreeBSD__) && !defined(__Userspace__)
	struct epoch_tracker et;
#endif
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* hold a reference so the assoc cannot be freed under us */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		if (hold_rlock) {
			/* must not hold the INP read lock while sending */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate.
In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
#if defined(__FreeBSD__) && !defined(__Userspace__)
		NET_EPOCH_ENTER(et);
#endif
		/* Window-update SACK: tell the peer our rwnd has re-opened. */
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		                  SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
#if defined(__FreeBSD__) && !defined(__Userspace__)
		NET_EPOCH_EXIT(et);
#endif
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		                SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
 out:
	if (so && r_unlocked && hold_rlock) {
		/* Re-take the read lock the caller expects to still hold. */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}

	SCTP_INP_DECR_REF(stcb->sctp_ep);
 no_lock:
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
	return;
}

/*
 * sctp_sorecvmsg() - core receive path behind sctp_soreceive().
 *
 * so            - socket being read from.
 * uio           - destination for copied-out data; required (EINVAL if NULL).
 * mp            - if non-NULL the whole mbuf chain of the current record is
 *                 handed back to the caller instead of being copied via
 *                 uiomove() (not valid together with MSG_PEEK).
 * from/fromlen  - if non-NULL / > 0, receives the peer address of the record
 *                 being delivered.
 * msg_flags     - in: MSG_PEEK / MSG_DONTWAIT / MSG_OOB ...; on return the
 *                 output flags (MSG_EOR, MSG_NOTIFICATION, MSG_TRUNC).
 * sinfo         - optional receive info; when filling_sinfo != 0 this is
 *                 really a struct sctp_extrcvinfo if the EXT_RCVINFO or
 *                 RECVNXTINFO features are enabled on the endpoint.
 *
 * Returns 0 on success or an errno value.  May block unless non-blocking
 * I/O was requested.  Internally juggles three locks - the socket-buffer
 * lock (hold_sblock), the endpoint read-queue lock (hold_rlock) and the
 * socket I/O (sb) lock - plus an stcb refcount (freecnt_applied); every
 * goto target below exists to keep that state consistent on exit.
 */
int
sctp_sorecvmsg(struct socket *so,
	       struct uio *uio,
	       struct mbuf **mp,
	       struct sockaddr *from,
	       int fromlen,
	       int *msg_flags,
	       struct sctp_sndrcvinfo *sinfo,
	       int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
	 * On the way out we may send out any combination of:
	 * MSG_NOTIFICATION MSG_EOR
	 *
	 */
	struct sctp_inpcb *inp = NULL;
	ssize_t my_len = 0;
	ssize_t cp_len = 0;
	int error = 0;
	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
	struct mbuf *m = NULL;
	struct sctp_tcb *stcb = NULL;
	int wakeup_read_socket = 0;
	int freecnt_applied = 0;	/* non-zero once we hold a refcnt on stcb */
	int out_flags = 0, in_flags = 0;
	int block_allowed = 1;
	uint32_t freed_so_far = 0;	/* sockbuf space freed; drives rwnd updates */
	ssize_t copied_so_far = 0;	/* bytes handed to the user so far */
	int in_eeor_mode = 0;
	int no_rcv_needed = 0;
	uint32_t rwnd_req = 0;		/* threshold before a window update is worth it */
	int hold_sblock = 0;		/* non-zero while we own SOCKBUF_LOCK */
	int hold_rlock = 0;		/* non-zero while we own SCTP_INP_READ_LOCK */
	ssize_t slen = 0;
	uint32_t held_length = 0;
#if defined(__FreeBSD__) && !defined(__Userspace__)
	int sockbuf_lock = 0;		/* non-zero while we own SOCK_IO_RECV_LOCK */
#endif

	if (uio == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}

	if (msg_flags) {
		in_flags = *msg_flags;
		if (in_flags & MSG_PEEK)
			SCTP_STAT_INCR(sctps_read_peeks);
	} else {
		in_flags = 0;
	}
#if defined(__APPLE__) && !defined(__Userspace__)
#if defined(APPLE_LEOPARD)
	slen = uio->uio_resid;
#else
	slen = uio_resid(uio);
#endif
#else
	slen = uio->uio_resid;
#endif

	/* Pull in and set up our int flags */
	if (in_flags & MSG_OOB) {
		/* Out of band's NOT supported */
		return (EOPNOTSUPP);
	}
	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
		/* PEEK is incompatible with handing back the mbuf chain. */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((in_flags & (MSG_DONTWAIT
#if defined(__FreeBSD__) && !defined(__Userspace__)
			 | MSG_NBIO
#endif
	     )) ||
	    SCTP_SO_IS_NBIO(so)) {
		block_allowed = 0;
	}
	/* setup the endpoint */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
		return (EFAULT);
	}
	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
	/* Must be at least a MTU's worth */
	if (rwnd_req < SCTP_MIN_RWND)
		rwnd_req = SCTP_MIN_RWND;
	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
#if defined(__APPLE__) && !defined(__Userspace__)
#if defined(APPLE_LEOPARD)
		sctp_misc_ints(SCTP_SORECV_ENTER,
			       rwnd_req, in_eeor_mode, SCTP_SBAVAIL(&so->so_rcv), uio->uio_resid);
#else
		sctp_misc_ints(SCTP_SORECV_ENTER,
			       rwnd_req, in_eeor_mode, SCTP_SBAVAIL(&so->so_rcv), uio_resid(uio));
#endif
#else
		sctp_misc_ints(SCTP_SORECV_ENTER,
			       rwnd_req, in_eeor_mode, SCTP_SBAVAIL(&so->so_rcv), (uint32_t)uio->uio_resid);
#endif
	}
#if defined(__Userspace__)
	SOCKBUF_LOCK(&so->so_rcv);
	hold_sblock = 1;
#endif
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
#if defined(__APPLE__) && !defined(__Userspace__)
#if defined(APPLE_LEOPARD)
		sctp_misc_ints(SCTP_SORECV_ENTERPL,
			       rwnd_req, block_allowed, SCTP_SBAVAIL(&so->so_rcv), uio->uio_resid);
#else
		sctp_misc_ints(SCTP_SORECV_ENTERPL,
			       rwnd_req, block_allowed, SCTP_SBAVAIL(&so->so_rcv), uio_resid(uio));
#endif
#else
		sctp_misc_ints(SCTP_SORECV_ENTERPL,
			       rwnd_req, block_allowed, SCTP_SBAVAIL(&so->so_rcv), (uint32_t)uio->uio_resid);
#endif
	}

	/* Serialize concurrent readers on this socket. */
#if defined(__APPLE__) && !defined(__Userspace__)
	error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
	error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(in_flags));
#endif
	if (error) {
		goto release_unlocked;
	}
#if defined(__FreeBSD__) && !defined(__Userspace__)
	sockbuf_lock = 1;
#endif
 restart:
#if defined(__Userspace__)
	if (hold_sblock == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		hold_sblock = 1;
	}
#endif
#if defined(__APPLE__) && !defined(__Userspace__)
	sbunlock(&so->so_rcv, 1);
#endif

 restart_nosblocks:
	if (hold_sblock == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		hold_sblock = 1;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && SCTP_SBAVAIL(&so->so_rcv) == 0) {
#else
	if ((so->so_state & SS_CANTRCVMORE) && SCTP_SBAVAIL(&so->so_rcv) == 0) {
#endif
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto out;
		} else {
			if (SCTP_SBAVAIL(&so->so_rcv) == 0) {
				/* indicate EOF */
				error = 0;
				goto out;
			}
		}
	}
	if (SCTP_SBAVAIL(&so->so_rcv) <= held_length) {
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0) {
				so->so_error = 0;
			}
			goto out;
		}
		if ((SCTP_SBAVAIL(&so->so_rcv) == 0) &&
		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		     (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
				/* For active open side clear flags for re-use
				 * passive open is blocked by connect.
				 */
				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
					/* You were aborted, passive side always hits here */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
					error = ECONNRESET;
				}
				so->so_state &= ~(SS_ISCONNECTING |
						  SS_ISDISCONNECTING |
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
						  SS_ISCONFIRMING |
#endif
						  SS_ISCONNECTED);
				if (error == 0) {
					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
						error = ENOTCONN;
					}
				}
				goto out;
			}
		}
		if (block_allowed) {
			/* Sleep until the sockbuf gains data, then re-check. */
#if defined(__FreeBSD__) && !defined(__Userspace__)
			error = sbwait(so, SO_RCV);
#else
			error = sbwait(&so->so_rcv);
#endif
			if (error) {
				goto out;
			}
			held_length = 0;
			goto restart_nosblocks;
		} else {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
			error = EWOULDBLOCK;
			goto out;
		}
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
#if defined(__APPLE__) && !defined(__Userspace__)
	error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
#endif
	/* we possibly have data we can read */
	/*sa_ignore FREED_MEMORY*/
	control = TAILQ_FIRST(&inp->read_queue);
	if (control == NULL) {
		/* This could be happening since
		 * the appender did the increment but as not
		 * yet did the tailq insert onto the read_queue
		 */
		if (hold_rlock == 0) {
			SCTP_INP_READ_LOCK(inp);
		}
		control = TAILQ_FIRST(&inp->read_queue);
		if ((control == NULL) && (SCTP_SBAVAIL(&so->so_rcv) > 0)) {
#ifdef INVARIANTS
			panic("Huh, its non zero and nothing on control?");
#endif
			SCTP_SB_CLEAR(so->so_rcv);
		}
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
		goto restart;
	}

	if ((control->length == 0) &&
	    (control->do_not_ref_stcb)) {
		/* Clean up code for freeing assoc that left behind a pdapi..
		 * maybe a peer in EEOR that just closed after sending and
		 * never indicated a EOR.
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		control->held_length = 0;
		if (control->data) {
			/* Hmm there is data here .. fix */
			struct mbuf *m_tmp;
			int cnt = 0;
			m_tmp = control->data;
			while (m_tmp) {
				cnt += SCTP_BUF_LEN(m_tmp);
				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
					control->tail_mbuf = m_tmp;
					control->end_added = 1;
				}
				m_tmp = SCTP_BUF_NEXT(m_tmp);
			}
			control->length = cnt;
		} else {
			/* remove it */
			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hidden data */
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->end_added == 1)) {
		/* Do we also need to check for (control->pdapi_aborted == 1)? */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		TAILQ_REMOVE(&inp->read_queue, control, next);
		if (control->data) {
#ifdef INVARIANTS
			panic("control->data not null but control->length == 0");
#else
			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
			sctp_m_freem(control->data);
			control->data = NULL;
#endif
		}
		if (control->aux_data) {
			sctp_m_free(control->aux_data);
			control->aux_data = NULL;
		}
#ifdef INVARIANTS
		if (control->on_strm_q) {
			panic("About to free ctl:%p so:%p and its in %d",
			      control, so, control->on_strm_q);
		}
#endif
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if (control->length == 0) {
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
		    (filling_sinfo)) {
			/* find a more suitable one then this */
			ctl = TAILQ_NEXT(control, next);
			while (ctl) {
				if ((ctl->stcb != control->stcb) && (ctl->length) &&
				    (ctl->some_taken ||
				     (ctl->spec_flags & M_NOTIFICATION) ||
				     ((ctl->do_not_ref_stcb == 0) &&
				      (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have a different TCB next, and there is data
					 * present. If we have already taken some (pdapi), OR we can
					 * ref the tcb and no delivery as started on this stream, we
					 * take it. Note we allow a notification on a different
					 * assoc to be delivered..
					 */
					control = ctl;
					goto found_one;
				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
					   (ctl->length) &&
					   ((ctl->some_taken) ||
					    ((ctl->do_not_ref_stcb == 0) &&
					     ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
					     (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to tht tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notification on the same assoc to
					 * be delivered.
					 */
					control = ctl;
					goto found_one;
				}
				ctl = TAILQ_NEXT(ctl, next);
			}
		}
		/*
		 * if we reach here, not suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into the our held count, and its time to sleep again.
		 */
		held_length = SCTP_SBAVAIL(&so->so_rcv);
		control->held_length = SCTP_SBAVAIL(&so->so_rcv);
		goto restart;
	}
	/* Clear the held length since there is something to read */
	control->held_length = 0;
 found_one:
	/*
	 * If we reach here, control has a some data for us to read off.
	 * Note that stcb COULD be NULL.
	 */
	if (hold_rlock == 0) {
		hold_rlock = 1;
		SCTP_INP_READ_LOCK(inp);
	}
	control->some_taken++;
	stcb = control->stcb;
	if (stcb) {
		if ((control->do_not_ref_stcb == 0) &&
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			if (freecnt_applied == 0)
				stcb = NULL;
		} else if (control->do_not_ref_stcb == 0) {
			/* you can't free it on me please */
			/*
			 * The lock on the socket buffer protects us so the
			 * free code will stop. But since we used the socketbuf
			 * lock and the sender uses the tcb_lock to increment,
			 * we need to use the atomic add to the refcnt
			 */
			if (freecnt_applied) {
#ifdef INVARIANTS
				panic("refcnt already incremented");
#else
				SCTP_PRINTF("refcnt already incremented?\n");
#endif
			} else {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				freecnt_applied = 1;
			}
			/*
			 * Setup to remember how much we have not yet told
			 * the peer our rwnd has opened up. Note we grab
			 * the value from the tcb from last time.
			 * Note too that sack sending clears this when a sack
			 * is sent, which is fine. Once we hit the rwnd_req,
			 * we then will go to the sctp_user_rcvd() that will
			 * not lock until it KNOWs it MUST send a WUP-SACK.
			 */
			freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
			stcb->freed_by_sorcv_sincelast = 0;
		}
	}
	if (stcb &&
	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
	    control->do_not_ref_stcb == 0) {
		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
	}

	/* First lets get off the sinfo and sockaddr info */
	if ((sinfo != NULL) && (filling_sinfo != 0)) {
		sinfo->sinfo_stream = control->sinfo_stream;
		sinfo->sinfo_ssn = (uint16_t)control->mid;
		sinfo->sinfo_flags = control->sinfo_flags;
		sinfo->sinfo_ppid = control->sinfo_ppid;
		sinfo->sinfo_context = control->sinfo_context;
		sinfo->sinfo_timetolive = control->sinfo_timetolive;
		sinfo->sinfo_tsn = control->sinfo_tsn;
		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
		nxt = TAILQ_NEXT(control, next);
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
			/* sinfo is really an sctp_extrcvinfo here; describe the
			 * next queued message (if any) for the application.
			 */
			struct sctp_extrcvinfo *s_extra;
			s_extra = (struct sctp_extrcvinfo *)sinfo;
			if ((nxt) &&
			    (nxt->length)) {
				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
				if (nxt->sinfo_flags & SCTP_UNORDERED) {
					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
				}
				if (nxt->spec_flags & M_NOTIFICATION) {
					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
				}
				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
				s_extra->serinfo_next_length = nxt->length;
				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
				s_extra->serinfo_next_stream = nxt->sinfo_stream;
				if (nxt->tail_mbuf != NULL) {
					if (nxt->end_added) {
						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
					}
				}
			} else {
				/* we explicitly 0 this, since the memcpy got
				 * some other things beyond the older sinfo_
				 * that is on the control's structure :-D
				 */
				nxt = NULL;
				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
				s_extra->serinfo_next_aid = 0;
				s_extra->serinfo_next_length = 0;
				s_extra->serinfo_next_ppid = 0;
				s_extra->serinfo_next_stream = 0;
			}
		}
		/*
		 * update off the real current cum-ack, if we have an stcb.
		 */
		if ((control->do_not_ref_stcb == 0) && stcb)
			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
		/*
		 * mask off the high bits, we keep the actual chunk bits in
		 * there.
		 */
		sinfo->sinfo_flags &= 0x00ff;
		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
			sinfo->sinfo_flags |= SCTP_UNORDERED;
		}
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	{
		/* Lock-free claim of a slot in the endpoint's read log. */
		int index, newindex;
		struct sctp_pcbtsn_rlog *entry;
		do {
			index = inp->readlog_index;
			newindex = index + 1;
			if (newindex >= SCTP_READ_LOG_SIZE) {
				newindex = 0;
			}
		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
		entry = &inp->readlog[index];
		entry->vtag = control->sinfo_assoc_id;
		entry->strm = control->sinfo_stream;
		entry->seq = (uint16_t)control->mid;
		entry->sz = control->length;
		entry->flgs = control->sinfo_flags;
	}
#endif
	if ((fromlen > 0) && (from != NULL)) {
		union sctp_sockstore store;
		size_t len;

		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
#ifdef INET6
		case AF_INET6:
			len = sizeof(struct sockaddr_in6);
			store.sin6 = control->whoFrom->ro._l_addr.sin6;
			store.sin6.sin6_port = control->port_from;
			break;
#endif
#ifdef INET
		case AF_INET:
#ifdef INET6
			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
				len = sizeof(struct sockaddr_in6);
				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
						    &store.sin6);
				store.sin6.sin6_port = control->port_from;
			} else {
				len = sizeof(struct sockaddr_in);
				store.sin = control->whoFrom->ro._l_addr.sin;
				store.sin.sin_port = control->port_from;
			}
#else
			len = sizeof(struct sockaddr_in);
			store.sin = control->whoFrom->ro._l_addr.sin;
			store.sin.sin_port = control->port_from;
#endif
			break;
#endif
#if defined(__Userspace__)
		case AF_CONN:
			len = sizeof(struct sockaddr_conn);
			store.sconn = control->whoFrom->ro._l_addr.sconn;
			store.sconn.sconn_port = control->port_from;
			break;
#endif
		default:
			len = 0;
			break;
		}
		memcpy(from, &store, min((size_t)fromlen, len));
#if defined(SCTP_EMBEDDED_V6_SCOPE)
#ifdef INET6
		{
			struct sockaddr_in6 lsa6, *from6;

			from6 = (struct sockaddr_in6 *)from;
			sctp_recover_scope_mac(from6, (&lsa6));
		}
#endif
#endif
	}
	if (hold_rlock) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* now copy out what data we can */
	if (mp == NULL) {
		/* copy out each mbuf in the chain up to length */
	get_more_data:
		m = control->data;
		while (m) {
			/* Move out all we can */
#if defined(__APPLE__) && !defined(__Userspace__)
#if defined(APPLE_LEOPARD)
			cp_len = uio->uio_resid;
#else
			cp_len = uio_resid(uio);
#endif
#else
			cp_len = uio->uio_resid;
#endif
			my_len = SCTP_BUF_LEN(m);
			if (cp_len > my_len) {
				/* not enough in this buf */
				cp_len = my_len;
			}
			if (hold_rlock) {
				SCTP_INP_READ_UNLOCK(inp);
				hold_rlock = 0;
			}
#if defined(__APPLE__) && !defined(__Userspace__)
			SCTP_SOCKET_UNLOCK(so, 0);
#endif
			/* uiomove() may sleep/fault, so all locks are dropped. */
			if (cp_len > 0)
				error = uiomove(mtod(m, char *), (int)cp_len, uio);
#if defined(__APPLE__) && !defined(__Userspace__)
			SCTP_SOCKET_LOCK(so, 0);
#endif
			/* re-read */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				goto release;
			}

			if ((control->do_not_ref_stcb == 0) && stcb &&
			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				no_rcv_needed = 1;
			}
			if (error) {
				/* error we are out of here */
				goto release;
			}
			SCTP_INP_READ_LOCK(inp);
			hold_rlock = 1;
			if (cp_len == SCTP_BUF_LEN(m)) {
				if ((SCTP_BUF_NEXT(m) == NULL) &&
				    (control->end_added)) {
					out_flags |= MSG_EOR;
					if ((control->do_not_ref_stcb == 0) &&
					    (control->stcb != NULL) &&
					    ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				/* we ate up the mbuf */
				if (in_flags & MSG_PEEK) {
					/* just looking */
					m = SCTP_BUF_NEXT(m);
					copied_so_far += cp_len;
				} else {
					/* dispose of the mbuf */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
							   control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
					}
					sctp_sbfree(control, stcb, &so->so_rcv, m);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
							   control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
					}
					copied_so_far += cp_len;
					freed_so_far += (uint32_t)cp_len;
					freed_so_far += MSIZE;
					atomic_subtract_int(&control->length, (int)cp_len);
					control->data = sctp_m_free(m);
					m = control->data;
					/* been through it all, must hold sb lock ok to null tail */
					if (control->data == NULL) {
#ifdef INVARIANTS
#if defined(__FreeBSD__) && !defined(__Userspace__)
						if ((control->end_added == 0) ||
						    (TAILQ_NEXT(control, next) == NULL)) {
							/* If the end is not added, OR the
							 * next is NOT null we MUST have the lock.
							 */
							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
								panic("Hmm we don't own the lock?");
							}
						}
#endif
#endif
						control->tail_mbuf = NULL;
#ifdef INVARIANTS
						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
							panic("end_added, nothing left and no MSG_EOR");
						}
#endif
					}
				}
			} else {
				/* Do we need to trim the mbuf? */
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				if ((in_flags & MSG_PEEK) == 0) {
					SCTP_BUF_RESV_UF(m, cp_len);
					SCTP_BUF_LEN(m) -= (int)cp_len;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len);
					}
					SCTP_SB_DECR(&so->so_rcv, cp_len);
					if ((control->do_not_ref_stcb == 0) &&
					    stcb) {
						atomic_subtract_int(&stcb->asoc.sb_cc, (int)cp_len);
					}
					copied_so_far += cp_len;
					freed_so_far += (uint32_t)cp_len;
					freed_so_far += MSIZE;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
							   SCTP_LOG_SBRESULT, 0);
					}
					atomic_subtract_int(&control->length, (int)cp_len);
				} else {
					copied_so_far += cp_len;
				}
			}
#if defined(__APPLE__) && !defined(__Userspace__)
#if defined(APPLE_LEOPARD)
			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
#else
			if ((out_flags & MSG_EOR) || (uio_resid(uio) == 0)) {
#endif
#else
			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
#endif
				break;
			}
			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
			    (control->do_not_ref_stcb == 0) &&
			    (freed_so_far >= rwnd_req)) {
				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
			}
		} /* end while(m) */
		/*
		 * At this point we have looked at it all and we either have
		 * a MSG_EOR/or read all the user wants... <OR>
		 * control->length == 0.
		 */
		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
			/* we are done with this control */
			if (control->length == 0) {
				if (control->data) {
#ifdef INVARIANTS
					panic("control->data not null at read eor?");
#else
					SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
					sctp_m_freem(control->data);
					control->data = NULL;
#endif
				}
			done_with_control:
				if (hold_rlock == 0) {
					SCTP_INP_READ_LOCK(inp);
					hold_rlock = 1;
				}
				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hidden data */
				if (control->held_length) {
					held_length = 0;
					control->held_length = 0;
					wakeup_read_socket = 1;
				}
				if (control->aux_data) {
					sctp_m_free(control->aux_data);
					control->aux_data = NULL;
				}
				no_rcv_needed = control->do_not_ref_stcb;
				sctp_free_remote_addr(control->whoFrom);
				control->data = NULL;
#ifdef INVARIANTS
				if (control->on_strm_q) {
					panic("About to free ctl:%p so:%p and its in %d",
					      control, so, control->on_strm_q);
				}
#endif
				sctp_free_a_readq(stcb, control);
				control = NULL;
				if ((freed_so_far >= rwnd_req) &&
				    (no_rcv_needed == 0))
					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);

			} else {
				/*
				 * The user did not read all of this
				 * message, turn off the returned MSG_EOR
				 * since we are leaving more behind on the
				 * control to read.
				 */
#ifdef INVARIANTS
				if (control->end_added &&
				    (control->data == NULL) &&
				    (control->tail_mbuf == NULL)) {
					panic("Gak, control->length is corrupt?");
				}
#endif
				no_rcv_needed = control->do_not_ref_stcb;
				out_flags &= ~MSG_EOR;
			}
		}
		if (out_flags & MSG_EOR) {
			goto release;
		}
#if defined(__APPLE__) && !defined(__Userspace__)
#if defined(APPLE_LEOPARD)
		if ((uio->uio_resid == 0) ||
#else
		if ((uio_resid(uio) == 0) ||
#endif
#else
		if ((uio->uio_resid == 0) ||
#endif
		    ((in_eeor_mode) &&
		     (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
			goto release;
		}
		/*
		 * If I hit here the receiver wants more and this message is
		 * NOT done (pd-api). So two questions. Can we block? if not
		 * we are done. Did the user NOT set MSG_WAITALL?
		 */
		if (block_allowed == 0) {
			goto release;
		}
		/*
		 * We need to wait for more data a few things:
		 * - We don't release the I/O lock so we don't get someone else
		 *   reading.
		 * - We must be sure to account for the case where what is added
		 *   is NOT to our control when we wakeup.
		 */

		/* Do we need to tell the transport a rwnd update might be
		 * needed before we go to sleep?
		 */
		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
		    ((freed_so_far >= rwnd_req) &&
		     (control->do_not_ref_stcb == 0) &&
		     (no_rcv_needed == 0))) {
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
		}
	wait_some_more:
#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			goto release;
		}
#else
		if (so->so_state & SS_CANTRCVMORE) {
			goto release;
		}
#endif

		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
			goto release;

		if (hold_rlock == 1) {
			SCTP_INP_READ_UNLOCK(inp);
			hold_rlock = 0;
		}
		if (hold_sblock == 0) {
			SOCKBUF_LOCK(&so->so_rcv);
			hold_sblock = 1;
		}
		if ((copied_so_far) && (control->length == 0) &&
		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
			goto release;
		}
#if defined(__APPLE__) && !defined(__Userspace__)
		sbunlock(&so->so_rcv, 1);
#endif
		if (SCTP_SBAVAIL(&so->so_rcv) <= control->held_length) {
#if defined(__FreeBSD__) && !defined(__Userspace__)
			error = sbwait(so, SO_RCV);
#else
			error = sbwait(&so->so_rcv);
#endif
			if (error) {
#if defined(__APPLE__) && !defined(__Userspace__)
				goto release_unlocked;
#else
				goto release;
#endif
			}
			control->held_length = 0;
		}
#if defined(__APPLE__) && !defined(__Userspace__)
		error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
#endif
		if (hold_sblock) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			hold_sblock = 0;
		}
		if (control->length == 0) {
			/* still nothing here */
			if (control->end_added == 1) {
				/* he aborted, or is done i.e.did a shutdown */
				out_flags |= MSG_EOR;
				if (control->pdapi_aborted) {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;

					out_flags |= MSG_TRUNC;
				} else {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				goto done_with_control;
			}
			if (SCTP_SBAVAIL(&so->so_rcv) > held_length) {
				control->held_length = SCTP_SBAVAIL(&so->so_rcv);
				held_length = 0;
			}
			goto wait_some_more;
		} else if (control->data == NULL) {
			/* we must re-sync since data
			 * is probably being added
			 */
			SCTP_INP_READ_LOCK(inp);
			if ((control->length > 0) && (control->data == NULL)) {
				/* big trouble.. we have the lock and its corrupt? */
#ifdef INVARIANTS
				panic("Impossible data==NULL length !=0");
#endif
				out_flags |= MSG_EOR;
				out_flags |= MSG_TRUNC;
				control->length = 0;
				SCTP_INP_READ_UNLOCK(inp);
				goto done_with_control;
			}
			SCTP_INP_READ_UNLOCK(inp);
			/* We will fall around to get more data */
		}
		goto get_more_data;
	} else {
		/*-
		 * Give caller back the mbuf chain,
		 * store in uio_resid the length
		 */
		wakeup_read_socket = 0;
		if ((control->end_added == 0) ||
		    (TAILQ_NEXT(control, next) == NULL)) {
			/* Need to get rlock */
			if (hold_rlock == 0) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
		}
		if (control->end_added) {
			out_flags |= MSG_EOR;
			if ((control->do_not_ref_stcb == 0) &&
			    (control->stcb != NULL) &&
			    ((control->spec_flags & M_NOTIFICATION) == 0))
				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
		}
		if (control->spec_flags & M_NOTIFICATION) {
			out_flags |= MSG_NOTIFICATION;
		}
#if defined(__APPLE__) && !defined(__Userspace__)
#if defined(APPLE_LEOPARD)
		uio->uio_resid = control->length;
#else
		uio_setresid(uio, control->length);
#endif
#else
		uio->uio_resid = control->length;
#endif
		*mp = control->data;
		m = control->data;
		while (m) {
			/* Account for every mbuf handed to the caller. */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
					   control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
			}
			sctp_sbfree(control, stcb, &so->so_rcv, m);
			freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
			freed_so_far += MSIZE;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
					   control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control->data = control->tail_mbuf = NULL;
		control->length = 0;
		if (out_flags & MSG_EOR) {
			/* Done with this control */
			goto done_with_control;
		}
	}
 release:
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
#if defined(__Userspace__)
	if (hold_sblock == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		hold_sblock = 1;
	}
#else
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
#endif
#if defined(__APPLE__) && !defined(__Userspace__)
	sbunlock(&so->so_rcv, 1);
#endif

#if defined(__FreeBSD__) && !defined(__Userspace__)
	SOCK_IO_RECV_UNLOCK(so);
	sockbuf_lock = 0;
#endif

 release_unlocked:
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
		if ((freed_so_far >= rwnd_req) &&
		    (control && (control->do_not_ref_stcb == 0)) &&
		    (no_rcv_needed == 0))
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
	}
 out:
	if (msg_flags) {
		*msg_flags = out_flags;
	}
	if (((out_flags & MSG_EOR) == 0) &&
	    ((in_flags & MSG_PEEK) == 0) &&
	    (sinfo) &&
	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
	     sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
		struct sctp_extrcvinfo *s_extra;
		s_extra = (struct sctp_extrcvinfo *)sinfo;
		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
	}
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
	}
#if defined(__FreeBSD__) && !defined(__Userspace__)
	if (sockbuf_lock) {
		SOCK_IO_RECV_UNLOCK(so);
	}
#endif

	if (freecnt_applied) {
		/*
		 * The lock on the socket buffer protects us so the free
		 * code will stop. But since we used the socketbuf lock and
		 * the sender uses the tcb_lock to increment, we need to use
		 * the atomic add to the refcnt.
		 */
		if (stcb == NULL) {
#ifdef INVARIANTS
			panic("stcb for refcnt has gone NULL?");
			goto stage_left;
#else
			goto stage_left;
#endif
		}
		/* Save the value back for next time */
		stcb->freed_by_sorcv_sincelast = freed_so_far;
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
				       freed_so_far,
#if defined(__APPLE__) && !defined(__Userspace__)
#if defined(APPLE_LEOPARD)
				       ((uio) ? (slen - uio->uio_resid) : slen),
#else
				       ((uio) ? (slen - uio_resid(uio)) : slen),
#endif
#else
				       (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
#endif
				       stcb->asoc.my_rwnd,
				       SCTP_SBAVAIL(&so->so_rcv));
		} else {
			sctp_misc_ints(SCTP_SORECV_DONE,
				       freed_so_far,
#if defined(__APPLE__) && !defined(__Userspace__)
#if defined(APPLE_LEOPARD)
				       ((uio) ? (slen - uio->uio_resid) : slen),
#else
				       ((uio) ? (slen - uio_resid(uio)) : slen),
#endif
#else
				       (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
#endif
				       0,
				       SCTP_SBAVAIL(&so->so_rcv));
		}
	}
 stage_left:
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}

#ifdef SCTP_MBUF_LOGGING
/*
 * m_free() wrapper that records the mbuf in the trace log when
 * SCTP_MBUF_LOGGING_ENABLE is set; returns the next mbuf in the chain.
 */
struct mbuf *
sctp_m_free(struct mbuf *m)
{
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mb(m, SCTP_MBUF_IFREE);
	}
	return (m_free(m));
}

/* Free an entire mbuf chain via the logging-aware sctp_m_free(). */
void
sctp_m_freem(struct mbuf *mb)
{
	while (mb != NULL)
		mb = sctp_m_free(mb);
}

#endif

/*
 * Queue a peer-set-primary request for every association holding the given
 * local address.  Returns 0, EADDRNOTAVAIL if the address is not local, or
 * ENOMEM if the work-queue item cannot be allocated.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/* Given a local address. For all associations
	 * that holds the address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/* Now that we have the ifa we must awaken the
	 * iterator with this message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	memset(wi, 0, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq?
As it is we will process the 7232 * newest first :-0 7233 */ 7234 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 7235 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 7236 (struct sctp_inpcb *)NULL, 7237 (struct sctp_tcb *)NULL, 7238 (struct sctp_nets *)NULL); 7239 SCTP_WQ_ADDR_UNLOCK(); 7240 return (0); 7241 } 7242 7243 #if defined(__Userspace__) 7244 /* no sctp_soreceive for __Userspace__ now */ 7245 #endif 7246 #if !defined(__Userspace__) 7247 int 7248 sctp_soreceive( struct socket *so, 7249 struct sockaddr **psa, 7250 struct uio *uio, 7251 struct mbuf **mp0, 7252 struct mbuf **controlp, 7253 int *flagsp) 7254 { 7255 int error, fromlen; 7256 uint8_t sockbuf[256]; 7257 struct sockaddr *from; 7258 struct sctp_extrcvinfo sinfo; 7259 int filling_sinfo = 1; 7260 int flags; 7261 struct sctp_inpcb *inp; 7262 7263 inp = (struct sctp_inpcb *)so->so_pcb; 7264 /* pickup the assoc we are reading from */ 7265 if (inp == NULL) { 7266 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7267 return (EINVAL); 7268 } 7269 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 7270 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 7271 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 7272 (controlp == NULL)) { 7273 /* user does not want the sndrcv ctl */ 7274 filling_sinfo = 0; 7275 } 7276 if (psa) { 7277 from = (struct sockaddr *)sockbuf; 7278 fromlen = sizeof(sockbuf); 7279 #ifdef HAVE_SA_LEN 7280 from->sa_len = 0; 7281 #endif 7282 } else { 7283 from = NULL; 7284 fromlen = 0; 7285 } 7286 7287 #if defined(__APPLE__) && !defined(__Userspace__) 7288 SCTP_SOCKET_LOCK(so, 1); 7289 #endif 7290 if (filling_sinfo) { 7291 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 7292 } 7293 if (flagsp != NULL) { 7294 flags = *flagsp; 7295 } else { 7296 flags = 0; 7297 } 7298 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 7299 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 7300 if (flagsp != NULL) { 7301 *flagsp = flags; 7302 } 7303 
if (controlp != NULL) { 7304 /* copy back the sinfo in a CMSG format */ 7305 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 7306 *controlp = sctp_build_ctl_nchunk(inp, 7307 (struct sctp_sndrcvinfo *)&sinfo); 7308 } else { 7309 *controlp = NULL; 7310 } 7311 } 7312 if (psa) { 7313 /* copy back the address info */ 7314 #ifdef HAVE_SA_LEN 7315 if (from && from->sa_len) { 7316 #else 7317 if (from) { 7318 #endif 7319 #if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__) 7320 *psa = sodupsockaddr(from, M_NOWAIT); 7321 #else 7322 *psa = dup_sockaddr(from, mp0 == 0); 7323 #endif 7324 } else { 7325 *psa = NULL; 7326 } 7327 } 7328 #if defined(__APPLE__) && !defined(__Userspace__) 7329 SCTP_SOCKET_UNLOCK(so, 1); 7330 #endif 7331 return (error); 7332 } 7333 7334 #if defined(_WIN32) && !defined(__Userspace__) 7335 /* 7336 * General routine to allocate a hash table with control of memory flags. 7337 * is in 7.0 and beyond for sure :-) 7338 */ 7339 void * 7340 sctp_hashinit_flags(int elements, struct malloc_type *type, 7341 u_long *hashmask, int flags) 7342 { 7343 long hashsize; 7344 LIST_HEAD(generic, generic) *hashtbl; 7345 int i; 7346 7347 7348 if (elements <= 0) { 7349 #ifdef INVARIANTS 7350 panic("hashinit: bad elements"); 7351 #else 7352 SCTP_PRINTF("hashinit: bad elements?"); 7353 elements = 1; 7354 #endif 7355 } 7356 for (hashsize = 1; hashsize <= elements; hashsize <<= 1) 7357 continue; 7358 hashsize >>= 1; 7359 if (flags & HASH_WAITOK) 7360 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK); 7361 else if (flags & HASH_NOWAIT) 7362 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_NOWAIT); 7363 else { 7364 #ifdef INVARIANTS 7365 panic("flag incorrect in hashinit_flags"); 7366 #else 7367 return (NULL); 7368 #endif 7369 } 7370 7371 /* no memory? 
*/ 7372 if (hashtbl == NULL) 7373 return (NULL); 7374 7375 for (i = 0; i < hashsize; i++) 7376 LIST_INIT(&hashtbl[i]); 7377 *hashmask = hashsize - 1; 7378 return (hashtbl); 7379 } 7380 #endif 7381 #else /* __Userspace__ ifdef above sctp_soreceive */ 7382 /* 7383 * __Userspace__ Defining sctp_hashinit_flags() and sctp_hashdestroy() for userland. 7384 * NOTE: We don't want multiple definitions here. So sctp_hashinit_flags() above for 7385 *__FreeBSD__ must be excluded. 7386 * 7387 */ 7388 7389 void * 7390 sctp_hashinit_flags(int elements, struct malloc_type *type, 7391 u_long *hashmask, int flags) 7392 { 7393 long hashsize; 7394 LIST_HEAD(generic, generic) *hashtbl; 7395 int i; 7396 7397 if (elements <= 0) { 7398 SCTP_PRINTF("hashinit: bad elements?"); 7399 #ifdef INVARIANTS 7400 return (NULL); 7401 #else 7402 elements = 1; 7403 #endif 7404 } 7405 for (hashsize = 1; hashsize <= elements; hashsize <<= 1) 7406 continue; 7407 hashsize >>= 1; 7408 /*cannot use MALLOC here because it has to be declared or defined 7409 using MALLOC_DECLARE or MALLOC_DEFINE first. */ 7410 if (flags & HASH_WAITOK) 7411 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl)); 7412 else if (flags & HASH_NOWAIT) 7413 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl)); 7414 else { 7415 #ifdef INVARIANTS 7416 SCTP_PRINTF("flag incorrect in hashinit_flags.\n"); 7417 #endif 7418 return (NULL); 7419 } 7420 7421 /* no memory? 
*/ 7422 if (hashtbl == NULL) 7423 return (NULL); 7424 7425 for (i = 0; i < hashsize; i++) 7426 LIST_INIT(&hashtbl[i]); 7427 *hashmask = hashsize - 1; 7428 return (hashtbl); 7429 } 7430 7431 void 7432 sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask) 7433 { 7434 LIST_HEAD(generic, generic) *hashtbl, *hp; 7435 7436 hashtbl = vhashtbl; 7437 for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++) 7438 if (!LIST_EMPTY(hp)) { 7439 SCTP_PRINTF("hashdestroy: hash not empty.\n"); 7440 return; 7441 } 7442 FREE(hashtbl, type); 7443 } 7444 7445 void 7446 sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask) 7447 { 7448 LIST_HEAD(generic, generic) *hashtbl/*, *hp*/; 7449 /* 7450 LIST_ENTRY(type) *start, *temp; 7451 */ 7452 hashtbl = vhashtbl; 7453 /* Apparently temp is not dynamically allocated, so attempts to 7454 free it results in error. 7455 for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++) 7456 if (!LIST_EMPTY(hp)) { 7457 start = LIST_FIRST(hp); 7458 while (start != NULL) { 7459 temp = start; 7460 start = start->le_next; 7461 SCTP_PRINTF("%s: %p \n", __func__, (void *)temp); 7462 FREE(temp, type); 7463 } 7464 } 7465 */ 7466 FREE(hashtbl, type); 7467 } 7468 7469 #endif 7470 int 7471 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 7472 int totaddr, int *error) 7473 { 7474 int added = 0; 7475 int i; 7476 struct sctp_inpcb *inp; 7477 struct sockaddr *sa; 7478 size_t incr = 0; 7479 #ifdef INET 7480 struct sockaddr_in *sin; 7481 #endif 7482 #ifdef INET6 7483 struct sockaddr_in6 *sin6; 7484 #endif 7485 7486 sa = addr; 7487 inp = stcb->sctp_ep; 7488 *error = 0; 7489 for (i = 0; i < totaddr; i++) { 7490 switch (sa->sa_family) { 7491 #ifdef INET 7492 case AF_INET: 7493 incr = sizeof(struct sockaddr_in); 7494 sin = (struct sockaddr_in *)sa; 7495 if ((sin->sin_addr.s_addr == INADDR_ANY) || 7496 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 7497 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 7498 
SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7499 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 7500 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 7501 *error = EINVAL; 7502 goto out_now; 7503 } 7504 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 7505 SCTP_DONOT_SETSCOPE, 7506 SCTP_ADDR_IS_CONFIRMED)) { 7507 /* assoc gone no un-lock */ 7508 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 7509 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 7510 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 7511 *error = ENOBUFS; 7512 goto out_now; 7513 } 7514 added++; 7515 break; 7516 #endif 7517 #ifdef INET6 7518 case AF_INET6: 7519 incr = sizeof(struct sockaddr_in6); 7520 sin6 = (struct sockaddr_in6 *)sa; 7521 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 7522 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 7523 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7524 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 7525 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 7526 *error = EINVAL; 7527 goto out_now; 7528 } 7529 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 7530 SCTP_DONOT_SETSCOPE, 7531 SCTP_ADDR_IS_CONFIRMED)) { 7532 /* assoc gone no un-lock */ 7533 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 7534 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 7535 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 7536 *error = ENOBUFS; 7537 goto out_now; 7538 } 7539 added++; 7540 break; 7541 #endif 7542 #if defined(__Userspace__) 7543 case AF_CONN: 7544 incr = sizeof(struct sockaddr_conn); 7545 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 7546 SCTP_DONOT_SETSCOPE, 7547 SCTP_ADDR_IS_CONFIRMED)) { 7548 /* assoc gone no un-lock */ 7549 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 7550 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 7551 SCTP_FROM_SCTPUTIL + SCTP_LOC_11); 7552 *error = ENOBUFS; 7553 goto out_now; 7554 } 7555 added++; 7556 break; 7557 #endif 7558 default: 7559 break; 7560 } 
7561 sa = (struct sockaddr *)((caddr_t)sa + incr); 7562 } 7563 out_now: 7564 return (added); 7565 } 7566 7567 int 7568 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 7569 unsigned int totaddr, 7570 unsigned int *num_v4, unsigned int *num_v6, 7571 unsigned int limit) 7572 { 7573 struct sockaddr *sa; 7574 struct sctp_tcb *stcb; 7575 unsigned int incr, at, i; 7576 7577 at = 0; 7578 sa = addr; 7579 *num_v6 = *num_v4 = 0; 7580 /* account and validate addresses */ 7581 if (totaddr == 0) { 7582 return (EINVAL); 7583 } 7584 for (i = 0; i < totaddr; i++) { 7585 if (at + sizeof(struct sockaddr) > limit) { 7586 return (EINVAL); 7587 } 7588 switch (sa->sa_family) { 7589 #ifdef INET 7590 case AF_INET: 7591 incr = (unsigned int)sizeof(struct sockaddr_in); 7592 #ifdef HAVE_SA_LEN 7593 if (sa->sa_len != incr) { 7594 return (EINVAL); 7595 } 7596 #endif 7597 (*num_v4) += 1; 7598 break; 7599 #endif 7600 #ifdef INET6 7601 case AF_INET6: 7602 { 7603 struct sockaddr_in6 *sin6; 7604 7605 incr = (unsigned int)sizeof(struct sockaddr_in6); 7606 #ifdef HAVE_SA_LEN 7607 if (sa->sa_len != incr) { 7608 return (EINVAL); 7609 } 7610 #endif 7611 sin6 = (struct sockaddr_in6 *)sa; 7612 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 7613 /* Must be non-mapped for connectx */ 7614 return (EINVAL); 7615 } 7616 (*num_v6) += 1; 7617 break; 7618 } 7619 #endif 7620 default: 7621 return (EINVAL); 7622 } 7623 if ((at + incr) > limit) { 7624 return (EINVAL); 7625 } 7626 SCTP_INP_INCR_REF(inp); 7627 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 7628 if (stcb != NULL) { 7629 SCTP_TCB_UNLOCK(stcb); 7630 return (EALREADY); 7631 } else { 7632 SCTP_INP_DECR_REF(inp); 7633 } 7634 at += incr; 7635 sa = (struct sockaddr *)((caddr_t)sa + incr); 7636 } 7637 return (0); 7638 } 7639 7640 /* 7641 * sctp_bindx(ADD) for one address. 7642 * assumes all arguments are valid/checked by caller. 
7643 */ 7644 void 7645 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 7646 struct sockaddr *sa, uint32_t vrf_id, int *error, 7647 void *p) 7648 { 7649 #if defined(INET) && defined(INET6) 7650 struct sockaddr_in sin; 7651 #endif 7652 #ifdef INET6 7653 struct sockaddr_in6 *sin6; 7654 #endif 7655 #ifdef INET 7656 struct sockaddr_in *sinp; 7657 #endif 7658 struct sockaddr *addr_to_use; 7659 struct sctp_inpcb *lep; 7660 #ifdef SCTP_MVRF 7661 int i; 7662 #endif 7663 uint16_t port; 7664 7665 /* see if we're bound all already! */ 7666 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 7667 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7668 *error = EINVAL; 7669 return; 7670 } 7671 #ifdef SCTP_MVRF 7672 /* Is the VRF one we have */ 7673 for (i = 0; i < inp->num_vrfs; i++) { 7674 if (vrf_id == inp->m_vrf_ids[i]) { 7675 break; 7676 } 7677 } 7678 if (i == inp->num_vrfs) { 7679 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7680 *error = EINVAL; 7681 return; 7682 } 7683 #endif 7684 switch (sa->sa_family) { 7685 #ifdef INET6 7686 case AF_INET6: 7687 #ifdef HAVE_SA_LEN 7688 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 7689 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7690 *error = EINVAL; 7691 return; 7692 } 7693 #endif 7694 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 7695 /* can only bind v6 on PF_INET6 sockets */ 7696 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7697 *error = EINVAL; 7698 return; 7699 } 7700 sin6 = (struct sockaddr_in6 *)sa; 7701 port = sin6->sin6_port; 7702 #ifdef INET 7703 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 7704 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 7705 SCTP_IPV6_V6ONLY(inp)) { 7706 /* can't bind v4-mapped on PF_INET sockets */ 7707 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7708 *error = EINVAL; 7709 return; 7710 } 7711 in6_sin6_2_sin(&sin, sin6); 7712 addr_to_use = (struct sockaddr *)&sin; 7713 } 
else { 7714 addr_to_use = sa; 7715 } 7716 #else 7717 addr_to_use = sa; 7718 #endif 7719 break; 7720 #endif 7721 #ifdef INET 7722 case AF_INET: 7723 #ifdef HAVE_SA_LEN 7724 if (sa->sa_len != sizeof(struct sockaddr_in)) { 7725 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7726 *error = EINVAL; 7727 return; 7728 } 7729 #endif 7730 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 7731 SCTP_IPV6_V6ONLY(inp)) { 7732 /* can't bind v4 on PF_INET sockets */ 7733 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7734 *error = EINVAL; 7735 return; 7736 } 7737 sinp = (struct sockaddr_in *)sa; 7738 port = sinp->sin_port; 7739 addr_to_use = sa; 7740 break; 7741 #endif 7742 default: 7743 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7744 *error = EINVAL; 7745 return; 7746 } 7747 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 7748 #if !(defined(_WIN32) || defined(__Userspace__)) 7749 if (p == NULL) { 7750 /* Can't get proc for Net/Open BSD */ 7751 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7752 *error = EINVAL; 7753 return; 7754 } 7755 #endif 7756 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 7757 return; 7758 } 7759 /* Validate the incoming port. */ 7760 if ((port != 0) && (port != inp->sctp_lport)) { 7761 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7762 *error = EINVAL; 7763 return; 7764 } 7765 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 7766 if (lep == NULL) { 7767 /* add the address */ 7768 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 7769 SCTP_ADD_IP_ADDRESS, vrf_id); 7770 } else { 7771 if (lep != inp) { 7772 *error = EADDRINUSE; 7773 } 7774 SCTP_INP_DECR_REF(lep); 7775 } 7776 } 7777 7778 /* 7779 * sctp_bindx(DELETE) for one address. 7780 * assumes all arguments are valid/checked by caller. 
7781 */ 7782 void 7783 sctp_bindx_delete_address(struct sctp_inpcb *inp, 7784 struct sockaddr *sa, uint32_t vrf_id, int *error) 7785 { 7786 struct sockaddr *addr_to_use; 7787 #if defined(INET) && defined(INET6) 7788 struct sockaddr_in6 *sin6; 7789 struct sockaddr_in sin; 7790 #endif 7791 #ifdef SCTP_MVRF 7792 int i; 7793 #endif 7794 7795 /* see if we're bound all already! */ 7796 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 7797 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7798 *error = EINVAL; 7799 return; 7800 } 7801 #ifdef SCTP_MVRF 7802 /* Is the VRF one we have */ 7803 for (i = 0; i < inp->num_vrfs; i++) { 7804 if (vrf_id == inp->m_vrf_ids[i]) { 7805 break; 7806 } 7807 } 7808 if (i == inp->num_vrfs) { 7809 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7810 *error = EINVAL; 7811 return; 7812 } 7813 #endif 7814 switch (sa->sa_family) { 7815 #ifdef INET6 7816 case AF_INET6: 7817 #ifdef HAVE_SA_LEN 7818 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 7819 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7820 *error = EINVAL; 7821 return; 7822 } 7823 #endif 7824 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 7825 /* can only bind v6 on PF_INET6 sockets */ 7826 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7827 *error = EINVAL; 7828 return; 7829 } 7830 #ifdef INET 7831 sin6 = (struct sockaddr_in6 *)sa; 7832 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 7833 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 7834 SCTP_IPV6_V6ONLY(inp)) { 7835 /* can't bind mapped-v4 on PF_INET sockets */ 7836 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7837 *error = EINVAL; 7838 return; 7839 } 7840 in6_sin6_2_sin(&sin, sin6); 7841 addr_to_use = (struct sockaddr *)&sin; 7842 } else { 7843 addr_to_use = sa; 7844 } 7845 #else 7846 addr_to_use = sa; 7847 #endif 7848 break; 7849 #endif 7850 #ifdef INET 7851 case AF_INET: 7852 #ifdef HAVE_SA_LEN 7853 if (sa->sa_len != 
sizeof(struct sockaddr_in)) { 7854 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7855 *error = EINVAL; 7856 return; 7857 } 7858 #endif 7859 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 7860 SCTP_IPV6_V6ONLY(inp)) { 7861 /* can't bind v4 on PF_INET sockets */ 7862 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7863 *error = EINVAL; 7864 return; 7865 } 7866 addr_to_use = sa; 7867 break; 7868 #endif 7869 default: 7870 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 7871 *error = EINVAL; 7872 return; 7873 } 7874 /* No lock required mgmt_ep_sa does its own locking. */ 7875 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 7876 vrf_id); 7877 } 7878 7879 /* 7880 * returns the valid local address count for an assoc, taking into account 7881 * all scoping rules 7882 */ 7883 int 7884 sctp_local_addr_count(struct sctp_tcb *stcb) 7885 { 7886 int loopback_scope; 7887 #if defined(INET) 7888 int ipv4_local_scope, ipv4_addr_legal; 7889 #endif 7890 #if defined(INET6) 7891 int local_scope, site_scope, ipv6_addr_legal; 7892 #endif 7893 #if defined(__Userspace__) 7894 int conn_addr_legal; 7895 #endif 7896 struct sctp_vrf *vrf; 7897 struct sctp_ifn *sctp_ifn; 7898 struct sctp_ifa *sctp_ifa; 7899 int count = 0; 7900 7901 /* Turn on all the appropriate scopes */ 7902 loopback_scope = stcb->asoc.scope.loopback_scope; 7903 #if defined(INET) 7904 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 7905 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 7906 #endif 7907 #if defined(INET6) 7908 local_scope = stcb->asoc.scope.local_scope; 7909 site_scope = stcb->asoc.scope.site_scope; 7910 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 7911 #endif 7912 #if defined(__Userspace__) 7913 conn_addr_legal = stcb->asoc.scope.conn_addr_legal; 7914 #endif 7915 SCTP_IPI_ADDR_RLOCK(); 7916 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 7917 if (vrf == NULL) { 7918 /* no vrf, no addresses */ 7919 SCTP_IPI_ADDR_RUNLOCK(); 
7920 return (0); 7921 } 7922 7923 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 7924 /* 7925 * bound all case: go through all ifns on the vrf 7926 */ 7927 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 7928 if ((loopback_scope == 0) && 7929 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 7930 continue; 7931 } 7932 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 7933 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 7934 continue; 7935 switch (sctp_ifa->address.sa.sa_family) { 7936 #ifdef INET 7937 case AF_INET: 7938 if (ipv4_addr_legal) { 7939 struct sockaddr_in *sin; 7940 7941 sin = &sctp_ifa->address.sin; 7942 if (sin->sin_addr.s_addr == 0) { 7943 /* skip unspecified addrs */ 7944 continue; 7945 } 7946 #if defined(__FreeBSD__) && !defined(__Userspace__) 7947 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 7948 &sin->sin_addr) != 0) { 7949 continue; 7950 } 7951 #endif 7952 if ((ipv4_local_scope == 0) && 7953 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 7954 continue; 7955 } 7956 /* count this one */ 7957 count++; 7958 } else { 7959 continue; 7960 } 7961 break; 7962 #endif 7963 #ifdef INET6 7964 case AF_INET6: 7965 if (ipv6_addr_legal) { 7966 struct sockaddr_in6 *sin6; 7967 7968 #if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME) 7969 struct sockaddr_in6 lsa6; 7970 #endif 7971 sin6 = &sctp_ifa->address.sin6; 7972 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 7973 continue; 7974 } 7975 #if defined(__FreeBSD__) && !defined(__Userspace__) 7976 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 7977 &sin6->sin6_addr) != 0) { 7978 continue; 7979 } 7980 #endif 7981 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 7982 if (local_scope == 0) 7983 continue; 7984 #if defined(SCTP_EMBEDDED_V6_SCOPE) 7985 if (sin6->sin6_scope_id == 0) { 7986 #ifdef SCTP_KAME 7987 if (sa6_recoverscope(sin6) != 0) 7988 /* 7989 * bad link 7990 * local 7991 * address 7992 */ 7993 continue; 7994 #else 7995 lsa6 = *sin6; 7996 if (in6_recoverscope(&lsa6, 7997 &lsa6.sin6_addr, 
7998 NULL)) 7999 /* 8000 * bad link 8001 * local 8002 * address 8003 */ 8004 continue; 8005 sin6 = &lsa6; 8006 #endif /* SCTP_KAME */ 8007 } 8008 #endif /* SCTP_EMBEDDED_V6_SCOPE */ 8009 } 8010 if ((site_scope == 0) && 8011 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 8012 continue; 8013 } 8014 /* count this one */ 8015 count++; 8016 } 8017 break; 8018 #endif 8019 #if defined(__Userspace__) 8020 case AF_CONN: 8021 if (conn_addr_legal) { 8022 count++; 8023 } 8024 break; 8025 #endif 8026 default: 8027 /* TSNH */ 8028 break; 8029 } 8030 } 8031 } 8032 } else { 8033 /* 8034 * subset bound case 8035 */ 8036 struct sctp_laddr *laddr; 8037 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 8038 sctp_nxt_addr) { 8039 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 8040 continue; 8041 } 8042 /* count this one */ 8043 count++; 8044 } 8045 } 8046 SCTP_IPI_ADDR_RUNLOCK(); 8047 return (count); 8048 } 8049 8050 #if defined(SCTP_LOCAL_TRACE_BUF) 8051 8052 void 8053 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 8054 { 8055 uint32_t saveindex, newindex; 8056 8057 #if defined(_WIN32) && !defined(__Userspace__) 8058 if (SCTP_BASE_SYSCTL(sctp_log) == NULL) { 8059 return; 8060 } 8061 do { 8062 saveindex = SCTP_BASE_SYSCTL(sctp_log)->index; 8063 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 8064 newindex = 1; 8065 } else { 8066 newindex = saveindex + 1; 8067 } 8068 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log)->index, saveindex, newindex) == 0); 8069 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 8070 saveindex = 0; 8071 } 8072 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 8073 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].subsys = subsys; 8074 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[0] = a; 8075 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[1] = b; 8076 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[2] = c; 8077 
SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[3] = d; 8078 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[4] = e; 8079 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[5] = f; 8080 #else 8081 do { 8082 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 8083 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 8084 newindex = 1; 8085 } else { 8086 newindex = saveindex + 1; 8087 } 8088 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 8089 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 8090 saveindex = 0; 8091 } 8092 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 8093 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 8094 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 8095 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 8096 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 8097 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 8098 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 8099 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 8100 #endif 8101 } 8102 8103 #endif 8104 #if defined(__FreeBSD__) && !defined(__Userspace__) 8105 static bool 8106 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 8107 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 8108 { 8109 struct ip *iph; 8110 #ifdef INET6 8111 struct ip6_hdr *ip6; 8112 #endif 8113 struct mbuf *sp, *last; 8114 struct udphdr *uhdr; 8115 uint16_t port; 8116 8117 if ((m->m_flags & M_PKTHDR) == 0) { 8118 /* Can't handle one that is not a pkt hdr */ 8119 goto out; 8120 } 8121 /* Pull the src port */ 8122 iph = mtod(m, struct ip *); 8123 uhdr = (struct udphdr *)((caddr_t)iph + off); 8124 port = uhdr->uh_sport; 8125 /* Split out the mbuf chain. Leave the 8126 * IP header in m, place the 8127 * rest in the sp. 
8128 */ 8129 sp = m_split(m, off, M_NOWAIT); 8130 if (sp == NULL) { 8131 /* Gak, drop packet, we can't do a split */ 8132 goto out; 8133 } 8134 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 8135 /* Gak, packet can't have an SCTP header in it - too small */ 8136 m_freem(sp); 8137 goto out; 8138 } 8139 /* Now pull up the UDP header and SCTP header together */ 8140 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 8141 if (sp == NULL) { 8142 /* Gak pullup failed */ 8143 goto out; 8144 } 8145 /* Trim out the UDP header */ 8146 m_adj(sp, sizeof(struct udphdr)); 8147 8148 /* Now reconstruct the mbuf chain */ 8149 for (last = m; last->m_next; last = last->m_next); 8150 last->m_next = sp; 8151 m->m_pkthdr.len += sp->m_pkthdr.len; 8152 /* 8153 * The CSUM_DATA_VALID flags indicates that the HW checked the 8154 * UDP checksum and it was valid. 8155 * Since CSUM_DATA_VALID == CSUM_SCTP_VALID this would imply that 8156 * the HW also verified the SCTP checksum. Therefore, clear the bit. 
8157 */ 8158 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 8159 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 8160 m->m_pkthdr.len, 8161 if_name(m->m_pkthdr.rcvif), 8162 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 8163 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 8164 iph = mtod(m, struct ip *); 8165 switch (iph->ip_v) { 8166 #ifdef INET 8167 case IPVERSION: 8168 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 8169 sctp_input_with_port(m, off, port); 8170 break; 8171 #endif 8172 #ifdef INET6 8173 case IPV6_VERSION >> 4: 8174 ip6 = mtod(m, struct ip6_hdr *); 8175 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 8176 sctp6_input_with_port(&m, &off, port); 8177 break; 8178 #endif 8179 default: 8180 goto out; 8181 break; 8182 } 8183 return (true); 8184 out: 8185 m_freem(m); 8186 8187 return (true); 8188 } 8189 8190 #ifdef INET 8191 static void 8192 sctp_recv_icmp_tunneled_packet(udp_tun_icmp_param_t param) 8193 { 8194 struct icmp *icmp = param.icmp; 8195 struct ip *outer_ip, *inner_ip; 8196 struct sctphdr *sh; 8197 struct udphdr *udp; 8198 struct sctp_inpcb *inp; 8199 struct sctp_tcb *stcb; 8200 struct sctp_nets *net; 8201 struct sctp_init_chunk *ch; 8202 struct sockaddr_in src, dst; 8203 uint8_t type, code; 8204 8205 inner_ip = &icmp->icmp_ip; 8206 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 8207 if (ntohs(outer_ip->ip_len) < 8208 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 8209 return; 8210 } 8211 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 8212 sh = (struct sctphdr *)(udp + 1); 8213 memset(&src, 0, sizeof(struct sockaddr_in)); 8214 src.sin_family = AF_INET; 8215 #ifdef HAVE_SIN_LEN 8216 src.sin_len = sizeof(struct sockaddr_in); 8217 #endif 8218 src.sin_port = sh->src_port; 8219 src.sin_addr = inner_ip->ip_src; 8220 memset(&dst, 0, sizeof(struct sockaddr_in)); 8221 dst.sin_family = AF_INET; 8222 #ifdef HAVE_SIN_LEN 8223 
dst.sin_len = sizeof(struct sockaddr_in); 8224 #endif 8225 dst.sin_port = sh->dest_port; 8226 dst.sin_addr = inner_ip->ip_dst; 8227 /* 8228 * 'dst' holds the dest of the packet that failed to be sent. 8229 * 'src' holds our local endpoint address. Thus we reverse 8230 * the dst and the src in the lookup. 8231 */ 8232 inp = NULL; 8233 net = NULL; 8234 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 8235 (struct sockaddr *)&src, 8236 &inp, &net, 1, 8237 SCTP_DEFAULT_VRFID); 8238 if ((stcb != NULL) && 8239 (net != NULL) && 8240 (inp != NULL)) { 8241 /* Check the UDP port numbers */ 8242 if ((udp->uh_dport != net->port) || 8243 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 8244 SCTP_TCB_UNLOCK(stcb); 8245 return; 8246 } 8247 /* Check the verification tag */ 8248 if (ntohl(sh->v_tag) != 0) { 8249 /* 8250 * This must be the verification tag used 8251 * for sending out packets. We don't 8252 * consider packets reflecting the 8253 * verification tag. 8254 */ 8255 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 8256 SCTP_TCB_UNLOCK(stcb); 8257 return; 8258 } 8259 } else { 8260 if (ntohs(outer_ip->ip_len) >= 8261 sizeof(struct ip) + 8262 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 8263 /* 8264 * In this case we can check if we 8265 * got an INIT chunk and if the 8266 * initiate tag matches. 
8267 */ 8268 ch = (struct sctp_init_chunk *)(sh + 1); 8269 if ((ch->ch.chunk_type != SCTP_INITIATION) || 8270 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 8271 SCTP_TCB_UNLOCK(stcb); 8272 return; 8273 } 8274 } else { 8275 SCTP_TCB_UNLOCK(stcb); 8276 return; 8277 } 8278 } 8279 type = icmp->icmp_type; 8280 code = icmp->icmp_code; 8281 if ((type == ICMP_UNREACH) && 8282 (code == ICMP_UNREACH_PORT)) { 8283 code = ICMP_UNREACH_PROTOCOL; 8284 } 8285 sctp_notify(inp, stcb, net, type, code, 8286 ntohs(inner_ip->ip_len), 8287 (uint32_t)ntohs(icmp->icmp_nextmtu)); 8288 #if defined(__Userspace__) 8289 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 8290 (stcb->sctp_socket != NULL)) { 8291 struct socket *upcall_socket; 8292 8293 upcall_socket = stcb->sctp_socket; 8294 SOCK_LOCK(upcall_socket); 8295 soref(upcall_socket); 8296 SOCK_UNLOCK(upcall_socket); 8297 if ((upcall_socket->so_upcall != NULL) && 8298 (upcall_socket->so_error != 0)) { 8299 (*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT); 8300 } 8301 ACCEPT_LOCK(); 8302 SOCK_LOCK(upcall_socket); 8303 sorele(upcall_socket); 8304 } 8305 #endif 8306 } else { 8307 if ((stcb == NULL) && (inp != NULL)) { 8308 /* reduce ref-count */ 8309 SCTP_INP_WLOCK(inp); 8310 SCTP_INP_DECR_REF(inp); 8311 SCTP_INP_WUNLOCK(inp); 8312 } 8313 if (stcb) { 8314 SCTP_TCB_UNLOCK(stcb); 8315 } 8316 } 8317 return; 8318 } 8319 #endif 8320 8321 #ifdef INET6 8322 static void 8323 sctp_recv_icmp6_tunneled_packet(udp_tun_icmp_param_t param) 8324 { 8325 struct ip6ctlparam *ip6cp = param.ip6cp; 8326 struct sctp_inpcb *inp; 8327 struct sctp_tcb *stcb; 8328 struct sctp_nets *net; 8329 struct sctphdr sh; 8330 struct udphdr udp; 8331 struct sockaddr_in6 src, dst; 8332 uint8_t type, code; 8333 8334 /* 8335 * XXX: We assume that when IPV6 is non NULL, M and OFF are 8336 * valid. 
8337 */ 8338 if (ip6cp->ip6c_m == NULL) { 8339 return; 8340 } 8341 /* Check if we can safely examine the ports and the 8342 * verification tag of the SCTP common header. 8343 */ 8344 if (ip6cp->ip6c_m->m_pkthdr.len < 8345 ip6cp->ip6c_off + sizeof(struct udphdr)+ offsetof(struct sctphdr, checksum)) { 8346 return; 8347 } 8348 /* Copy out the UDP header. */ 8349 memset(&udp, 0, sizeof(struct udphdr)); 8350 m_copydata(ip6cp->ip6c_m, 8351 ip6cp->ip6c_off, 8352 sizeof(struct udphdr), 8353 (caddr_t)&udp); 8354 /* Copy out the port numbers and the verification tag. */ 8355 memset(&sh, 0, sizeof(struct sctphdr)); 8356 m_copydata(ip6cp->ip6c_m, 8357 ip6cp->ip6c_off + sizeof(struct udphdr), 8358 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 8359 (caddr_t)&sh); 8360 memset(&src, 0, sizeof(struct sockaddr_in6)); 8361 src.sin6_family = AF_INET6; 8362 #ifdef HAVE_SIN6_LEN 8363 src.sin6_len = sizeof(struct sockaddr_in6); 8364 #endif 8365 src.sin6_port = sh.src_port; 8366 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 8367 #if defined(__FreeBSD__) && !defined(__Userspace__) 8368 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 8369 return; 8370 } 8371 #endif 8372 memset(&dst, 0, sizeof(struct sockaddr_in6)); 8373 dst.sin6_family = AF_INET6; 8374 #ifdef HAVE_SIN6_LEN 8375 dst.sin6_len = sizeof(struct sockaddr_in6); 8376 #endif 8377 dst.sin6_port = sh.dest_port; 8378 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 8379 #if defined(__FreeBSD__) && !defined(__Userspace__) 8380 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 8381 return; 8382 } 8383 #endif 8384 inp = NULL; 8385 net = NULL; 8386 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 8387 (struct sockaddr *)&src, 8388 &inp, &net, 1, SCTP_DEFAULT_VRFID); 8389 if ((stcb != NULL) && 8390 (net != NULL) && 8391 (inp != NULL)) { 8392 /* Check the UDP port numbers */ 8393 if ((udp.uh_dport != net->port) || 8394 (udp.uh_sport != 
htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 8395 SCTP_TCB_UNLOCK(stcb); 8396 return; 8397 } 8398 /* Check the verification tag */ 8399 if (ntohl(sh.v_tag) != 0) { 8400 /* 8401 * This must be the verification tag used for 8402 * sending out packets. We don't consider 8403 * packets reflecting the verification tag. 8404 */ 8405 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 8406 SCTP_TCB_UNLOCK(stcb); 8407 return; 8408 } 8409 } else { 8410 #if defined(__FreeBSD__) && !defined(__Userspace__) 8411 if (ip6cp->ip6c_m->m_pkthdr.len >= 8412 ip6cp->ip6c_off + sizeof(struct udphdr) + 8413 sizeof(struct sctphdr) + 8414 sizeof(struct sctp_chunkhdr) + 8415 offsetof(struct sctp_init, a_rwnd)) { 8416 /* 8417 * In this case we can check if we 8418 * got an INIT chunk and if the 8419 * initiate tag matches. 8420 */ 8421 uint32_t initiate_tag; 8422 uint8_t chunk_type; 8423 8424 m_copydata(ip6cp->ip6c_m, 8425 ip6cp->ip6c_off + 8426 sizeof(struct udphdr) + 8427 sizeof(struct sctphdr), 8428 sizeof(uint8_t), 8429 (caddr_t)&chunk_type); 8430 m_copydata(ip6cp->ip6c_m, 8431 ip6cp->ip6c_off + 8432 sizeof(struct udphdr) + 8433 sizeof(struct sctphdr) + 8434 sizeof(struct sctp_chunkhdr), 8435 sizeof(uint32_t), 8436 (caddr_t)&initiate_tag); 8437 if ((chunk_type != SCTP_INITIATION) || 8438 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 8439 SCTP_TCB_UNLOCK(stcb); 8440 return; 8441 } 8442 } else { 8443 SCTP_TCB_UNLOCK(stcb); 8444 return; 8445 } 8446 #else 8447 SCTP_TCB_UNLOCK(stcb); 8448 return; 8449 #endif 8450 } 8451 type = ip6cp->ip6c_icmp6->icmp6_type; 8452 code = ip6cp->ip6c_icmp6->icmp6_code; 8453 if ((type == ICMP6_DST_UNREACH) && 8454 (code == ICMP6_DST_UNREACH_NOPORT)) { 8455 type = ICMP6_PARAM_PROB; 8456 code = ICMP6_PARAMPROB_NEXTHEADER; 8457 } 8458 sctp6_notify(inp, stcb, net, type, code, 8459 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 8460 #if defined(__Userspace__) 8461 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 8462 (stcb->sctp_socket != NULL)) { 
8463 struct socket *upcall_socket; 8464 8465 upcall_socket = stcb->sctp_socket; 8466 SOCK_LOCK(upcall_socket); 8467 soref(upcall_socket); 8468 SOCK_UNLOCK(upcall_socket); 8469 if ((upcall_socket->so_upcall != NULL) && 8470 (upcall_socket->so_error != 0)) { 8471 (*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT); 8472 } 8473 ACCEPT_LOCK(); 8474 SOCK_LOCK(upcall_socket); 8475 sorele(upcall_socket); 8476 } 8477 #endif 8478 } else { 8479 if ((stcb == NULL) && (inp != NULL)) { 8480 /* reduce inp's ref-count */ 8481 SCTP_INP_WLOCK(inp); 8482 SCTP_INP_DECR_REF(inp); 8483 SCTP_INP_WUNLOCK(inp); 8484 } 8485 if (stcb) { 8486 SCTP_TCB_UNLOCK(stcb); 8487 } 8488 } 8489 } 8490 #endif 8491 8492 void 8493 sctp_over_udp_stop(void) 8494 { 8495 /* 8496 * This function assumes sysctl caller holds sctp_sysctl_info_lock() for writing! 8497 */ 8498 #ifdef INET 8499 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 8500 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 8501 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 8502 } 8503 #endif 8504 #ifdef INET6 8505 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 8506 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 8507 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 8508 } 8509 #endif 8510 } 8511 8512 int 8513 sctp_over_udp_start(void) 8514 { 8515 uint16_t port; 8516 int ret; 8517 #ifdef INET 8518 struct sockaddr_in sin; 8519 #endif 8520 #ifdef INET6 8521 struct sockaddr_in6 sin6; 8522 #endif 8523 /* 8524 * This function assumes sysctl caller holds sctp_sysctl_info_lock() for writing! 
8525 */ 8526 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 8527 if (ntohs(port) == 0) { 8528 /* Must have a port set */ 8529 return (EINVAL); 8530 } 8531 #ifdef INET 8532 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 8533 /* Already running -- must stop first */ 8534 return (EALREADY); 8535 } 8536 #endif 8537 #ifdef INET6 8538 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 8539 /* Already running -- must stop first */ 8540 return (EALREADY); 8541 } 8542 #endif 8543 #ifdef INET 8544 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 8545 SOCK_DGRAM, IPPROTO_UDP, 8546 curthread->td_ucred, curthread))) { 8547 sctp_over_udp_stop(); 8548 return (ret); 8549 } 8550 /* Call the special UDP hook. */ 8551 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 8552 sctp_recv_udp_tunneled_packet, 8553 sctp_recv_icmp_tunneled_packet, 8554 NULL))) { 8555 sctp_over_udp_stop(); 8556 return (ret); 8557 } 8558 /* Ok, we have a socket, bind it to the port. */ 8559 memset(&sin, 0, sizeof(struct sockaddr_in)); 8560 sin.sin_len = sizeof(struct sockaddr_in); 8561 sin.sin_family = AF_INET; 8562 sin.sin_port = htons(port); 8563 if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket), 8564 (struct sockaddr *)&sin, curthread))) { 8565 sctp_over_udp_stop(); 8566 return (ret); 8567 } 8568 #endif 8569 #ifdef INET6 8570 if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket), 8571 SOCK_DGRAM, IPPROTO_UDP, 8572 curthread->td_ucred, curthread))) { 8573 sctp_over_udp_stop(); 8574 return (ret); 8575 } 8576 /* Call the special UDP hook. */ 8577 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket), 8578 sctp_recv_udp_tunneled_packet, 8579 sctp_recv_icmp6_tunneled_packet, 8580 NULL))) { 8581 sctp_over_udp_stop(); 8582 return (ret); 8583 } 8584 /* Ok, we have a socket, bind it to the port. 
*/ 8585 memset(&sin6, 0, sizeof(struct sockaddr_in6)); 8586 sin6.sin6_len = sizeof(struct sockaddr_in6); 8587 sin6.sin6_family = AF_INET6; 8588 sin6.sin6_port = htons(port); 8589 if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket), 8590 (struct sockaddr *)&sin6, curthread))) { 8591 sctp_over_udp_stop(); 8592 return (ret); 8593 } 8594 #endif 8595 return (0); 8596 } 8597 #endif 8598 8599 /* 8600 * sctp_min_mtu ()returns the minimum of all non-zero arguments. 8601 * If all arguments are zero, zero is returned. 8602 */ 8603 uint32_t 8604 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3) 8605 { 8606 if (mtu1 > 0) { 8607 if (mtu2 > 0) { 8608 if (mtu3 > 0) { 8609 return (min(mtu1, min(mtu2, mtu3))); 8610 } else { 8611 return (min(mtu1, mtu2)); 8612 } 8613 } else { 8614 if (mtu3 > 0) { 8615 return (min(mtu1, mtu3)); 8616 } else { 8617 return (mtu1); 8618 } 8619 } 8620 } else { 8621 if (mtu2 > 0) { 8622 if (mtu3 > 0) { 8623 return (min(mtu2, mtu3)); 8624 } else { 8625 return (mtu2); 8626 } 8627 } else { 8628 return (mtu3); 8629 } 8630 } 8631 } 8632 8633 #if defined(__FreeBSD__) && !defined(__Userspace__) 8634 void 8635 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu) 8636 { 8637 struct in_conninfo inc; 8638 8639 memset(&inc, 0, sizeof(struct in_conninfo)); 8640 inc.inc_fibnum = fibnum; 8641 switch (addr->sa.sa_family) { 8642 #ifdef INET 8643 case AF_INET: 8644 inc.inc_faddr = addr->sin.sin_addr; 8645 break; 8646 #endif 8647 #ifdef INET6 8648 case AF_INET6: 8649 inc.inc_flags |= INC_ISIPV6; 8650 inc.inc6_faddr = addr->sin6.sin6_addr; 8651 break; 8652 #endif 8653 default: 8654 return; 8655 } 8656 tcp_hc_updatemtu(&inc, (u_long)mtu); 8657 } 8658 8659 uint32_t 8660 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum) 8661 { 8662 struct in_conninfo inc; 8663 8664 memset(&inc, 0, sizeof(struct in_conninfo)); 8665 inc.inc_fibnum = fibnum; 8666 switch (addr->sa.sa_family) { 8667 #ifdef INET 8668 case AF_INET: 8669 inc.inc_faddr = 
addr->sin.sin_addr; 8670 break; 8671 #endif 8672 #ifdef INET6 8673 case AF_INET6: 8674 inc.inc_flags |= INC_ISIPV6; 8675 inc.inc6_faddr = addr->sin6.sin6_addr; 8676 break; 8677 #endif 8678 default: 8679 return (0); 8680 } 8681 return ((uint32_t)tcp_hc_getmtu(&inc)); 8682 } 8683 #endif 8684 8685 void 8686 sctp_set_state(struct sctp_tcb *stcb, int new_state) 8687 { 8688 #if defined(KDTRACE_HOOKS) 8689 int old_state = stcb->asoc.state; 8690 #endif 8691 8692 KASSERT((new_state & ~SCTP_STATE_MASK) == 0, 8693 ("sctp_set_state: Can't set substate (new_state = %x)", 8694 new_state)); 8695 stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state; 8696 if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) || 8697 (new_state == SCTP_STATE_SHUTDOWN_SENT) || 8698 (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) { 8699 SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING); 8700 } 8701 #if defined(KDTRACE_HOOKS) 8702 if (((old_state & SCTP_STATE_MASK) != new_state) && 8703 !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) && 8704 (new_state == SCTP_STATE_INUSE))) { 8705 SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state); 8706 } 8707 #endif 8708 } 8709 8710 void 8711 sctp_add_substate(struct sctp_tcb *stcb, int substate) 8712 { 8713 #if defined(KDTRACE_HOOKS) 8714 int old_state = stcb->asoc.state; 8715 #endif 8716 8717 KASSERT((substate & SCTP_STATE_MASK) == 0, 8718 ("sctp_add_substate: Can't set state (substate = %x)", 8719 substate)); 8720 stcb->asoc.state |= substate; 8721 #if defined(KDTRACE_HOOKS) 8722 if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) && 8723 ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) || 8724 ((substate & SCTP_STATE_SHUTDOWN_PENDING) && 8725 ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) { 8726 SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state); 8727 } 8728 #endif 8729 }