sctp_input.c (212606B)
1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * a) Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 14 * b) Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the distribution. 17 * 18 * c) Neither the name of Cisco Systems, Inc. nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include <netinet/sctp_os.h> 36 #include <netinet/sctp_var.h> 37 #include <netinet/sctp_sysctl.h> 38 #include <netinet/sctp_pcb.h> 39 #include <netinet/sctp_header.h> 40 #include <netinet/sctputil.h> 41 #include <netinet/sctp_output.h> 42 #include <netinet/sctp_input.h> 43 #include <netinet/sctp_auth.h> 44 #include <netinet/sctp_indata.h> 45 #include <netinet/sctp_asconf.h> 46 #include <netinet/sctp_bsd_addr.h> 47 #include <netinet/sctp_timer.h> 48 #include <netinet/sctp_crc32.h> 49 #if defined(__FreeBSD__) && !defined(__Userspace__) 50 #include <netinet/sctp_kdtrace.h> 51 #endif 52 #if defined(INET) || defined(INET6) 53 #if !defined(_WIN32) 54 #include <netinet/udp.h> 55 #endif 56 #endif 57 #if defined(__FreeBSD__) && !defined(__Userspace__) 58 #include <sys/smp.h> 59 #endif 60 61 #if defined(_WIN32) && !defined(_MSC_VER) 62 #include <minmax.h> 63 #endif 64 65 static void 66 sctp_stop_all_cookie_timers(struct sctp_tcb *stcb) 67 { 68 struct sctp_nets *net; 69 70 /* This now not only stops all cookie timers 71 * it also stops any INIT timers as well. This 72 * will make sure that the timers are stopped in 73 * all collision cases. 74 */ 75 SCTP_TCB_LOCK_ASSERT(stcb); 76 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 77 if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) { 78 sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, 79 stcb->sctp_ep, 80 stcb, 81 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1); 82 } else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) { 83 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, 84 stcb->sctp_ep, 85 stcb, 86 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2); 87 } 88 } 89 } 90 91 /* INIT handler */ 92 static void 93 sctp_handle_init(struct mbuf *m, int iphlen, int offset, 94 struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, 95 struct sctp_init_chunk *cp, struct sctp_inpcb *inp, 96 struct sctp_tcb *stcb, struct sctp_nets *net, 97 #if defined(__FreeBSD__) && !defined(__Userspace__) 98 uint8_t mflowtype, uint32_t mflowid, 99 #endif 100 uint32_t vrf_id, uint16_t port) 101 { 102 struct sctp_init *init; 103 struct mbuf *op_err; 104 105 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n", 106 (void *)stcb); 107 if (stcb == NULL) { 108 SCTP_INP_RLOCK(inp); 109 } 110 /* Validate parameters */ 111 init = &cp->init; 112 if (ntohl(init->initiate_tag) == 0) { 113 goto outnow; 114 } 115 if ((ntohl(init->a_rwnd) < SCTP_MIN_RWND) || 116 (ntohs(init->num_inbound_streams) == 0) || 117 (ntohs(init->num_outbound_streams) == 0)) { 118 /* protocol error... send abort */ 119 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); 120 sctp_send_abort(m, iphlen, src, dst, sh, init->initiate_tag, op_err, 121 #if defined(__FreeBSD__) && !defined(__Userspace__) 122 mflowtype, mflowid, inp->fibnum, 123 #endif 124 vrf_id, port); 125 goto outnow; 126 } 127 if (sctp_validate_init_auth_params(m, offset + sizeof(*cp), 128 offset + ntohs(cp->ch.chunk_length))) { 129 /* auth parameter(s) error... send abort */ 130 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 131 "Problem with AUTH parameters"); 132 sctp_send_abort(m, iphlen, src, dst, sh, init->initiate_tag, op_err, 133 #if defined(__FreeBSD__) && !defined(__Userspace__) 134 mflowtype, mflowid, inp->fibnum, 135 #endif 136 vrf_id, port); 137 goto outnow; 138 } 139 /* We are only accepting if we have a listening socket.*/ 140 if ((stcb == NULL) && 141 ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 142 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 143 (!SCTP_IS_LISTENING(inp)))) { 144 /* 145 * FIX ME ?? What about TCP model and we have a 146 * match/restart case? Actually no fix is needed. 147 * the lookup will always find the existing assoc so stcb 148 * would not be NULL. It may be questionable to do this 149 * since we COULD just send back the INIT-ACK and hope that 150 * the app did accept()'s by the time the COOKIE was sent. But 151 * there is a price to pay for COOKIE generation and I don't 152 * want to pay it on the chance that the app will actually do 153 * some accepts(). The App just looses and should NOT be in 154 * this state :-) 155 */ 156 if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) { 157 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 158 "No listener"); 159 sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err, 160 #if defined(__FreeBSD__) && !defined(__Userspace__) 161 mflowtype, mflowid, inp->fibnum, 162 #endif 163 vrf_id, port); 164 } 165 goto outnow; 166 } 167 if ((stcb != NULL) && 168 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT)) { 169 SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n"); 170 sctp_send_shutdown_ack(stcb, NULL); 171 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 172 } else { 173 SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n"); 174 sctp_send_initiate_ack(inp, stcb, net, m, iphlen, offset, 175 src, dst, sh, cp, 176 #if defined(__FreeBSD__) && !defined(__Userspace__) 177 mflowtype, mflowid, 178 #endif 179 vrf_id, port); 180 } 181 outnow: 182 if (stcb == NULL) { 183 SCTP_INP_RUNLOCK(inp); 184 } 185 } 186 187 /* 188 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error 189 */ 190 191 int 192 sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked) 193 { 194 int unsent_data; 195 unsigned int i; 196 struct sctp_stream_queue_pending *sp; 197 struct sctp_association *asoc; 198 199 SCTP_TCB_LOCK_ASSERT(stcb); 200 201 /* This function returns if any stream has true unsent data on it. 202 * Note that as it looks through it will clean up any places that 203 * have old data that has been sent but left at top of stream queue. 204 */ 205 asoc = &stcb->asoc; 206 unsent_data = 0; 207 if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) { 208 /* Check to see if some data queued */ 209 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 210 /*sa_ignore FREED_MEMORY*/ 211 sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue); 212 if (sp == NULL) { 213 continue; 214 } 215 if ((sp->msg_is_complete) && 216 (sp->length == 0) && 217 (sp->sender_all_done)) { 218 /* We are doing differed cleanup. Last 219 * time through when we took all the data 220 * the sender_all_done was not set. 221 */ 222 if (sp->put_last_out == 0) { 223 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n"); 224 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n", 225 sp->sender_all_done, 226 sp->length, 227 sp->msg_is_complete, 228 sp->put_last_out); 229 } 230 atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1); 231 TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next); 232 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, &asoc->strmout[i], sp); 233 if (sp->net) { 234 sctp_free_remote_addr(sp->net); 235 sp->net = NULL; 236 } 237 if (sp->data) { 238 sctp_m_freem(sp->data); 239 sp->data = NULL; 240 } 241 sctp_free_a_strmoq(stcb, sp, so_locked); 242 if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) { 243 unsent_data++; 244 } 245 } else { 246 unsent_data++; 247 } 248 if (unsent_data > 0) { 249 break; 250 } 251 } 252 } 253 return (unsent_data); 254 } 255 256 static int 257 sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb) 258 { 259 struct sctp_init *init; 260 struct sctp_association *asoc; 261 struct sctp_nets *lnet; 262 unsigned int i; 263 264 SCTP_TCB_LOCK_ASSERT(stcb); 265 266 init = &cp->init; 267 asoc = &stcb->asoc; 268 /* save off parameters */ 269 asoc->peer_vtag = ntohl(init->initiate_tag); 270 asoc->peers_rwnd = ntohl(init->a_rwnd); 271 /* init tsn's */ 272 asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1; 273 274 if (!TAILQ_EMPTY(&asoc->nets)) { 275 /* update any ssthresh's that may have a default */ 276 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 277 lnet->ssthresh = asoc->peers_rwnd; 278 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE|SCTP_CWND_LOGGING_ENABLE)) { 279 sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION); 280 } 281 } 282 } 283 if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) { 284 unsigned int newcnt; 285 struct sctp_stream_out *outs; 286 struct sctp_stream_queue_pending *sp, *nsp; 287 struct sctp_tmit_chunk *chk, *nchk; 288 289 /* abandon the upper streams */ 290 newcnt = ntohs(init->num_inbound_streams); 291 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 292 if (chk->rec.data.sid >= newcnt) { 293 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 294 asoc->send_queue_cnt--; 295 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 296 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 297 #ifdef INVARIANTS 298 } else { 299 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 300 #endif 301 } 302 if (chk->data != NULL) { 303 sctp_free_bufspace(stcb, asoc, chk, 1); 304 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 305 0, chk, SCTP_SO_NOT_LOCKED); 306 if (chk->data) { 307 sctp_m_freem(chk->data); 308 chk->data = NULL; 309 } 310 } 311 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 312 /*sa_ignore FREED_MEMORY*/ 313 } 314 } 315 if (asoc->strmout) { 316 for (i = newcnt; i < asoc->pre_open_streams; i++) { 317 outs = &asoc->strmout[i]; 318 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 319 atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1); 320 TAILQ_REMOVE(&outs->outqueue, sp, next); 321 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp); 322 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, 323 stcb, 0, sp, SCTP_SO_NOT_LOCKED); 324 if (sp->data) { 325 sctp_m_freem(sp->data); 326 sp->data = NULL; 327 } 328 if (sp->net) { 329 sctp_free_remote_addr(sp->net); 330 sp->net = NULL; 331 } 332 /* Free the chunk */ 333 sctp_free_a_strmoq(stcb, sp, SCTP_SO_NOT_LOCKED); 334 /*sa_ignore FREED_MEMORY*/ 335 } 336 outs->state = SCTP_STREAM_CLOSED; 337 } 338 } 339 /* cut back the count */ 340 asoc->pre_open_streams = newcnt; 341 } 342 asoc->streamoutcnt = asoc->pre_open_streams; 343 if (asoc->strmout) { 344 for (i = 0; i < asoc->streamoutcnt; i++) { 345 asoc->strmout[i].state = SCTP_STREAM_OPEN; 346 } 347 } 348 /* EY - nr_sack: initialize highest tsn in nr_mapping_array */ 349 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map; 350 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 351 sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 352 } 353 /* This is the next one we expect */ 354 asoc->str_reset_seq_in = asoc->asconf_seq_in + 1; 355 356 asoc->mapping_array_base_tsn = ntohl(init->initial_tsn); 357 asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in; 358 359 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 360 /* open the requested streams */ 361 362 if (asoc->strmin != NULL) { 363 /* Free the old ones */ 364 for (i = 0; i < asoc->streamincnt; i++) { 365 sctp_clean_up_stream(stcb, &asoc->strmin[i].inqueue); 366 sctp_clean_up_stream(stcb, &asoc->strmin[i].uno_inqueue); 367 } 368 SCTP_FREE(asoc->strmin, SCTP_M_STRMI); 369 } 370 if (asoc->max_inbound_streams > ntohs(init->num_outbound_streams)) { 371 asoc->streamincnt = ntohs(init->num_outbound_streams); 372 } else { 373 asoc->streamincnt = asoc->max_inbound_streams; 374 } 375 SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt * 376 sizeof(struct sctp_stream_in), SCTP_M_STRMI); 377 if (asoc->strmin == NULL) { 378 /* we didn't get memory for the streams! */ 379 SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n"); 380 return (-1); 381 } 382 for (i = 0; i < asoc->streamincnt; i++) { 383 asoc->strmin[i].sid = i; 384 asoc->strmin[i].last_mid_delivered = 0xffffffff; 385 TAILQ_INIT(&asoc->strmin[i].inqueue); 386 TAILQ_INIT(&asoc->strmin[i].uno_inqueue); 387 asoc->strmin[i].pd_api_started = 0; 388 asoc->strmin[i].delivery_started = 0; 389 } 390 /* 391 * load_address_from_init will put the addresses into the 392 * association when the COOKIE is processed or the INIT-ACK is 393 * processed. Both types of COOKIE's existing and new call this 394 * routine. It will remove addresses that are no longer in the 395 * association (for the restarting case where addresses are 396 * removed). Up front when the INIT arrives we will discard it if it 397 * is a restart and new addresses have been added. 398 */ 399 /* sa_ignore MEMLEAK */ 400 return (0); 401 } 402 403 /* 404 * INIT-ACK message processing/consumption returns value < 0 on error 405 */ 406 static int 407 sctp_process_init_ack(struct mbuf *m, int iphlen, int offset, 408 struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, 409 struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 410 struct sctp_nets *net, int *abort_no_unlock, 411 #if defined(__FreeBSD__) && !defined(__Userspace__) 412 uint8_t mflowtype, uint32_t mflowid, 413 #endif 414 uint32_t vrf_id) 415 { 416 struct sctp_association *asoc; 417 struct mbuf *op_err; 418 int retval, abort_flag, cookie_found; 419 int initack_limit; 420 int nat_friendly = 0; 421 422 /* First verify that we have no illegal param's */ 423 abort_flag = 0; 424 cookie_found = 0; 425 426 op_err = sctp_arethere_unrecognized_parameters(m, 427 (offset + sizeof(struct sctp_init_chunk)), 428 &abort_flag, (struct sctp_chunkhdr *)cp, 429 &nat_friendly, &cookie_found, NULL); 430 if (abort_flag) { 431 /* Send an abort and notify peer */ 432 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 433 src, dst, sh, op_err, 434 #if defined(__FreeBSD__) && !defined(__Userspace__) 435 mflowtype, mflowid, 436 #endif 437 vrf_id, net->port); 438 *abort_no_unlock = 1; 439 return (-1); 440 } 441 if (!cookie_found) { 442 uint16_t len; 443 444 /* Only report the missing cookie parameter */ 445 if (op_err != NULL) { 446 sctp_m_freem(op_err); 447 } 448 len = (uint16_t)(sizeof(struct sctp_error_missing_param) + sizeof(uint16_t)); 449 /* We abort with an error of missing mandatory param */ 450 op_err = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 451 if (op_err != NULL) { 452 struct sctp_error_missing_param *cause; 453 454 SCTP_BUF_LEN(op_err) = len; 455 cause = mtod(op_err, struct sctp_error_missing_param *); 456 /* Subtract the reserved param */ 457 cause->cause.code = htons(SCTP_CAUSE_MISSING_PARAM); 458 cause->cause.length = htons(len); 459 cause->num_missing_params = htonl(1); 460 cause->type[0] = htons(SCTP_STATE_COOKIE); 461 } 462 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 463 src, dst, sh, op_err, 464 #if defined(__FreeBSD__) && !defined(__Userspace__) 465 mflowtype, mflowid, 466 #endif 467 vrf_id, net->port); 468 *abort_no_unlock = 1; 469 return (-3); 470 } 471 asoc = &stcb->asoc; 472 asoc->peer_supports_nat = (uint8_t)nat_friendly; 473 /* process the peer's parameters in the INIT-ACK */ 474 if (sctp_process_init((struct sctp_init_chunk *)cp, stcb) < 0) { 475 if (op_err != NULL) { 476 sctp_m_freem(op_err); 477 } 478 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 479 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n"); 480 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 481 src, dst, sh, op_err, 482 #if defined(__FreeBSD__) && !defined(__Userspace__) 483 mflowtype, mflowid, 484 #endif 485 vrf_id, net->port); 486 *abort_no_unlock = 1; 487 return (-1); 488 } 489 initack_limit = offset + ntohs(cp->ch.chunk_length); 490 /* load all addresses */ 491 if ((retval = sctp_load_addresses_from_init(stcb, m, 492 offset + sizeof(struct sctp_init_chunk), 493 initack_limit, src, dst, NULL, stcb->asoc.port)) < 0) { 494 if (op_err != NULL) { 495 sctp_m_freem(op_err); 496 } 497 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 498 "Problem with address parameters"); 499 SCTPDBG(SCTP_DEBUG_INPUT1, 500 "Load addresses from INIT causes an abort %d\n", 501 retval); 502 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 503 src, dst, sh, op_err, 504 #if defined(__FreeBSD__) && !defined(__Userspace__) 505 mflowtype, mflowid, 506 #endif 507 vrf_id, net->port); 508 *abort_no_unlock = 1; 509 return (-1); 510 } 511 /* if the peer doesn't support asconf, flush the asconf queue */ 512 if (asoc->asconf_supported == 0) { 513 struct sctp_asconf_addr *param, *nparam; 514 515 TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) { 516 TAILQ_REMOVE(&asoc->asconf_queue, param, next); 517 SCTP_FREE(param, SCTP_M_ASC_ADDR); 518 } 519 } 520 521 stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs, 522 stcb->asoc.local_hmacs); 523 if (op_err) { 524 sctp_queue_op_err(stcb, op_err); 525 /* queuing will steal away the mbuf chain to the out queue */ 526 op_err = NULL; 527 } 528 /* extract the cookie and queue it to "echo" it back... */ 529 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 530 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 531 stcb->asoc.overall_error_count, 532 0, 533 SCTP_FROM_SCTP_INPUT, 534 __LINE__); 535 } 536 537 /* 538 * Cancel the INIT timer, We do this first before queueing the 539 * cookie. We always cancel at the primary to assume that we are 540 * canceling the timer started by the INIT which always goes to the 541 * primary. 542 */ 543 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb, 544 asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3); 545 546 /* calculate the RTO */ 547 if (asoc->overall_error_count == 0) { 548 sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, 549 SCTP_RTT_FROM_NON_DATA); 550 } 551 stcb->asoc.overall_error_count = 0; 552 net->error_count = 0; 553 #if defined(__Userspace__) 554 if (stcb->sctp_ep->recv_callback) { 555 if (stcb->sctp_socket) { 556 uint32_t inqueue_bytes, sb_free_now; 557 struct sctp_inpcb *inp; 558 559 inp = stcb->sctp_ep; 560 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 561 sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv); 562 563 /* check if the amount free in the send socket buffer crossed the threshold */ 564 if (inp->send_callback && 565 (((inp->send_sb_threshold > 0) && 566 (sb_free_now >= inp->send_sb_threshold) && 567 (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) || 568 (inp->send_sb_threshold == 0))) { 569 atomic_add_int(&stcb->asoc.refcnt, 1); 570 SCTP_TCB_UNLOCK(stcb); 571 inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info); 572 SCTP_TCB_LOCK(stcb); 573 atomic_subtract_int(&stcb->asoc.refcnt, 1); 574 } 575 } 576 } 577 #endif 578 retval = sctp_send_cookie_echo(m, offset, initack_limit, stcb, net); 579 return (retval); 580 } 581 582 static void 583 sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp, 584 struct sctp_tcb *stcb, struct sctp_nets *net) 585 { 586 union sctp_sockstore store; 587 struct sctp_nets *r_net, *f_net; 588 struct timeval tv; 589 int req_prim = 0; 590 uint16_t old_error_counter; 591 592 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) { 593 /* Invalid length */ 594 return; 595 } 596 597 memset(&store, 0, sizeof(store)); 598 switch (cp->heartbeat.hb_info.addr_family) { 599 #ifdef INET 600 case AF_INET: 601 if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) { 602 store.sin.sin_family = cp->heartbeat.hb_info.addr_family; 603 #ifdef HAVE_SIN_LEN 604 store.sin.sin_len = cp->heartbeat.hb_info.addr_len; 605 #endif 606 store.sin.sin_port = stcb->rport; 607 memcpy(&store.sin.sin_addr, cp->heartbeat.hb_info.address, 608 sizeof(store.sin.sin_addr)); 609 } else { 610 return; 611 } 612 break; 613 #endif 614 #ifdef INET6 615 case AF_INET6: 616 if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) { 617 store.sin6.sin6_family = cp->heartbeat.hb_info.addr_family; 618 #ifdef HAVE_SIN6_LEN 619 store.sin6.sin6_len = cp->heartbeat.hb_info.addr_len; 620 #endif 621 store.sin6.sin6_port = stcb->rport; 622 memcpy(&store.sin6.sin6_addr, cp->heartbeat.hb_info.address, sizeof(struct in6_addr)); 623 } else { 624 return; 625 } 626 break; 627 #endif 628 #if defined(__Userspace__) 629 case AF_CONN: 630 if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_conn)) { 631 store.sconn.sconn_family = cp->heartbeat.hb_info.addr_family; 632 #ifdef HAVE_SCONN_LEN 633 store.sconn.sconn_len = cp->heartbeat.hb_info.addr_len; 634 #endif 635 store.sconn.sconn_port = stcb->rport; 636 memcpy(&store.sconn.sconn_addr, cp->heartbeat.hb_info.address, sizeof(void *)); 637 } else { 638 return; 639 } 640 break; 641 #endif 642 default: 643 return; 644 } 645 r_net = sctp_findnet(stcb, &store.sa); 646 if (r_net == NULL) { 647 SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n"); 648 return; 649 } 650 if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) && 651 (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) && 652 (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) { 653 /* 654 * If the its a HB and it's random value is correct when can 655 * confirm the destination. 656 */ 657 r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 658 if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) { 659 stcb->asoc.primary_destination = r_net; 660 r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY; 661 f_net = TAILQ_FIRST(&stcb->asoc.nets); 662 if (f_net != r_net) { 663 /* first one on the list is NOT the primary 664 * sctp_cmpaddr() is much more efficient if 665 * the primary is the first on the list, make it 666 * so. 667 */ 668 TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next); 669 TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next); 670 } 671 req_prim = 1; 672 } 673 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 674 stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED); 675 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, 676 r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4); 677 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net); 678 } 679 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 680 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 681 stcb->asoc.overall_error_count, 682 0, 683 SCTP_FROM_SCTP_INPUT, 684 __LINE__); 685 } 686 stcb->asoc.overall_error_count = 0; 687 old_error_counter = r_net->error_count; 688 r_net->error_count = 0; 689 r_net->hb_responded = 1; 690 tv.tv_sec = cp->heartbeat.hb_info.time_value_1; 691 tv.tv_usec = cp->heartbeat.hb_info.time_value_2; 692 /* Now lets do a RTO with this */ 693 sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, 694 SCTP_RTT_FROM_NON_DATA); 695 if ((r_net->dest_state & SCTP_ADDR_REACHABLE) == 0) { 696 r_net->dest_state |= SCTP_ADDR_REACHABLE; 697 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 698 0, (void *)r_net, SCTP_SO_NOT_LOCKED); 699 } 700 if (r_net->dest_state & SCTP_ADDR_PF) { 701 r_net->dest_state &= ~SCTP_ADDR_PF; 702 stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 703 } 704 if (old_error_counter > 0) { 705 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, 706 stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5); 707 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net); 708 } 709 if (r_net == stcb->asoc.primary_destination) { 710 if (stcb->asoc.alternate) { 711 /* release the alternate, primary is good */ 712 sctp_free_remote_addr(stcb->asoc.alternate); 713 stcb->asoc.alternate = NULL; 714 } 715 } 716 /* Mobility adaptation */ 717 if (req_prim) { 718 if ((sctp_is_mobility_feature_on(stcb->sctp_ep, 719 SCTP_MOBILITY_BASE) || 720 sctp_is_mobility_feature_on(stcb->sctp_ep, 721 SCTP_MOBILITY_FASTHANDOFF)) && 722 sctp_is_mobility_feature_on(stcb->sctp_ep, 723 SCTP_MOBILITY_PRIM_DELETED)) { 724 sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, 725 stcb->sctp_ep, stcb, NULL, 726 SCTP_FROM_SCTP_INPUT + SCTP_LOC_6); 727 if (sctp_is_mobility_feature_on(stcb->sctp_ep, 728 SCTP_MOBILITY_FASTHANDOFF)) { 729 sctp_assoc_immediate_retrans(stcb, 730 stcb->asoc.primary_destination); 731 } 732 if (sctp_is_mobility_feature_on(stcb->sctp_ep, 733 SCTP_MOBILITY_BASE)) { 734 sctp_move_chunks_from_net(stcb, 735 stcb->asoc.deleted_primary); 736 } 737 sctp_delete_prim_timer(stcb->sctp_ep, stcb); 738 } 739 } 740 } 741 742 static int 743 sctp_handle_nat_colliding_state(struct sctp_tcb *stcb) 744 { 745 /* 746 * Return 0 means we want you to proceed with the abort 747 * non-zero means no abort processing. 748 */ 749 uint32_t new_vtag; 750 struct sctpasochead *head; 751 752 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 753 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 754 atomic_add_int(&stcb->asoc.refcnt, 1); 755 SCTP_TCB_UNLOCK(stcb); 756 SCTP_INP_INFO_WLOCK(); 757 SCTP_TCB_LOCK(stcb); 758 atomic_subtract_int(&stcb->asoc.refcnt, 1); 759 } else { 760 return (0); 761 } 762 new_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1); 763 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 764 /* generate a new vtag and send init */ 765 LIST_REMOVE(stcb, sctp_asocs); 766 stcb->asoc.my_vtag = new_vtag; 767 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))]; 768 /* put it in the bucket in the vtag hash of assoc's for the system */ 769 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 770 SCTP_INP_INFO_WUNLOCK(); 771 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 772 return (1); 773 } else { 774 /* treat like a case where the cookie expired i.e.: 775 * - dump current cookie. 776 * - generate a new vtag. 777 * - resend init. 778 */ 779 /* generate a new vtag and send init */ 780 LIST_REMOVE(stcb, sctp_asocs); 781 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); 782 sctp_stop_all_cookie_timers(stcb); 783 sctp_toss_old_cookies(stcb, &stcb->asoc); 784 stcb->asoc.my_vtag = new_vtag; 785 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))]; 786 /* put it in the bucket in the vtag hash of assoc's for the system */ 787 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 788 SCTP_INP_INFO_WUNLOCK(); 789 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 790 return (1); 791 } 792 return (0); 793 } 794 795 static int 796 sctp_handle_nat_missing_state(struct sctp_tcb *stcb, 797 struct sctp_nets *net) 798 { 799 /* return 0 means we want you to proceed with the abort 800 * non-zero means no abort processing 801 */ 802 if (stcb->asoc.auth_supported == 0) { 803 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n"); 804 return (0); 805 } 806 sctp_asconf_send_nat_state_update(stcb, net); 807 return (1); 808 } 809 810 /* Returns 1 if the stcb was aborted, 0 otherwise */ 811 static int 812 sctp_handle_abort(struct sctp_abort_chunk *abort, 813 struct sctp_tcb *stcb, struct sctp_nets *net) 814 { 815 #if defined(__APPLE__) && !defined(__Userspace__) 816 struct socket *so; 817 #endif 818 uint16_t len; 819 uint16_t error; 820 821 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n"); 822 if (stcb == NULL) 823 return (0); 824 825 len = ntohs(abort->ch.chunk_length); 826 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_error_cause)) { 827 /* Need to check the cause codes for our 828 * two magic nat aborts which don't kill the assoc 829 * necessarily. 830 */ 831 struct sctp_error_cause *cause; 832 833 cause = (struct sctp_error_cause *)(abort + 1); 834 error = ntohs(cause->code); 835 if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) { 836 SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state, ABORT flags:%x\n", 837 abort->ch.chunk_flags); 838 if (sctp_handle_nat_colliding_state(stcb)) { 839 return (0); 840 } 841 } else if (error == SCTP_CAUSE_NAT_MISSING_STATE) { 842 SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state, ABORT flags:%x\n", 843 abort->ch.chunk_flags); 844 if (sctp_handle_nat_missing_state(stcb, net)) { 845 return (0); 846 } 847 } 848 } else { 849 error = 0; 850 } 851 /* stop any receive timers */ 852 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 853 SCTP_FROM_SCTP_INPUT + SCTP_LOC_7); 854 /* notify user of the abort and clean up... */ 855 sctp_abort_notification(stcb, true, false, error, abort, SCTP_SO_NOT_LOCKED); 856 /* free the tcb */ 857 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 858 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 859 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 860 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 861 } 862 #ifdef SCTP_ASOCLOG_OF_TSNS 863 sctp_print_out_track_log(stcb); 864 #endif 865 #if defined(__APPLE__) && !defined(__Userspace__) 866 so = SCTP_INP_SO(stcb->sctp_ep); 867 atomic_add_int(&stcb->asoc.refcnt, 1); 868 SCTP_TCB_UNLOCK(stcb); 869 SCTP_SOCKET_LOCK(so, 1); 870 SCTP_TCB_LOCK(stcb); 871 atomic_subtract_int(&stcb->asoc.refcnt, 1); 872 #endif 873 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 874 SCTP_FROM_SCTP_INPUT + SCTP_LOC_8); 875 #if defined(__APPLE__) && !defined(__Userspace__) 876 SCTP_SOCKET_UNLOCK(so, 1); 877 #endif 878 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n"); 879 return (1); 880 } 881 882 static void 883 sctp_start_net_timers(struct sctp_tcb *stcb) 884 { 885 uint32_t cnt_hb_sent; 886 struct sctp_nets *net; 887 888 cnt_hb_sent = 0; 889 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 890 /* For each network start: 891 * 1) A pmtu timer. 892 * 2) A HB timer 893 * 3) If the dest in unconfirmed send 894 * a hb as well if under max_hb_burst have 895 * been sent. 896 */ 897 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net); 898 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 899 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) && 900 (cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) { 901 sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED); 902 cnt_hb_sent++; 903 } 904 } 905 if (cnt_hb_sent) { 906 sctp_chunk_output(stcb->sctp_ep, stcb, 907 SCTP_OUTPUT_FROM_COOKIE_ACK, 908 SCTP_SO_NOT_LOCKED); 909 } 910 } 911 912 static void 913 sctp_check_data_from_peer(struct sctp_tcb *stcb, int *abort_flag) 914 { 915 char msg[SCTP_DIAG_INFO_LEN]; 916 struct sctp_association *asoc; 917 struct mbuf *op_err; 918 unsigned int i; 919 920 *abort_flag = 0; 921 asoc = &stcb->asoc; 922 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn) || 923 SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) { 924 SCTP_SNPRINTF(msg, sizeof(msg), "Missing TSN"); 925 *abort_flag = 1; 926 } 927 if (!*abort_flag) { 928 for (i = 0; i < asoc->streamincnt; i++) { 929 if (!TAILQ_EMPTY(&asoc->strmin[i].inqueue) || 930 !TAILQ_EMPTY(&asoc->strmin[i].uno_inqueue)) { 931 SCTP_SNPRINTF(msg, sizeof(msg), "Missing user data"); 932 *abort_flag = 1; 933 break; 934 } 935 } 936 } 937 if (*abort_flag) { 938 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 939 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INPUT + SCTP_LOC_9; 940 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 941 } 942 } 943 944 static void 945 sctp_handle_shutdown(struct sctp_shutdown_chunk *cp, 946 struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag) 947 { 948 int some_on_streamwheel; 949 int old_state; 950 #if defined(__APPLE__) && !defined(__Userspace__) 951 struct socket *so; 952 #endif 953 954 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_shutdown: handling SHUTDOWN\n"); 955 if (stcb == NULL) 956 return; 957 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 958 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 959 return; 960 } 961 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) { 962 /* Shutdown NOT the expected size */ 963 return; 964 } 965 old_state = SCTP_GET_STATE(stcb); 966 sctp_update_acked(stcb, cp, abort_flag); 967 if (*abort_flag) { 968 return; 969 } 970 sctp_check_data_from_peer(stcb, abort_flag); 971 if (*abort_flag) { 972 return; 973 } 974 if (stcb->sctp_socket) { 975 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) && 976 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) && 977 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT)) { 978 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_RECEIVED); 979 /* notify upper layer that peer has initiated a shutdown */ 980 sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 981 982 /* reset time */ 983 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 984 } 985 } 986 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) { 987 /* 988 * stop the shutdown timer, since we WILL move to 989 * SHUTDOWN-ACK-SENT. 990 */ 991 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, 992 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9); 993 } 994 /* Now is there unsent data on a stream somewhere? */ 995 some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED); 996 997 if (!TAILQ_EMPTY(&stcb->asoc.send_queue) || 998 !TAILQ_EMPTY(&stcb->asoc.sent_queue) || 999 some_on_streamwheel) { 1000 /* By returning we will push more data out */ 1001 return; 1002 } else { 1003 /* no outstanding data to send, so move on... */ 1004 /* send SHUTDOWN-ACK */ 1005 /* move to SHUTDOWN-ACK-SENT state */ 1006 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 1007 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1008 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1009 } 1010 if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) { 1011 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT); 1012 sctp_stop_timers_for_shutdown(stcb); 1013 sctp_send_shutdown_ack(stcb, net); 1014 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 1015 stcb->sctp_ep, stcb, net); 1016 } else if (old_state == SCTP_STATE_SHUTDOWN_ACK_SENT) { 1017 sctp_send_shutdown_ack(stcb, net); 1018 } 1019 } 1020 } 1021 1022 static void 1023 sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED, 1024 struct sctp_tcb *stcb, 1025 struct sctp_nets *net) 1026 { 1027 int abort_flag; 1028 #if defined(__APPLE__) && !defined(__Userspace__) 1029 struct socket *so; 1030 1031 so = SCTP_INP_SO(stcb->sctp_ep); 1032 #endif 1033 SCTPDBG(SCTP_DEBUG_INPUT2, 1034 "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n"); 1035 if (stcb == NULL) { 1036 return; 1037 } 1038 1039 /* process according to association state */ 1040 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 1041 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 1042 /* unexpected SHUTDOWN-ACK... do OOTB handling... */ 1043 sctp_send_shutdown_complete(stcb, net, 1); 1044 SCTP_TCB_UNLOCK(stcb); 1045 return; 1046 } 1047 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) && 1048 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 1049 /* unexpected SHUTDOWN-ACK... so ignore... */ 1050 SCTP_TCB_UNLOCK(stcb); 1051 return; 1052 } 1053 sctp_check_data_from_peer(stcb, &abort_flag); 1054 if (abort_flag) { 1055 return; 1056 } 1057 #ifdef INVARIANTS 1058 if (!TAILQ_EMPTY(&stcb->asoc.send_queue) || 1059 !TAILQ_EMPTY(&stcb->asoc.sent_queue) || 1060 sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) { 1061 panic("Queues are not empty when handling SHUTDOWN-ACK"); 1062 } 1063 #endif 1064 /* stop the timer */ 1065 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, 1066 SCTP_FROM_SCTP_INPUT + SCTP_LOC_10); 1067 /* send SHUTDOWN-COMPLETE */ 1068 sctp_send_shutdown_complete(stcb, net, 0); 1069 /* notify upper layer protocol */ 1070 if (stcb->sctp_socket) { 1071 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1072 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 1073 SCTP_SB_CLEAR(stcb->sctp_socket->so_snd); 1074 } 1075 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 1076 } 1077 SCTP_STAT_INCR_COUNTER32(sctps_shutdown); 1078 /* free the TCB but first save off the ep */ 1079 #if defined(__APPLE__) && !defined(__Userspace__) 1080 atomic_add_int(&stcb->asoc.refcnt, 1); 1081 SCTP_TCB_UNLOCK(stcb); 1082 SCTP_SOCKET_LOCK(so, 1); 1083 SCTP_TCB_LOCK(stcb); 1084 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1085 #endif 1086 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 1087 SCTP_FROM_SCTP_INPUT + SCTP_LOC_11); 1088 #if defined(__APPLE__) && !defined(__Userspace__) 1089 SCTP_SOCKET_UNLOCK(so, 1); 1090 #endif 1091 } 1092 1093 static void 1094 sctp_process_unrecog_chunk(struct sctp_tcb *stcb, uint8_t chunk_type) 1095 { 1096 switch (chunk_type) { 1097 case SCTP_ASCONF_ACK: 1098 case SCTP_ASCONF: 1099 sctp_asconf_cleanup(stcb); 1100 break; 1101 case SCTP_IFORWARD_CUM_TSN: 1102 case SCTP_FORWARD_CUM_TSN: 1103 stcb->asoc.prsctp_supported = 0; 1104 break; 1105 default: 1106 SCTPDBG(SCTP_DEBUG_INPUT2, 1107 "Peer does not support chunk type %d (0x%x).\n", 1108 chunk_type, chunk_type); 1109 break; 1110 } 1111 } 1112 1113 /* 1114 * Skip past the param header and then we will find the param that caused the 1115 * problem. There are a number of param's in a ASCONF OR the prsctp param 1116 * these will turn of specific features. 1117 * XXX: Is this the right thing to do? 1118 */ 1119 static void 1120 sctp_process_unrecog_param(struct sctp_tcb *stcb, uint16_t parameter_type) 1121 { 1122 switch (parameter_type) { 1123 /* pr-sctp draft */ 1124 case SCTP_PRSCTP_SUPPORTED: 1125 stcb->asoc.prsctp_supported = 0; 1126 break; 1127 case SCTP_SUPPORTED_CHUNK_EXT: 1128 break; 1129 /* draft-ietf-tsvwg-addip-sctp */ 1130 case SCTP_HAS_NAT_SUPPORT: 1131 stcb->asoc.peer_supports_nat = 0; 1132 break; 1133 case SCTP_ADD_IP_ADDRESS: 1134 case SCTP_DEL_IP_ADDRESS: 1135 case SCTP_SET_PRIM_ADDR: 1136 stcb->asoc.asconf_supported = 0; 1137 break; 1138 case SCTP_SUCCESS_REPORT: 1139 case SCTP_ERROR_CAUSE_IND: 1140 SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n"); 1141 SCTPDBG(SCTP_DEBUG_INPUT2, 1142 "Turning off ASCONF to this strange peer\n"); 1143 stcb->asoc.asconf_supported = 0; 1144 break; 1145 default: 1146 SCTPDBG(SCTP_DEBUG_INPUT2, 1147 "Peer does not support param type %d (0x%x)??\n", 1148 parameter_type, parameter_type); 1149 break; 1150 } 1151 } 1152 1153 static int 1154 sctp_handle_error(struct sctp_chunkhdr *ch, 1155 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit) 1156 { 1157 struct sctp_error_cause *cause; 1158 struct sctp_association *asoc; 1159 uint32_t remaining_length, adjust; 1160 uint16_t code, cause_code, cause_length; 1161 #if defined(__APPLE__) && !defined(__Userspace__) 1162 struct socket *so; 1163 #endif 1164 1165 /* parse through all of the errors and process */ 1166 asoc = &stcb->asoc; 1167 cause = (struct sctp_error_cause *)((caddr_t)ch + 1168 sizeof(struct sctp_chunkhdr)); 1169 remaining_length = ntohs(ch->chunk_length); 1170 if (remaining_length > limit) { 1171 remaining_length = limit; 1172 } 1173 if (remaining_length >= sizeof(struct sctp_chunkhdr)) { 1174 remaining_length -= sizeof(struct sctp_chunkhdr); 1175 } else { 1176 remaining_length = 0; 1177 } 1178 code = 0; 1179 while (remaining_length >= sizeof(struct sctp_error_cause)) { 1180 /* Process an Error Cause */ 1181 cause_code = ntohs(cause->code); 1182 cause_length = ntohs(cause->length); 1183 if ((cause_length > remaining_length) || (cause_length == 0)) { 1184 /* Invalid cause length, possibly due to truncation. */ 1185 SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in cause - bytes left: %u cause length: %u\n", 1186 remaining_length, cause_length); 1187 return (0); 1188 } 1189 if (code == 0) { 1190 /* report the first error cause */ 1191 code = cause_code; 1192 } 1193 switch (cause_code) { 1194 case SCTP_CAUSE_INVALID_STREAM: 1195 case SCTP_CAUSE_MISSING_PARAM: 1196 case SCTP_CAUSE_INVALID_PARAM: 1197 case SCTP_CAUSE_NO_USER_DATA: 1198 SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %u back? We have a bug :/ (or do they?)\n", 1199 cause_code); 1200 break; 1201 case SCTP_CAUSE_NAT_COLLIDING_STATE: 1202 SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state, ERROR flags: %x\n", 1203 ch->chunk_flags); 1204 if (sctp_handle_nat_colliding_state(stcb)) { 1205 return (0); 1206 } 1207 break; 1208 case SCTP_CAUSE_NAT_MISSING_STATE: 1209 SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state, ERROR flags: %x\n", 1210 ch->chunk_flags); 1211 if (sctp_handle_nat_missing_state(stcb, net)) { 1212 return (0); 1213 } 1214 break; 1215 case SCTP_CAUSE_STALE_COOKIE: 1216 /* 1217 * We only act if we have echoed a cookie and are 1218 * waiting. 1219 */ 1220 if ((cause_length >= sizeof(struct sctp_error_stale_cookie)) && 1221 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 1222 struct timeval now; 1223 struct sctp_error_stale_cookie *stale_cookie; 1224 uint64_t stale_time; 1225 1226 asoc->stale_cookie_count++; 1227 if (asoc->stale_cookie_count > asoc->max_init_times) { 1228 sctp_abort_notification(stcb, false, true, 0, NULL, SCTP_SO_NOT_LOCKED); 1229 #if defined(__APPLE__) && !defined(__Userspace__) 1230 so = SCTP_INP_SO(stcb->sctp_ep); 1231 atomic_add_int(&stcb->asoc.refcnt, 1); 1232 SCTP_TCB_UNLOCK(stcb); 1233 SCTP_SOCKET_LOCK(so, 1); 1234 SCTP_TCB_LOCK(stcb); 1235 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1236 #endif 1237 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 1238 SCTP_FROM_SCTP_INPUT + SCTP_LOC_12); 1239 #if defined(__APPLE__) && !defined(__Userspace__) 1240 SCTP_SOCKET_UNLOCK(so, 1); 1241 #endif 1242 return (-1); 1243 } 1244 stale_cookie = (struct sctp_error_stale_cookie *)cause; 1245 stale_time = ntohl(stale_cookie->stale_time); 1246 if (stale_time == 0) { 1247 /* Use an RTT as an approximation. */ 1248 (void)SCTP_GETTIME_TIMEVAL(&now); 1249 timevalsub(&now, &asoc->time_entered); 1250 stale_time = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 1251 if (stale_time == 0) { 1252 stale_time = 1; 1253 } 1254 } 1255 /* 1256 * stale_time is in usec, convert it to msec. 1257 * Round upwards, to ensure that it is non-zero. 1258 */ 1259 stale_time = (stale_time + 999) / 1000; 1260 /* Double it, to be more robust on RTX. */ 1261 stale_time = 2 * stale_time; 1262 asoc->cookie_preserve_req = (uint32_t)stale_time; 1263 if (asoc->overall_error_count == 0) { 1264 sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, 1265 SCTP_RTT_FROM_NON_DATA); 1266 } 1267 asoc->overall_error_count = 0; 1268 /* Blast back to INIT state */ 1269 sctp_toss_old_cookies(stcb, &stcb->asoc); 1270 sctp_stop_all_cookie_timers(stcb); 1271 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); 1272 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 1273 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 1274 } 1275 break; 1276 case SCTP_CAUSE_UNRESOLVABLE_ADDR: 1277 /* 1278 * Nothing we can do here, we don't do hostname 1279 * addresses so if the peer does not like my IPv6 1280 * (or IPv4 for that matter) it does not matter. If 1281 * they don't support that type of address, they can 1282 * NOT possibly get that packet type... i.e. with no 1283 * IPv6 you can't receive a IPv6 packet. so we can 1284 * safely ignore this one. If we ever added support 1285 * for HOSTNAME Addresses, then we would need to do 1286 * something here. 1287 */ 1288 break; 1289 case SCTP_CAUSE_UNRECOG_CHUNK: 1290 if (cause_length >= sizeof(struct sctp_error_unrecognized_chunk)) { 1291 struct sctp_error_unrecognized_chunk *unrec_chunk; 1292 1293 unrec_chunk = (struct sctp_error_unrecognized_chunk *)cause; 1294 sctp_process_unrecog_chunk(stcb, unrec_chunk->ch.chunk_type); 1295 } 1296 break; 1297 case SCTP_CAUSE_UNRECOG_PARAM: 1298 /* XXX: We only consider the first parameter */ 1299 if (cause_length >= sizeof(struct sctp_error_cause) + sizeof(struct sctp_paramhdr)) { 1300 struct sctp_paramhdr *unrec_parameter; 1301 1302 unrec_parameter = (struct sctp_paramhdr *)(cause + 1); 1303 sctp_process_unrecog_param(stcb, ntohs(unrec_parameter->param_type)); 1304 } 1305 break; 1306 case SCTP_CAUSE_COOKIE_IN_SHUTDOWN: 1307 /* 1308 * We ignore this since the timer will drive out a 1309 * new cookie anyway and there timer will drive us 1310 * to send a SHUTDOWN_COMPLETE. We can't send one 1311 * here since we don't have their tag. 1312 */ 1313 break; 1314 case SCTP_CAUSE_DELETING_LAST_ADDR: 1315 case SCTP_CAUSE_RESOURCE_SHORTAGE: 1316 case SCTP_CAUSE_DELETING_SRC_ADDR: 1317 /* 1318 * We should NOT get these here, but in a 1319 * ASCONF-ACK. 1320 */ 1321 SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a error cause with code %u.\n", 1322 cause_code); 1323 break; 1324 case SCTP_CAUSE_OUT_OF_RESC: 1325 /* 1326 * And what, pray tell do we do with the fact that 1327 * the peer is out of resources? Not really sure we 1328 * could do anything but abort. I suspect this 1329 * should have came WITH an abort instead of in a 1330 * OP-ERROR. 1331 */ 1332 break; 1333 default: 1334 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown code 0x%x\n", 1335 cause_code); 1336 break; 1337 } 1338 adjust = SCTP_SIZE32(cause_length); 1339 if (remaining_length >= adjust) { 1340 remaining_length -= adjust; 1341 } else { 1342 remaining_length = 0; 1343 } 1344 cause = (struct sctp_error_cause *)((caddr_t)cause + adjust); 1345 } 1346 sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, code, ch, SCTP_SO_NOT_LOCKED); 1347 return (0); 1348 } 1349 1350 static int 1351 sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset, 1352 struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, 1353 struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 1354 struct sctp_nets *net, int *abort_no_unlock, 1355 #if defined(__FreeBSD__) && !defined(__Userspace__) 1356 uint8_t mflowtype, uint32_t mflowid, 1357 #endif 1358 uint32_t vrf_id) 1359 { 1360 struct sctp_init_ack *init_ack; 1361 struct mbuf *op_err; 1362 1363 SCTPDBG(SCTP_DEBUG_INPUT2, 1364 "sctp_handle_init_ack: handling INIT-ACK\n"); 1365 1366 if (stcb == NULL) { 1367 SCTPDBG(SCTP_DEBUG_INPUT2, 1368 "sctp_handle_init_ack: TCB is null\n"); 1369 return (-1); 1370 } 1371 /* Only process the INIT-ACK chunk in COOKIE WAIT state.*/ 1372 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 1373 init_ack = &cp->init; 1374 /* Validate parameters. */ 1375 if ((ntohl(init_ack->initiate_tag) == 0) || 1376 (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) || 1377 (ntohs(init_ack->num_inbound_streams) == 0) || 1378 (ntohs(init_ack->num_outbound_streams) == 0)) { 1379 /* One of the mandatory parameters is illegal. */ 1380 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); 1381 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 1382 src, dst, sh, op_err, 1383 #if defined(__FreeBSD__) && !defined(__Userspace__) 1384 mflowtype, mflowid, 1385 #endif 1386 vrf_id, net->port); 1387 *abort_no_unlock = 1; 1388 return (-1); 1389 } 1390 if (stcb->asoc.primary_destination->dest_state & 1391 SCTP_ADDR_UNCONFIRMED) { 1392 /* 1393 * The primary is where we sent the INIT, we can 1394 * always consider it confirmed when the INIT-ACK is 1395 * returned. Do this before we load addresses 1396 * though. 1397 */ 1398 stcb->asoc.primary_destination->dest_state &= 1399 ~SCTP_ADDR_UNCONFIRMED; 1400 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 1401 stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED); 1402 } 1403 if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb, 1404 net, abort_no_unlock, 1405 #if defined(__FreeBSD__) && !defined(__Userspace__) 1406 mflowtype, mflowid, 1407 #endif 1408 vrf_id) < 0) { 1409 /* error in parsing parameters */ 1410 return (-1); 1411 } 1412 /* Update our state. */ 1413 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n"); 1414 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_ECHOED); 1415 1416 /* Reset the RTO calculation. */ 1417 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 1418 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 1419 stcb->asoc.overall_error_count, 1420 0, 1421 SCTP_FROM_SCTP_INPUT, 1422 __LINE__); 1423 } 1424 stcb->asoc.overall_error_count = 0; 1425 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 1426 /* 1427 * Collapse the init timer back in case of a exponential 1428 * backoff. 1429 */ 1430 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep, 1431 stcb, net); 1432 /* 1433 * The output routine at the end of the inbound data processing 1434 * will cause the cookie to be sent. 1435 */ 1436 SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n"); 1437 return (0); 1438 } else { 1439 return (-1); 1440 } 1441 } 1442 1443 static struct sctp_tcb * 1444 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, 1445 struct sockaddr *src, struct sockaddr *dst, 1446 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 1447 struct sctp_inpcb *inp, struct sctp_nets **netp, 1448 struct sockaddr *init_src, int *notification, 1449 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 1450 #if defined(__FreeBSD__) && !defined(__Userspace__) 1451 uint8_t mflowtype, uint32_t mflowid, 1452 #endif 1453 uint32_t vrf_id, uint16_t port); 1454 1455 /* 1456 * handle a state cookie for an existing association m: input packet mbuf 1457 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a 1458 * "split" mbuf and the cookie signature does not exist offset: offset into 1459 * mbuf to the cookie-echo chunk 1460 */ 1461 static struct sctp_tcb * 1462 sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, 1463 struct sockaddr *src, struct sockaddr *dst, 1464 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 1465 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp, 1466 struct sockaddr *init_src, int *notification, 1467 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 1468 #if defined(__FreeBSD__) && !defined(__Userspace__) 1469 uint8_t mflowtype, uint32_t mflowid, 1470 #endif 1471 uint32_t vrf_id, uint16_t port) 1472 { 1473 struct sctp_association *asoc; 1474 struct sctp_init_chunk *init_cp, init_buf; 1475 struct sctp_init_ack_chunk *initack_cp, initack_buf; 1476 struct sctp_asconf_addr *aparam, *naparam; 1477 struct sctp_asconf_ack *aack, *naack; 1478 struct sctp_tmit_chunk *chk, *nchk; 1479 struct sctp_stream_reset_list *strrst, *nstrrst; 1480 struct sctp_queued_to_read *sq, *nsq; 1481 struct sctp_nets *net; 1482 struct mbuf *op_err; 1483 int init_offset, initack_offset, i; 1484 int retval; 1485 int spec_flag = 0; 1486 uint32_t how_indx; 1487 #if defined(SCTP_DETAILED_STR_STATS) 1488 int j; 1489 #endif 1490 1491 net = *netp; 1492 /* I know that the TCB is non-NULL from the caller */ 1493 asoc = &stcb->asoc; 1494 for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) { 1495 if (asoc->cookie_how[how_indx] == 0) 1496 break; 1497 } 1498 if (how_indx < sizeof(asoc->cookie_how)) { 1499 asoc->cookie_how[how_indx] = 1; 1500 } 1501 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 1502 /* SHUTDOWN came in after sending INIT-ACK */ 1503 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination); 1504 op_err = sctp_generate_cause(SCTP_CAUSE_COOKIE_IN_SHUTDOWN, ""); 1505 sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err, 1506 #if defined(__FreeBSD__) && !defined(__Userspace__) 1507 mflowtype, mflowid, inp->fibnum, 1508 #endif 1509 vrf_id, net->port); 1510 if (how_indx < sizeof(asoc->cookie_how)) 1511 asoc->cookie_how[how_indx] = 2; 1512 SCTP_TCB_UNLOCK(stcb); 1513 return (NULL); 1514 } 1515 /* 1516 * find and validate the INIT chunk in the cookie (peer's info) the 1517 * INIT should start after the cookie-echo header struct (chunk 1518 * header, state cookie header struct) 1519 */ 1520 init_offset = offset += sizeof(struct sctp_cookie_echo_chunk); 1521 1522 init_cp = (struct sctp_init_chunk *) 1523 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk), 1524 (uint8_t *) & init_buf); 1525 if (init_cp == NULL) { 1526 /* could not pull a INIT chunk in cookie */ 1527 SCTP_TCB_UNLOCK(stcb); 1528 return (NULL); 1529 } 1530 if (init_cp->ch.chunk_type != SCTP_INITIATION) { 1531 SCTP_TCB_UNLOCK(stcb); 1532 return (NULL); 1533 } 1534 /* 1535 * find and validate the INIT-ACK chunk in the cookie (my info) the 1536 * INIT-ACK follows the INIT chunk 1537 */ 1538 initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length)); 1539 initack_cp = (struct sctp_init_ack_chunk *) 1540 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk), 1541 (uint8_t *) & initack_buf); 1542 if (initack_cp == NULL) { 1543 /* could not pull INIT-ACK chunk in cookie */ 1544 SCTP_TCB_UNLOCK(stcb); 1545 return (NULL); 1546 } 1547 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { 1548 SCTP_TCB_UNLOCK(stcb); 1549 return (NULL); 1550 } 1551 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1552 (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) { 1553 /* 1554 * case D in Section 5.2.4 Table 2: MMAA process accordingly 1555 * to get into the OPEN state 1556 */ 1557 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1558 /*- 1559 * Opps, this means that we somehow generated two vtag's 1560 * the same. I.e. we did: 1561 * Us Peer 1562 * <---INIT(tag=a)------ 1563 * ----INIT-ACK(tag=t)--> 1564 * ----INIT(tag=t)------> *1 1565 * <---INIT-ACK(tag=a)--- 1566 * <----CE(tag=t)------------- *2 1567 * 1568 * At point *1 we should be generating a different 1569 * tag t'. Which means we would throw away the CE and send 1570 * ours instead. Basically this is case C (throw away side). 1571 */ 1572 if (how_indx < sizeof(asoc->cookie_how)) 1573 asoc->cookie_how[how_indx] = 17; 1574 SCTP_TCB_UNLOCK(stcb); 1575 return (NULL); 1576 } 1577 switch (SCTP_GET_STATE(stcb)) { 1578 case SCTP_STATE_COOKIE_WAIT: 1579 case SCTP_STATE_COOKIE_ECHOED: 1580 /* 1581 * INIT was sent but got a COOKIE_ECHO with the 1582 * correct tags... just accept it...but we must 1583 * process the init so that we can make sure we 1584 * have the right seq no's. 1585 */ 1586 /* First we must process the INIT !! */ 1587 if (sctp_process_init(init_cp, stcb) < 0) { 1588 if (how_indx < sizeof(asoc->cookie_how)) 1589 asoc->cookie_how[how_indx] = 3; 1590 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 1591 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n"); 1592 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 1593 src, dst, sh, op_err, 1594 #if defined(__FreeBSD__) && !defined(__Userspace__) 1595 mflowtype, mflowid, 1596 #endif 1597 vrf_id, net->port); 1598 return (NULL); 1599 } 1600 /* we have already processed the INIT so no problem */ 1601 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, 1602 stcb, net, 1603 SCTP_FROM_SCTP_INPUT + SCTP_LOC_13); 1604 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, 1605 stcb, net, 1606 SCTP_FROM_SCTP_INPUT + SCTP_LOC_14); 1607 /* update current state */ 1608 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) 1609 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1610 else 1611 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1612 1613 SCTP_SET_STATE(stcb, SCTP_STATE_OPEN); 1614 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1615 sctp_stop_all_cookie_timers(stcb); 1616 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1617 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1618 (!SCTP_IS_LISTENING(inp))) { 1619 #if defined(__APPLE__) && !defined(__Userspace__) 1620 struct socket *so; 1621 #endif 1622 /* 1623 * Here is where collision would go if we 1624 * did a connect() and instead got a 1625 * init/init-ack/cookie done before the 1626 * init-ack came back.. 1627 */ 1628 sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_CONNECTED); 1629 #if defined(__APPLE__) && !defined(__Userspace__) 1630 so = SCTP_INP_SO(stcb->sctp_ep); 1631 atomic_add_int(&stcb->asoc.refcnt, 1); 1632 SCTP_TCB_UNLOCK(stcb); 1633 SCTP_SOCKET_LOCK(so, 1); 1634 SCTP_TCB_LOCK(stcb); 1635 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1636 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1637 SCTP_TCB_UNLOCK(stcb); 1638 SCTP_SOCKET_UNLOCK(so, 1); 1639 return (NULL); 1640 } 1641 #endif 1642 soisconnected(stcb->sctp_socket); 1643 #if defined(__APPLE__) && !defined(__Userspace__) 1644 SCTP_SOCKET_UNLOCK(so, 1); 1645 #endif 1646 } 1647 /* notify upper layer */ 1648 *notification = SCTP_NOTIFY_ASSOC_UP; 1649 net->hb_responded = 1; 1650 if (stcb->asoc.sctp_autoclose_ticks && 1651 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) { 1652 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, 1653 inp, stcb, NULL); 1654 } 1655 break; 1656 default: 1657 /* 1658 * we're in the OPEN state (or beyond), so 1659 * peer must have simply lost the COOKIE-ACK 1660 */ 1661 break; 1662 } /* end switch */ 1663 sctp_stop_all_cookie_timers(stcb); 1664 if ((retval = sctp_load_addresses_from_init(stcb, m, 1665 init_offset + sizeof(struct sctp_init_chunk), 1666 initack_offset, src, dst, init_src, stcb->asoc.port)) < 0) { 1667 if (how_indx < sizeof(asoc->cookie_how)) 1668 asoc->cookie_how[how_indx] = 4; 1669 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 1670 "Problem with address parameters"); 1671 SCTPDBG(SCTP_DEBUG_INPUT1, 1672 "Load addresses from INIT causes an abort %d\n", 1673 retval); 1674 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 1675 src, dst, sh, op_err, 1676 #if defined(__FreeBSD__) && !defined(__Userspace__) 1677 mflowtype, mflowid, 1678 #endif 1679 vrf_id, net->port); 1680 return (NULL); 1681 } 1682 /* respond with a COOKIE-ACK */ 1683 sctp_toss_old_cookies(stcb, asoc); 1684 sctp_send_cookie_ack(stcb); 1685 if (how_indx < sizeof(asoc->cookie_how)) 1686 asoc->cookie_how[how_indx] = 5; 1687 return (stcb); 1688 } 1689 1690 if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1691 ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag && 1692 cookie->tie_tag_my_vtag == 0 && 1693 cookie->tie_tag_peer_vtag == 0) { 1694 /* 1695 * case C in Section 5.2.4 Table 2: XMOO silently discard 1696 */ 1697 if (how_indx < sizeof(asoc->cookie_how)) 1698 asoc->cookie_how[how_indx] = 6; 1699 SCTP_TCB_UNLOCK(stcb); 1700 return (NULL); 1701 } 1702 /* If nat support, and the below and stcb is established, 1703 * send back a ABORT(colliding state) if we are established. 1704 */ 1705 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) && 1706 (asoc->peer_supports_nat) && 1707 ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1708 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) || 1709 (asoc->peer_vtag == 0)))) { 1710 /* Special case - Peer's support nat. We may have 1711 * two init's that we gave out the same tag on since 1712 * one was not established.. i.e. we get INIT from host-1 1713 * behind the nat and we respond tag-a, we get a INIT from 1714 * host-2 behind the nat and we get tag-a again. Then we 1715 * bring up host-1 (or 2's) assoc, Then comes the cookie 1716 * from hsot-2 (or 1). Now we have colliding state. We must 1717 * send an abort here with colliding state indication. 1718 */ 1719 op_err = sctp_generate_cause(SCTP_CAUSE_NAT_COLLIDING_STATE, ""); 1720 sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err, 1721 #if defined(__FreeBSD__) && !defined(__Userspace__) 1722 mflowtype, mflowid, inp->fibnum, 1723 #endif 1724 vrf_id, port); 1725 SCTP_TCB_UNLOCK(stcb); 1726 return (NULL); 1727 } 1728 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1729 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) || 1730 (asoc->peer_vtag == 0))) { 1731 /* 1732 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info 1733 * should be ok, re-accept peer info 1734 */ 1735 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1736 /* Extension of case C. 1737 * If we hit this, then the random number 1738 * generator returned the same vtag when we 1739 * first sent our INIT-ACK and when we later sent 1740 * our INIT. The side with the seq numbers that are 1741 * different will be the one that normally would 1742 * have hit case C. This in effect "extends" our vtags 1743 * in this collision case to be 64 bits. The same collision 1744 * could occur aka you get both vtag and seq number the 1745 * same twice in a row.. but is much less likely. If it 1746 * did happen then we would proceed through and bring 1747 * up the assoc.. we may end up with the wrong stream 1748 * setup however.. which would be bad.. but there is 1749 * no way to tell.. until we send on a stream that does 1750 * not exist :-) 1751 */ 1752 if (how_indx < sizeof(asoc->cookie_how)) 1753 asoc->cookie_how[how_indx] = 7; 1754 1755 SCTP_TCB_UNLOCK(stcb); 1756 return (NULL); 1757 } 1758 if (how_indx < sizeof(asoc->cookie_how)) 1759 asoc->cookie_how[how_indx] = 8; 1760 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 1761 SCTP_FROM_SCTP_INPUT + SCTP_LOC_15); 1762 sctp_stop_all_cookie_timers(stcb); 1763 /* 1764 * since we did not send a HB make sure we don't double 1765 * things 1766 */ 1767 net->hb_responded = 1; 1768 if (stcb->asoc.sctp_autoclose_ticks && 1769 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1770 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1771 NULL); 1772 } 1773 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1774 if (asoc->pre_open_streams < asoc->streamoutcnt) { 1775 asoc->pre_open_streams = asoc->streamoutcnt; 1776 } 1777 1778 if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) { 1779 /* Ok the peer probably discarded our 1780 * data (if we echoed a cookie+data). So anything 1781 * on the sent_queue should be marked for 1782 * retransmit, we may not get something to 1783 * kick us so it COULD still take a timeout 1784 * to move these.. but it can't hurt to mark them. 1785 */ 1786 1787 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1788 if (chk->sent < SCTP_DATAGRAM_RESEND) { 1789 chk->sent = SCTP_DATAGRAM_RESEND; 1790 sctp_flight_size_decrease(chk); 1791 sctp_total_flight_decrease(stcb, chk); 1792 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1793 spec_flag++; 1794 } 1795 } 1796 } 1797 /* process the INIT info (peer's info) */ 1798 if (sctp_process_init(init_cp, stcb) < 0) { 1799 if (how_indx < sizeof(asoc->cookie_how)) 1800 asoc->cookie_how[how_indx] = 9; 1801 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 1802 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n"); 1803 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 1804 src, dst, sh, op_err, 1805 #if defined(__FreeBSD__) && !defined(__Userspace__) 1806 mflowtype, mflowid, 1807 #endif 1808 vrf_id, net->port); 1809 return (NULL); 1810 } 1811 if ((retval = sctp_load_addresses_from_init(stcb, m, 1812 init_offset + sizeof(struct sctp_init_chunk), 1813 initack_offset, src, dst, init_src, stcb->asoc.port)) < 0) { 1814 if (how_indx < sizeof(asoc->cookie_how)) 1815 asoc->cookie_how[how_indx] = 10; 1816 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 1817 "Problem with address parameters"); 1818 SCTPDBG(SCTP_DEBUG_INPUT1, 1819 "Load addresses from INIT causes an abort %d\n", 1820 retval); 1821 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 1822 src, dst, sh, op_err, 1823 #if defined(__FreeBSD__) && !defined(__Userspace__) 1824 mflowtype, mflowid, 1825 #endif 1826 vrf_id, net->port); 1827 return (NULL); 1828 } 1829 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 1830 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 1831 *notification = SCTP_NOTIFY_ASSOC_UP; 1832 1833 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1834 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1835 (!SCTP_IS_LISTENING(inp))) { 1836 #if defined(__APPLE__) && !defined(__Userspace__) 1837 struct socket *so; 1838 #endif 1839 sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_CONNECTED); 1840 #if defined(__APPLE__) && !defined(__Userspace__) 1841 so = SCTP_INP_SO(stcb->sctp_ep); 1842 atomic_add_int(&stcb->asoc.refcnt, 1); 1843 SCTP_TCB_UNLOCK(stcb); 1844 SCTP_SOCKET_LOCK(so, 1); 1845 SCTP_TCB_LOCK(stcb); 1846 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1847 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1848 SCTP_TCB_UNLOCK(stcb); 1849 SCTP_SOCKET_UNLOCK(so, 1); 1850 return (NULL); 1851 } 1852 #endif 1853 soisconnected(stcb->sctp_socket); 1854 #if defined(__APPLE__) && !defined(__Userspace__) 1855 SCTP_SOCKET_UNLOCK(so, 1); 1856 #endif 1857 } 1858 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) 1859 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1860 else 1861 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1862 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1863 } else if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) { 1864 SCTP_STAT_INCR_COUNTER32(sctps_restartestab); 1865 } else { 1866 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1867 } 1868 SCTP_SET_STATE(stcb, SCTP_STATE_OPEN); 1869 sctp_stop_all_cookie_timers(stcb); 1870 sctp_toss_old_cookies(stcb, asoc); 1871 sctp_send_cookie_ack(stcb); 1872 if (spec_flag) { 1873 /* only if we have retrans set do we do this. What 1874 * this call does is get only the COOKIE-ACK out 1875 * and then when we return the normal call to 1876 * sctp_chunk_output will get the retrans out 1877 * behind this. 1878 */ 1879 sctp_chunk_output(inp,stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED); 1880 } 1881 if (how_indx < sizeof(asoc->cookie_how)) 1882 asoc->cookie_how[how_indx] = 11; 1883 1884 return (stcb); 1885 } 1886 if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1887 ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) && 1888 cookie->tie_tag_my_vtag == asoc->my_vtag_nonce && 1889 cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce && 1890 cookie->tie_tag_peer_vtag != 0) { 1891 struct sctpasochead *head; 1892 #if defined(__APPLE__) && !defined(__Userspace__) 1893 struct socket *so; 1894 #endif 1895 1896 if (asoc->peer_supports_nat) { 1897 struct sctp_tcb *local_stcb; 1898 1899 /* This is a gross gross hack. 1900 * Just call the cookie_new code since we 1901 * are allowing a duplicate association. 1902 * I hope this works... 1903 */ 1904 local_stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, 1905 sh, cookie, cookie_len, 1906 inp, netp, init_src,notification, 1907 auth_skipped, auth_offset, auth_len, 1908 #if defined(__FreeBSD__) && !defined(__Userspace__) 1909 mflowtype, mflowid, 1910 #endif 1911 vrf_id, port); 1912 if (local_stcb == NULL) { 1913 SCTP_TCB_UNLOCK(stcb); 1914 } 1915 return (local_stcb); 1916 } 1917 /* 1918 * case A in Section 5.2.4 Table 2: XXMM (peer restarted) 1919 */ 1920 /* temp code */ 1921 if (how_indx < sizeof(asoc->cookie_how)) 1922 asoc->cookie_how[how_indx] = 12; 1923 sctp_stop_association_timers(stcb, false); 1924 /* notify upper layer */ 1925 *notification = SCTP_NOTIFY_ASSOC_RESTART; 1926 atomic_add_int(&stcb->asoc.refcnt, 1); 1927 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN) && 1928 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) && 1929 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT)) { 1930 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1931 } 1932 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) { 1933 SCTP_STAT_INCR_GAUGE32(sctps_restartestab); 1934 } else if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) { 1935 SCTP_STAT_INCR_GAUGE32(sctps_collisionestab); 1936 } 1937 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1938 SCTP_SET_STATE(stcb, SCTP_STATE_OPEN); 1939 1940 } else if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) { 1941 /* move to OPEN state, if not in SHUTDOWN_SENT */ 1942 SCTP_SET_STATE(stcb, SCTP_STATE_OPEN); 1943 } 1944 if (asoc->pre_open_streams < asoc->streamoutcnt) { 1945 asoc->pre_open_streams = asoc->streamoutcnt; 1946 } 1947 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); 1948 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number; 1949 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1; 1950 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; 1951 asoc->str_reset_seq_in = asoc->init_seq_number; 1952 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1953 asoc->send_sack = 1; 1954 asoc->data_pkts_seen = 0; 1955 asoc->last_data_chunk_from = NULL; 1956 asoc->last_control_chunk_from = NULL; 1957 asoc->last_net_cmt_send_started = NULL; 1958 if (asoc->mapping_array) { 1959 memset(asoc->mapping_array, 0, 1960 asoc->mapping_array_size); 1961 } 1962 if (asoc->nr_mapping_array) { 1963 memset(asoc->nr_mapping_array, 0, 1964 asoc->mapping_array_size); 1965 } 1966 SCTP_TCB_UNLOCK(stcb); 1967 #if defined(__APPLE__) && !defined(__Userspace__) 1968 so = SCTP_INP_SO(stcb->sctp_ep); 1969 SCTP_SOCKET_LOCK(so, 1); 1970 #endif 1971 SCTP_INP_INFO_WLOCK(); 1972 SCTP_INP_WLOCK(stcb->sctp_ep); 1973 SCTP_TCB_LOCK(stcb); 1974 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1975 /* send up all the data */ 1976 sctp_report_all_outbound(stcb, 0, SCTP_SO_LOCKED); 1977 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1978 stcb->asoc.strmout[i].chunks_on_queues = 0; 1979 #if defined(SCTP_DETAILED_STR_STATS) 1980 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) { 1981 asoc->strmout[i].abandoned_sent[j] = 0; 1982 asoc->strmout[i].abandoned_unsent[j] = 0; 1983 } 1984 #else 1985 asoc->strmout[i].abandoned_sent[0] = 0; 1986 asoc->strmout[i].abandoned_unsent[0] = 0; 1987 #endif 1988 stcb->asoc.strmout[i].next_mid_ordered = 0; 1989 stcb->asoc.strmout[i].next_mid_unordered = 0; 1990 stcb->asoc.strmout[i].sid = i; 1991 stcb->asoc.strmout[i].last_msg_incomplete = 0; 1992 } 1993 TAILQ_FOREACH_SAFE(strrst, &asoc->resetHead, next_resp, nstrrst) { 1994 TAILQ_REMOVE(&asoc->resetHead, strrst, next_resp); 1995 SCTP_FREE(strrst, SCTP_M_STRESET); 1996 } 1997 TAILQ_FOREACH_SAFE(sq, &asoc->pending_reply_queue, next, nsq) { 1998 TAILQ_REMOVE(&asoc->pending_reply_queue, sq, next); 1999 if (sq->data) { 2000 sctp_m_freem(sq->data); 2001 sq->data = NULL; 2002 } 2003 sctp_free_remote_addr(sq->whoFrom); 2004 sq->whoFrom = NULL; 2005 sq->stcb = NULL; 2006 sctp_free_a_readq(stcb, sq); 2007 } 2008 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) { 2009 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 2010 if (chk->data) { 2011 sctp_m_freem(chk->data); 2012 chk->data = NULL; 2013 } 2014 if (chk->holds_key_ref) 2015 sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); 2016 sctp_free_remote_addr(chk->whoTo); 2017 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); 2018 SCTP_DECR_CHK_COUNT(); 2019 } 2020 asoc->ctrl_queue_cnt = 0; 2021 asoc->str_reset = NULL; 2022 asoc->stream_reset_outstanding = 0; 2023 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) { 2024 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next); 2025 if (chk->data) { 2026 sctp_m_freem(chk->data); 2027 chk->data = NULL; 2028 } 2029 if (chk->holds_key_ref) 2030 sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); 2031 sctp_free_remote_addr(chk->whoTo); 2032 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); 2033 SCTP_DECR_CHK_COUNT(); 2034 } 2035 TAILQ_FOREACH_SAFE(aparam, &asoc->asconf_queue, next, naparam) { 2036 TAILQ_REMOVE(&asoc->asconf_queue, aparam, next); 2037 SCTP_FREE(aparam,SCTP_M_ASC_ADDR); 2038 } 2039 TAILQ_FOREACH_SAFE(aack, &asoc->asconf_ack_sent, next, naack) { 2040 TAILQ_REMOVE(&asoc->asconf_ack_sent, aack, next); 2041 if (aack->data != NULL) { 2042 sctp_m_freem(aack->data); 2043 } 2044 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), aack); 2045 } 2046 asoc->rcv_edmid = cookie->rcv_edmid; 2047 2048 /* process the INIT-ACK info (my info) */ 2049 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); 2050 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 2051 2052 /* pull from vtag hash */ 2053 LIST_REMOVE(stcb, sctp_asocs); 2054 /* re-insert to new vtag position */ 2055 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, 2056 SCTP_BASE_INFO(hashasocmark))]; 2057 /* 2058 * put it in the bucket in the vtag hash of assoc's for the 2059 * system 2060 */ 2061 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 2062 2063 SCTP_INP_WUNLOCK(stcb->sctp_ep); 2064 SCTP_INP_INFO_WUNLOCK(); 2065 #if defined(__APPLE__) && !defined(__Userspace__) 2066 SCTP_SOCKET_UNLOCK(so, 1); 2067 #endif 2068 asoc->total_flight = 0; 2069 asoc->total_flight_count = 0; 2070 /* process the INIT info (peer's info) */ 2071 if (sctp_process_init(init_cp, stcb) < 0) { 2072 if (how_indx < sizeof(asoc->cookie_how)) 2073 asoc->cookie_how[how_indx] = 13; 2074 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 2075 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n"); 2076 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 2077 src, dst, sh, op_err, 2078 #if defined(__FreeBSD__) && !defined(__Userspace__) 2079 mflowtype, mflowid, 2080 #endif 2081 vrf_id, net->port); 2082 return (NULL); 2083 } 2084 /* 2085 * since we did not send a HB make sure we don't double 2086 * things 2087 */ 2088 net->hb_responded = 1; 2089 2090 if ((retval = sctp_load_addresses_from_init(stcb, m, 2091 init_offset + sizeof(struct sctp_init_chunk), 2092 initack_offset, src, dst, init_src, stcb->asoc.port)) < 0) { 2093 if (how_indx < sizeof(asoc->cookie_how)) 2094 asoc->cookie_how[how_indx] = 14; 2095 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 2096 "Problem with address parameters"); 2097 SCTPDBG(SCTP_DEBUG_INPUT1, 2098 "Load addresses from INIT causes an abort %d\n", 2099 retval); 2100 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 2101 src, dst, sh, op_err, 2102 #if defined(__FreeBSD__) && !defined(__Userspace__) 2103 mflowtype, mflowid, 2104 #endif 2105 vrf_id, net->port); 2106 return (NULL); 2107 } 2108 /* respond with a COOKIE-ACK */ 2109 sctp_send_cookie_ack(stcb); 2110 if (how_indx < sizeof(asoc->cookie_how)) 2111 asoc->cookie_how[how_indx] = 15; 2112 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE) && 2113 (asoc->sctp_autoclose_ticks > 0)) { 2114 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL); 2115 } 2116 return (stcb); 2117 } 2118 if (how_indx < sizeof(asoc->cookie_how)) 2119 asoc->cookie_how[how_indx] = 16; 2120 /* all other cases... */ 2121 SCTP_TCB_UNLOCK(stcb); 2122 return (NULL); 2123 } 2124 2125 /* 2126 * handle a state cookie for a new association m: input packet mbuf chain-- 2127 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf 2128 * and the cookie signature does not exist offset: offset into mbuf to the 2129 * cookie-echo chunk length: length of the cookie chunk to: where the init 2130 * was from returns a new TCB 2131 */ 2132 static struct sctp_tcb * 2133 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, 2134 struct sockaddr *src, struct sockaddr *dst, 2135 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 2136 struct sctp_inpcb *inp, struct sctp_nets **netp, 2137 struct sockaddr *init_src, int *notification, 2138 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 2139 #if defined(__FreeBSD__) && !defined(__Userspace__) 2140 uint8_t mflowtype, uint32_t mflowid, 2141 #endif 2142 uint32_t vrf_id, uint16_t port) 2143 { 2144 struct sctp_tcb *stcb; 2145 struct sctp_init_chunk *init_cp, init_buf; 2146 struct sctp_init_ack_chunk *initack_cp, initack_buf; 2147 union sctp_sockstore store; 2148 struct sctp_association *asoc; 2149 int init_offset, initack_offset, initack_limit; 2150 int error = 0; 2151 uint8_t auth_chunk_buf[SCTP_CHUNK_BUFFER_SIZE]; 2152 #if defined(__APPLE__) && !defined(__Userspace__) 2153 struct socket *so; 2154 2155 so = SCTP_INP_SO(inp); 2156 #endif 2157 2158 /* 2159 * find and validate the INIT chunk in the cookie (peer's info) the 2160 * INIT should start after the cookie-echo header struct (chunk 2161 * header, state cookie header struct) 2162 */ 2163 init_offset = offset + sizeof(struct sctp_cookie_echo_chunk); 2164 init_cp = (struct sctp_init_chunk *) 2165 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk), 2166 (uint8_t *) & init_buf); 2167 if (init_cp == NULL) { 2168 /* could not pull a INIT chunk in cookie */ 2169 SCTPDBG(SCTP_DEBUG_INPUT1, 2170 "process_cookie_new: could not pull INIT chunk hdr\n"); 2171 return (NULL); 2172 } 2173 if (init_cp->ch.chunk_type != SCTP_INITIATION) { 2174 SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n"); 2175 return (NULL); 2176 } 2177 initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length)); 2178 /* 2179 * find and validate the INIT-ACK chunk in the cookie (my info) the 2180 * INIT-ACK follows the INIT chunk 2181 */ 2182 initack_cp = (struct sctp_init_ack_chunk *) 2183 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk), 2184 (uint8_t *) & initack_buf); 2185 if (initack_cp == NULL) { 2186 /* could not pull INIT-ACK chunk in cookie */ 2187 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n"); 2188 return (NULL); 2189 } 2190 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { 2191 return (NULL); 2192 } 2193 /* 2194 * NOTE: We can't use the INIT_ACK's chk_length to determine the 2195 * "initack_limit" value. This is because the chk_length field 2196 * includes the length of the cookie, but the cookie is omitted when 2197 * the INIT and INIT_ACK are tacked onto the cookie... 2198 */ 2199 initack_limit = offset + cookie_len; 2200 2201 /* 2202 * now that we know the INIT/INIT-ACK are in place, create a new TCB 2203 * and populate 2204 */ 2205 2206 /* 2207 * Here we do a trick, we set in NULL for the proc/thread argument. We 2208 * do this since in effect we only use the p argument when 2209 * the socket is unbound and we must do an implicit bind. 2210 * Since we are getting a cookie, we cannot be unbound. 2211 */ 2212 stcb = sctp_aloc_assoc(inp, init_src, &error, 2213 ntohl(initack_cp->init.initiate_tag), 2214 ntohl(initack_cp->init.initial_tsn), vrf_id, 2215 ntohs(initack_cp->init.num_outbound_streams), 2216 port, 2217 #if defined(__FreeBSD__) && !defined(__Userspace__) 2218 (struct thread *)NULL, 2219 #elif defined(_WIN32) && !defined(__Userspace__) 2220 (PKTHREAD)NULL, 2221 #else 2222 (struct proc *)NULL, 2223 #endif 2224 SCTP_DONT_INITIALIZE_AUTH_PARAMS); 2225 if (stcb == NULL) { 2226 struct mbuf *op_err; 2227 2228 /* memory problem? */ 2229 SCTPDBG(SCTP_DEBUG_INPUT1, 2230 "process_cookie_new: no room for another TCB!\n"); 2231 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 2232 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen, 2233 src, dst, sh, op_err, 2234 #if defined(__FreeBSD__) && !defined(__Userspace__) 2235 mflowtype, mflowid, 2236 #endif 2237 vrf_id, port); 2238 return (NULL); 2239 } 2240 asoc = &stcb->asoc; 2241 /* get scope variables out of cookie */ 2242 asoc->scope.ipv4_local_scope = cookie->ipv4_scope; 2243 asoc->scope.site_scope = cookie->site_scope; 2244 asoc->scope.local_scope = cookie->local_scope; 2245 asoc->scope.loopback_scope = cookie->loopback_scope; 2246 2247 #if defined(__Userspace__) 2248 if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) || 2249 (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal) || 2250 (asoc->scope.conn_addr_legal != cookie->conn_addr_legal)) { 2251 #else 2252 if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) || 2253 (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal)) { 2254 #endif 2255 struct mbuf *op_err; 2256 2257 /* 2258 * Houston we have a problem. The EP changed while the 2259 * cookie was in flight. Only recourse is to abort the 2260 * association. 2261 */ 2262 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 2263 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen, 2264 src, dst, sh, op_err, 2265 #if defined(__FreeBSD__) && !defined(__Userspace__) 2266 mflowtype, mflowid, 2267 #endif 2268 vrf_id, port); 2269 #if defined(__APPLE__) && !defined(__Userspace__) 2270 atomic_add_int(&stcb->asoc.refcnt, 1); 2271 SCTP_TCB_UNLOCK(stcb); 2272 SCTP_SOCKET_LOCK(so, 1); 2273 SCTP_TCB_LOCK(stcb); 2274 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2275 #endif 2276 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2277 SCTP_FROM_SCTP_INPUT + SCTP_LOC_18); 2278 #if defined(__APPLE__) && !defined(__Userspace__) 2279 SCTP_SOCKET_UNLOCK(so, 1); 2280 #endif 2281 return (NULL); 2282 } 2283 asoc->rcv_edmid = cookie->rcv_edmid; 2284 /* process the INIT-ACK info (my info) */ 2285 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 2286 2287 /* process the INIT info (peer's info) */ 2288 if (sctp_process_init(init_cp, stcb) < 0) { 2289 #if defined(__APPLE__) && !defined(__Userspace__) 2290 atomic_add_int(&stcb->asoc.refcnt, 1); 2291 SCTP_TCB_UNLOCK(stcb); 2292 SCTP_SOCKET_LOCK(so, 1); 2293 SCTP_TCB_LOCK(stcb); 2294 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2295 #endif 2296 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2297 SCTP_FROM_SCTP_INPUT + SCTP_LOC_19); 2298 #if defined(__APPLE__) && !defined(__Userspace__) 2299 SCTP_SOCKET_UNLOCK(so, 1); 2300 #endif 2301 return (NULL); 2302 } 2303 /* load all addresses */ 2304 if (sctp_load_addresses_from_init(stcb, m, 2305 init_offset + sizeof(struct sctp_init_chunk), 2306 initack_offset, src, dst, init_src, port) < 0) { 2307 #if defined(__APPLE__) && !defined(__Userspace__) 2308 atomic_add_int(&stcb->asoc.refcnt, 1); 2309 SCTP_TCB_UNLOCK(stcb); 2310 SCTP_SOCKET_LOCK(so, 1); 2311 SCTP_TCB_LOCK(stcb); 2312 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2313 #endif 2314 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2315 SCTP_FROM_SCTP_INPUT + SCTP_LOC_20); 2316 #if defined(__APPLE__) && !defined(__Userspace__) 2317 SCTP_SOCKET_UNLOCK(so, 1); 2318 #endif 2319 return (NULL); 2320 } 2321 /* 2322 * verify any preceding AUTH chunk that was skipped 2323 */ 2324 /* pull the local authentication parameters from the cookie/init-ack */ 2325 sctp_auth_get_cookie_params(stcb, m, 2326 initack_offset + sizeof(struct sctp_init_ack_chunk), 2327 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk))); 2328 if (auth_skipped) { 2329 struct sctp_auth_chunk *auth; 2330 2331 if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) { 2332 auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf); 2333 } else { 2334 auth = NULL; 2335 } 2336 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) { 2337 /* auth HMAC failed, dump the assoc and packet */ 2338 SCTPDBG(SCTP_DEBUG_AUTH1, 2339 "COOKIE-ECHO: AUTH failed\n"); 2340 #if defined(__APPLE__) && !defined(__Userspace__) 2341 atomic_add_int(&stcb->asoc.refcnt, 1); 2342 SCTP_TCB_UNLOCK(stcb); 2343 SCTP_SOCKET_LOCK(so, 1); 2344 SCTP_TCB_LOCK(stcb); 2345 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2346 #endif 2347 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2348 SCTP_FROM_SCTP_INPUT + SCTP_LOC_21); 2349 #if defined(__APPLE__) && !defined(__Userspace__) 2350 SCTP_SOCKET_UNLOCK(so, 1); 2351 #endif 2352 return (NULL); 2353 } else { 2354 /* remaining chunks checked... good to go */ 2355 stcb->asoc.authenticated = 1; 2356 } 2357 } 2358 2359 /* 2360 * if we're doing ASCONFs, check to see if we have any new local 2361 * addresses that need to get added to the peer (eg. addresses 2362 * changed while cookie echo in flight). This needs to be done 2363 * after we go to the OPEN state to do the correct asconf 2364 * processing. else, make sure we have the correct addresses in our 2365 * lists 2366 */ 2367 2368 /* warning, we re-use sin, sin6, sa_store here! */ 2369 /* pull in local_address (our "from" address) */ 2370 switch (cookie->laddr_type) { 2371 #ifdef INET 2372 case SCTP_IPV4_ADDRESS: 2373 /* source addr is IPv4 */ 2374 memset(&store.sin, 0, sizeof(struct sockaddr_in)); 2375 store.sin.sin_family = AF_INET; 2376 #ifdef HAVE_SIN_LEN 2377 store.sin.sin_len = sizeof(struct sockaddr_in); 2378 #endif 2379 store.sin.sin_addr.s_addr = cookie->laddress[0]; 2380 break; 2381 #endif 2382 #ifdef INET6 2383 case SCTP_IPV6_ADDRESS: 2384 /* source addr is IPv6 */ 2385 memset(&store.sin6, 0, sizeof(struct sockaddr_in6)); 2386 store.sin6.sin6_family = AF_INET6; 2387 #ifdef HAVE_SIN6_LEN 2388 store.sin6.sin6_len = sizeof(struct sockaddr_in6); 2389 #endif 2390 store.sin6.sin6_scope_id = cookie->scope_id; 2391 memcpy(&store.sin6.sin6_addr, cookie->laddress, sizeof(struct in6_addr)); 2392 break; 2393 #endif 2394 #if defined(__Userspace__) 2395 case SCTP_CONN_ADDRESS: 2396 /* source addr is conn */ 2397 memset(&store.sconn, 0, sizeof(struct sockaddr_conn)); 2398 store.sconn.sconn_family = AF_CONN; 2399 #ifdef HAVE_SCONN_LEN 2400 store.sconn.sconn_len = sizeof(struct sockaddr_conn); 2401 #endif 2402 memcpy(&store.sconn.sconn_addr, cookie->laddress, sizeof(void *)); 2403 break; 2404 #endif 2405 default: 2406 #if defined(__APPLE__) && !defined(__Userspace__) 2407 atomic_add_int(&stcb->asoc.refcnt, 1); 2408 SCTP_TCB_UNLOCK(stcb); 2409 SCTP_SOCKET_LOCK(so, 1); 2410 SCTP_TCB_LOCK(stcb); 2411 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2412 #endif 2413 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2414 SCTP_FROM_SCTP_INPUT + SCTP_LOC_22); 2415 #if defined(__APPLE__) && !defined(__Userspace__) 2416 SCTP_SOCKET_UNLOCK(so, 1); 2417 #endif 2418 return (NULL); 2419 } 2420 2421 /* update current state */ 2422 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 2423 SCTP_SET_STATE(stcb, SCTP_STATE_OPEN); 2424 sctp_stop_all_cookie_timers(stcb); 2425 SCTP_STAT_INCR_COUNTER32(sctps_passiveestab); 2426 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 2427 2428 /* set up to notify upper layer */ 2429 *notification = SCTP_NOTIFY_ASSOC_UP; 2430 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2431 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2432 (!SCTP_IS_LISTENING(inp))) { 2433 /* 2434 * This is an endpoint that called connect() how it got a 2435 * cookie that is NEW is a bit of a mystery. It must be that 2436 * the INIT was sent, but before it got there.. a complete 2437 * INIT/INIT-ACK/COOKIE arrived. But of course then it 2438 * should have went to the other code.. not here.. oh well.. 2439 * a bit of protection is worth having.. 2440 * 2441 * XXXMJ unlocked 2442 */ 2443 sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_CONNECTED); 2444 #if defined(__APPLE__) && !defined(__Userspace__) 2445 atomic_add_int(&stcb->asoc.refcnt, 1); 2446 SCTP_TCB_UNLOCK(stcb); 2447 SCTP_SOCKET_LOCK(so, 1); 2448 SCTP_TCB_LOCK(stcb); 2449 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2450 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 2451 SCTP_SOCKET_UNLOCK(so, 1); 2452 return (NULL); 2453 } 2454 #endif 2455 soisconnected(stcb->sctp_socket); 2456 #if defined(__APPLE__) && !defined(__Userspace__) 2457 SCTP_SOCKET_UNLOCK(so, 1); 2458 #endif 2459 } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 2460 (SCTP_IS_LISTENING(inp))) { 2461 /* 2462 * We don't want to do anything with this one. Since it is 2463 * the listening guy. The timer will get started for 2464 * accepted connections in the caller. 2465 */ 2466 ; 2467 } 2468 if (stcb->asoc.sctp_autoclose_ticks && 2469 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 2470 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL); 2471 } 2472 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 2473 *netp = sctp_findnet(stcb, init_src); 2474 if (*netp != NULL) { 2475 /* 2476 * Since we did not send a HB, make sure we don't double 2477 * things. 2478 */ 2479 (*netp)->hb_responded = 1; 2480 } 2481 /* respond with a COOKIE-ACK */ 2482 sctp_send_cookie_ack(stcb); 2483 2484 /* 2485 * check the address lists for any ASCONFs that need to be sent 2486 * AFTER the cookie-ack is sent 2487 */ 2488 sctp_check_address_list(stcb, m, 2489 initack_offset + sizeof(struct sctp_init_ack_chunk), 2490 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)), 2491 &store.sa, cookie->local_scope, cookie->site_scope, 2492 cookie->ipv4_scope, cookie->loopback_scope); 2493 2494 return (stcb); 2495 } 2496 2497 /* 2498 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e 2499 * we NEED to make sure we are not already using the vtag. If so we 2500 * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG No middle box bit! 2501 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag, 2502 SCTP_BASE_INFO(hashasocmark))]; 2503 LIST_FOREACH(stcb, head, sctp_asocs) { 2504 if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep)) { 2505 -- SEND ABORT - TRY AGAIN -- 2506 } 2507 } 2508 */ 2509 2510 /* 2511 * handles a COOKIE-ECHO message stcb: modified to either a new or left as 2512 * existing (non-NULL) TCB 2513 */ 2514 static struct mbuf * 2515 sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, 2516 struct sockaddr *src, struct sockaddr *dst, 2517 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp, 2518 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp, 2519 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 2520 struct sctp_tcb **locked_tcb, 2521 #if defined(__FreeBSD__) && !defined(__Userspace__) 2522 uint8_t mflowtype, uint32_t mflowid, 2523 #endif 2524 uint32_t vrf_id, uint16_t port) 2525 { 2526 struct sctp_state_cookie *cookie; 2527 struct sctp_tcb *l_stcb = *stcb; 2528 struct sctp_inpcb *l_inp; 2529 struct sockaddr *to; 2530 struct sctp_pcb *ep; 2531 struct mbuf *m_sig; 2532 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE]; 2533 uint8_t *sig; 2534 #if defined(__Userspace__) && defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) 2535 uint8_t cookie_ok = 1; 2536 #else 2537 uint8_t cookie_ok = 0; 2538 #endif 2539 unsigned int sig_offset, cookie_offset; 2540 unsigned int cookie_len; 2541 struct timeval now; 2542 struct timeval time_entered, time_expires; 2543 int notification = 0; 2544 struct sctp_nets *netl; 2545 int had_a_existing_tcb = 0; 2546 int send_int_conf = 0; 2547 #ifdef INET 2548 struct sockaddr_in sin; 2549 #endif 2550 #ifdef INET6 2551 struct sockaddr_in6 sin6; 2552 #endif 2553 #if defined(__Userspace__) 2554 struct sockaddr_conn sconn; 2555 #endif 2556 2557 SCTPDBG(SCTP_DEBUG_INPUT2, 2558 "sctp_handle_cookie: handling COOKIE-ECHO\n"); 2559 2560 if (inp_p == NULL) { 2561 return (NULL); 2562 } 2563 cookie = &cp->cookie; 2564 cookie_offset = offset + sizeof(struct sctp_chunkhdr); 2565 cookie_len = ntohs(cp->ch.chunk_length); 2566 2567 if (cookie_len < sizeof(struct sctp_cookie_echo_chunk) + 2568 sizeof(struct sctp_init_chunk) + 2569 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) { 2570 /* cookie too small */ 2571 return (NULL); 2572 } 2573 if ((cookie->peerport != sh->src_port) || 2574 (cookie->myport != sh->dest_port) || 2575 (cookie->my_vtag != sh->v_tag)) { 2576 /* 2577 * invalid ports or bad tag. Note that we always leave the 2578 * v_tag in the header in network order and when we stored 2579 * it in the my_vtag slot we also left it in network order. 2580 * This maintains the match even though it may be in the 2581 * opposite byte order of the machine :-> 2582 */ 2583 return (NULL); 2584 } 2585 #if defined(__Userspace__) 2586 /* 2587 * Recover the AF_CONN addresses within the cookie. 2588 * This needs to be done in the buffer provided for later processing 2589 * of the cookie and in the mbuf chain for HMAC validation. 2590 */ 2591 if ((cookie->addr_type == SCTP_CONN_ADDRESS) && (src->sa_family == AF_CONN)) { 2592 struct sockaddr_conn *sconnp = (struct sockaddr_conn *)src; 2593 2594 memcpy(cookie->address, &sconnp->sconn_addr , sizeof(void *)); 2595 m_copyback(m, cookie_offset + offsetof(struct sctp_state_cookie, address), 2596 (int)sizeof(void *), (caddr_t)&sconnp->sconn_addr); 2597 } 2598 if ((cookie->laddr_type == SCTP_CONN_ADDRESS) && (dst->sa_family == AF_CONN)) { 2599 struct sockaddr_conn *sconnp = (struct sockaddr_conn *)dst; 2600 2601 memcpy(cookie->laddress, &sconnp->sconn_addr , sizeof(void *)); 2602 m_copyback(m, cookie_offset + offsetof(struct sctp_state_cookie, laddress), 2603 (int)sizeof(void *), (caddr_t)&sconnp->sconn_addr); 2604 } 2605 #endif 2606 /* 2607 * split off the signature into its own mbuf (since it should not be 2608 * calculated in the sctp_hmac_m() call). 2609 */ 2610 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE; 2611 m_sig = m_split(m, sig_offset, M_NOWAIT); 2612 if (m_sig == NULL) { 2613 /* out of memory or ?? */ 2614 return (NULL); 2615 } 2616 #ifdef SCTP_MBUF_LOGGING 2617 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 2618 sctp_log_mbc(m_sig, SCTP_MBUF_SPLIT); 2619 } 2620 #endif 2621 2622 /* 2623 * compute the signature/digest for the cookie 2624 */ 2625 if (l_stcb != NULL) { 2626 atomic_add_int(&l_stcb->asoc.refcnt, 1); 2627 SCTP_TCB_UNLOCK(l_stcb); 2628 } 2629 l_inp = *inp_p; 2630 SCTP_INP_RLOCK(l_inp); 2631 if (l_stcb != NULL) { 2632 SCTP_TCB_LOCK(l_stcb); 2633 atomic_subtract_int(&l_stcb->asoc.refcnt, 1); 2634 } 2635 if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 2636 SCTP_INP_RUNLOCK(l_inp); 2637 sctp_m_freem(m_sig); 2638 return (NULL); 2639 } 2640 ep = &(*inp_p)->sctp_ep; 2641 /* which cookie is it? */ 2642 if ((cookie->time_entered.tv_sec < ep->time_of_secret_change) && 2643 (ep->current_secret_number != ep->last_secret_number)) { 2644 /* it's the old cookie */ 2645 (void)sctp_hmac_m(SCTP_HMAC, 2646 (uint8_t *)ep->secret_key[(int)ep->last_secret_number], 2647 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2648 } else { 2649 /* it's the current cookie */ 2650 (void)sctp_hmac_m(SCTP_HMAC, 2651 (uint8_t *)ep->secret_key[(int)ep->current_secret_number], 2652 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2653 } 2654 /* get the signature */ 2655 SCTP_INP_RUNLOCK(l_inp); 2656 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig); 2657 if (sig == NULL) { 2658 /* couldn't find signature */ 2659 sctp_m_freem(m_sig); 2660 return (NULL); 2661 } 2662 /* compare the received digest with the computed digest */ 2663 if (timingsafe_bcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) { 2664 /* try the old cookie? */ 2665 if ((cookie->time_entered.tv_sec == ep->time_of_secret_change) && 2666 (ep->current_secret_number != ep->last_secret_number)) { 2667 /* compute digest with old */ 2668 (void)sctp_hmac_m(SCTP_HMAC, 2669 (uint8_t *)ep->secret_key[(int)ep->last_secret_number], 2670 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2671 /* compare */ 2672 if (timingsafe_bcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0) 2673 cookie_ok = 1; 2674 } 2675 } else { 2676 cookie_ok = 1; 2677 } 2678 2679 /* 2680 * Now before we continue we must reconstruct our mbuf so that 2681 * normal processing of any other chunks will work. 2682 */ 2683 { 2684 struct mbuf *m_at; 2685 2686 m_at = m; 2687 while (SCTP_BUF_NEXT(m_at) != NULL) { 2688 m_at = SCTP_BUF_NEXT(m_at); 2689 } 2690 SCTP_BUF_NEXT(m_at) = m_sig; 2691 } 2692 2693 if (cookie_ok == 0) { 2694 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n"); 2695 SCTPDBG(SCTP_DEBUG_INPUT2, 2696 "offset = %u, cookie_offset = %u, sig_offset = %u\n", 2697 (uint32_t) offset, cookie_offset, sig_offset); 2698 return (NULL); 2699 } 2700 2701 if (sctp_ticks_to_msecs(cookie->cookie_life) > SCTP_MAX_COOKIE_LIFE) { 2702 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: Invalid cookie lifetime\n"); 2703 return (NULL); 2704 } 2705 time_entered.tv_sec = cookie->time_entered.tv_sec; 2706 time_entered.tv_usec = cookie->time_entered.tv_usec; 2707 if ((time_entered.tv_sec < 0) || 2708 (time_entered.tv_usec < 0) || 2709 (time_entered.tv_usec >= 1000000)) { 2710 /* Invalid time stamp. Cookie must have been modified. */ 2711 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: Invalid time stamp\n"); 2712 return (NULL); 2713 } 2714 (void)SCTP_GETTIME_TIMEVAL(&now); 2715 #if !(defined(__FreeBSD__) && !defined(__Userspace__)) 2716 if (timercmp(&now, &time_entered, <)) { 2717 #else 2718 if (timevalcmp(&now, &time_entered, <)) { 2719 #endif 2720 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie generated in the future!\n"); 2721 return (NULL); 2722 } 2723 /* 2724 * Check the cookie timestamps to be sure it's not stale. 2725 * cookie_life is in ticks, so we convert to seconds. 2726 */ 2727 time_expires.tv_sec = time_entered.tv_sec + sctp_ticks_to_secs(cookie->cookie_life); 2728 time_expires.tv_usec = time_entered.tv_usec; 2729 #if !(defined(__FreeBSD__) && !defined(__Userspace__)) 2730 if (timercmp(&now, &time_expires, >)) 2731 #else 2732 if (timevalcmp(&now, &time_expires, >)) 2733 #endif 2734 { 2735 /* cookie is stale! */ 2736 struct mbuf *op_err; 2737 struct sctp_error_stale_cookie *cause; 2738 struct timeval diff; 2739 uint32_t staleness; 2740 2741 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_stale_cookie), 2742 0, M_NOWAIT, 1, MT_DATA); 2743 if (op_err == NULL) { 2744 /* FOOBAR */ 2745 return (NULL); 2746 } 2747 /* Set the len */ 2748 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_stale_cookie); 2749 cause = mtod(op_err, struct sctp_error_stale_cookie *); 2750 cause->cause.code = htons(SCTP_CAUSE_STALE_COOKIE); 2751 cause->cause.length = htons(sizeof(struct sctp_error_stale_cookie)); 2752 #if !(defined(__FreeBSD__) && !defined(__Userspace__)) 2753 timersub(&now, &time_expires, &diff); 2754 #else 2755 diff = now; 2756 timevalsub(&diff, &time_expires); 2757 #endif 2758 if ((uint32_t)diff.tv_sec > UINT32_MAX / 1000000) { 2759 staleness = UINT32_MAX; 2760 } else { 2761 staleness = (uint32_t)diff.tv_sec * 1000000; 2762 } 2763 if (UINT32_MAX - staleness >= (uint32_t)diff.tv_usec) { 2764 staleness += (uint32_t)diff.tv_usec; 2765 } else { 2766 staleness = UINT32_MAX; 2767 } 2768 cause->stale_time = htonl(staleness); 2769 sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err, 2770 #if defined(__FreeBSD__) && !defined(__Userspace__) 2771 mflowtype, mflowid, l_inp->fibnum, 2772 #endif 2773 vrf_id, port); 2774 return (NULL); 2775 } 2776 /* 2777 * Now we must see with the lookup address if we have an existing 2778 * asoc. This will only happen if we were in the COOKIE-WAIT state 2779 * and a INIT collided with us and somewhere the peer sent the 2780 * cookie on another address besides the single address our assoc 2781 * had for him. In this case we will have one of the tie-tags set at 2782 * least AND the address field in the cookie can be used to look it 2783 * up. 2784 */ 2785 to = NULL; 2786 switch (cookie->addr_type) { 2787 #ifdef INET6 2788 case SCTP_IPV6_ADDRESS: 2789 memset(&sin6, 0, sizeof(sin6)); 2790 sin6.sin6_family = AF_INET6; 2791 #ifdef HAVE_SIN6_LEN 2792 sin6.sin6_len = sizeof(sin6); 2793 #endif 2794 sin6.sin6_port = sh->src_port; 2795 sin6.sin6_scope_id = cookie->scope_id; 2796 memcpy(&sin6.sin6_addr.s6_addr, cookie->address, 2797 sizeof(sin6.sin6_addr.s6_addr)); 2798 to = (struct sockaddr *)&sin6; 2799 break; 2800 #endif 2801 #ifdef INET 2802 case SCTP_IPV4_ADDRESS: 2803 memset(&sin, 0, sizeof(sin)); 2804 sin.sin_family = AF_INET; 2805 #ifdef HAVE_SIN_LEN 2806 sin.sin_len = sizeof(sin); 2807 #endif 2808 sin.sin_port = sh->src_port; 2809 sin.sin_addr.s_addr = cookie->address[0]; 2810 to = (struct sockaddr *)&sin; 2811 break; 2812 #endif 2813 #if defined(__Userspace__) 2814 case SCTP_CONN_ADDRESS: 2815 memset(&sconn, 0, sizeof(struct sockaddr_conn)); 2816 sconn.sconn_family = AF_CONN; 2817 #ifdef HAVE_SCONN_LEN 2818 sconn.sconn_len = sizeof(struct sockaddr_conn); 2819 #endif 2820 sconn.sconn_port = sh->src_port; 2821 memcpy(&sconn.sconn_addr, cookie->address, sizeof(void *)); 2822 to = (struct sockaddr *)&sconn; 2823 break; 2824 #endif 2825 default: 2826 /* This should not happen */ 2827 return (NULL); 2828 } 2829 if (*stcb == NULL) { 2830 /* Yep, lets check */ 2831 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, dst, NULL); 2832 if (*stcb == NULL) { 2833 /* 2834 * We should have only got back the same inp. If we 2835 * got back a different ep we have a problem. The 2836 * original findep got back l_inp and now 2837 */ 2838 if (l_inp != *inp_p) { 2839 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n"); 2840 } 2841 } else { 2842 if (*locked_tcb == NULL) { 2843 /* In this case we found the assoc only 2844 * after we locked the create lock. This means 2845 * we are in a colliding case and we must make 2846 * sure that we unlock the tcb if its one of the 2847 * cases where we throw away the incoming packets. 2848 */ 2849 *locked_tcb = *stcb; 2850 2851 /* We must also increment the inp ref count 2852 * since the ref_count flags was set when we 2853 * did not find the TCB, now we found it which 2854 * reduces the refcount.. we must raise it back 2855 * out to balance it all :-) 2856 */ 2857 SCTP_INP_INCR_REF((*stcb)->sctp_ep); 2858 if ((*stcb)->sctp_ep != l_inp) { 2859 SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n", 2860 (void *)(*stcb)->sctp_ep, (void *)l_inp); 2861 } 2862 } 2863 } 2864 } 2865 2866 cookie_len -= SCTP_SIGNATURE_SIZE; 2867 if (*stcb == NULL) { 2868 /* this is the "normal" case... get a new TCB */ 2869 *stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, sh, 2870 cookie, cookie_len, *inp_p, 2871 netp, to, ¬ification, 2872 auth_skipped, auth_offset, auth_len, 2873 #if defined(__FreeBSD__) && !defined(__Userspace__) 2874 mflowtype, mflowid, 2875 #endif 2876 vrf_id, port); 2877 } else { 2878 /* this is abnormal... cookie-echo on existing TCB */ 2879 had_a_existing_tcb = 1; 2880 *stcb = sctp_process_cookie_existing(m, iphlen, offset, 2881 src, dst, sh, 2882 cookie, cookie_len, *inp_p, *stcb, netp, to, 2883 ¬ification, auth_skipped, auth_offset, auth_len, 2884 #if defined(__FreeBSD__) && !defined(__Userspace__) 2885 mflowtype, mflowid, 2886 #endif 2887 vrf_id, port); 2888 if (*stcb == NULL) { 2889 *locked_tcb = NULL; 2890 } 2891 } 2892 2893 if (*stcb == NULL) { 2894 /* still no TCB... must be bad cookie-echo */ 2895 return (NULL); 2896 } 2897 #if defined(__FreeBSD__) && !defined(__Userspace__) 2898 if (*netp != NULL) { 2899 (*netp)->flowtype = mflowtype; 2900 (*netp)->flowid = mflowid; 2901 } 2902 #endif 2903 /* 2904 * Ok, we built an association so confirm the address we sent the 2905 * INIT-ACK to. 2906 */ 2907 netl = sctp_findnet(*stcb, to); 2908 /* 2909 * This code should in theory NOT run but 2910 */ 2911 if (netl == NULL) { 2912 /* TSNH! Huh, why do I need to add this address here? */ 2913 if (sctp_add_remote_addr(*stcb, to, NULL, port, 2914 SCTP_DONOT_SETSCOPE, SCTP_IN_COOKIE_PROC)) { 2915 return (NULL); 2916 } 2917 netl = sctp_findnet(*stcb, to); 2918 } 2919 if (netl) { 2920 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) { 2921 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2922 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL, 2923 netl); 2924 send_int_conf = 1; 2925 } 2926 } 2927 sctp_start_net_timers(*stcb); 2928 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 2929 if (!had_a_existing_tcb || 2930 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 2931 /* 2932 * If we have a NEW cookie or the connect never 2933 * reached the connected state during collision we 2934 * must do the TCP accept thing. 2935 */ 2936 struct socket *so, *oso; 2937 struct sctp_inpcb *inp; 2938 2939 if (notification == SCTP_NOTIFY_ASSOC_RESTART) { 2940 /* 2941 * For a restart we will keep the same 2942 * socket, no need to do anything. I THINK!! 2943 */ 2944 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2945 if (send_int_conf) { 2946 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2947 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 2948 } 2949 return (m); 2950 } 2951 oso = (*inp_p)->sctp_socket; 2952 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2953 SCTP_TCB_UNLOCK((*stcb)); 2954 #if defined(__FreeBSD__) && !defined(__Userspace__) 2955 CURVNET_SET(oso->so_vnet); 2956 #endif 2957 #if defined(__APPLE__) && !defined(__Userspace__) 2958 SCTP_SOCKET_LOCK(oso, 1); 2959 #endif 2960 so = sonewconn(oso, 0 2961 #if defined(__APPLE__) && !defined(__Userspace__) 2962 ,NULL 2963 #endif 2964 ); 2965 #if defined(__APPLE__) && !defined(__Userspace__) 2966 SCTP_SOCKET_UNLOCK(oso, 1); 2967 #endif 2968 #if defined(__FreeBSD__) && !defined(__Userspace__) 2969 CURVNET_RESTORE(); 2970 #endif 2971 SCTP_TCB_LOCK((*stcb)); 2972 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2973 2974 if (so == NULL) { 2975 struct mbuf *op_err; 2976 #if defined(__APPLE__) && !defined(__Userspace__) 2977 struct socket *pcb_so; 2978 #endif 2979 /* Too many sockets */ 2980 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n"); 2981 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 2982 sctp_abort_association(*inp_p, NULL, m, iphlen, 2983 src, dst, sh, op_err, 2984 #if defined(__FreeBSD__) && !defined(__Userspace__) 2985 mflowtype, mflowid, 2986 #endif 2987 vrf_id, port); 2988 #if defined(__APPLE__) && !defined(__Userspace__) 2989 pcb_so = SCTP_INP_SO(*inp_p); 2990 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2991 SCTP_TCB_UNLOCK((*stcb)); 2992 SCTP_SOCKET_LOCK(pcb_so, 1); 2993 SCTP_TCB_LOCK((*stcb)); 2994 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2995 #endif 2996 (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, 2997 SCTP_FROM_SCTP_INPUT + SCTP_LOC_23); 2998 #if defined(__APPLE__) && !defined(__Userspace__) 2999 SCTP_SOCKET_UNLOCK(pcb_so, 1); 3000 #endif 3001 return (NULL); 3002 } 3003 inp = (struct sctp_inpcb *)so->so_pcb; 3004 SCTP_INP_INCR_REF(inp); 3005 /* 3006 * We add the unbound flag here so that 3007 * if we get an soabort() before we get the 3008 * move_pcb done, we will properly cleanup. 3009 */ 3010 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 3011 SCTP_PCB_FLAGS_CONNECTED | 3012 SCTP_PCB_FLAGS_IN_TCPPOOL | 3013 SCTP_PCB_FLAGS_UNBOUND | 3014 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) | 3015 SCTP_PCB_FLAGS_DONT_WAKE); 3016 inp->sctp_features = (*inp_p)->sctp_features; 3017 inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features; 3018 inp->sctp_socket = so; 3019 inp->sctp_frag_point = (*inp_p)->sctp_frag_point; 3020 inp->max_cwnd = (*inp_p)->max_cwnd; 3021 inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off; 3022 inp->ecn_supported = (*inp_p)->ecn_supported; 3023 inp->prsctp_supported = (*inp_p)->prsctp_supported; 3024 inp->auth_supported = (*inp_p)->auth_supported; 3025 inp->asconf_supported = (*inp_p)->asconf_supported; 3026 inp->reconfig_supported = (*inp_p)->reconfig_supported; 3027 inp->nrsack_supported = (*inp_p)->nrsack_supported; 3028 inp->pktdrop_supported = (*inp_p)->pktdrop_supported; 3029 inp->partial_delivery_point = (*inp_p)->partial_delivery_point; 3030 inp->sctp_context = (*inp_p)->sctp_context; 3031 inp->local_strreset_support = (*inp_p)->local_strreset_support; 3032 inp->fibnum = (*inp_p)->fibnum; 3033 #if defined(__Userspace__) 3034 inp->ulp_info = (*inp_p)->ulp_info; 3035 inp->recv_callback = (*inp_p)->recv_callback; 3036 inp->send_callback = (*inp_p)->send_callback; 3037 inp->send_sb_threshold = (*inp_p)->send_sb_threshold; 3038 #endif 3039 /* 3040 * copy in the authentication parameters from the 3041 * original endpoint 3042 */ 3043 if (inp->sctp_ep.local_hmacs) 3044 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 3045 inp->sctp_ep.local_hmacs = 3046 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs); 3047 if (inp->sctp_ep.local_auth_chunks) 3048 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 3049 inp->sctp_ep.local_auth_chunks = 3050 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks); 3051 3052 /* 3053 * Now we must move it from one hash table to 3054 * another and get the tcb in the right place. 3055 */ 3056 3057 /* This is where the one-2-one socket is put into 3058 * the accept state waiting for the accept! 3059 */ 3060 if (*stcb) { 3061 SCTP_ADD_SUBSTATE(*stcb, SCTP_STATE_IN_ACCEPT_QUEUE); 3062 } 3063 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb); 3064 3065 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 3066 SCTP_TCB_UNLOCK((*stcb)); 3067 3068 #if defined(__FreeBSD__) && !defined(__Userspace__) 3069 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, 3070 0); 3071 #else 3072 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT); 3073 #endif 3074 SCTP_TCB_LOCK((*stcb)); 3075 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 3076 3077 /* now we must check to see if we were aborted while 3078 * the move was going on and the lock/unlock happened. 3079 */ 3080 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3081 /* yep it was, we leave the 3082 * assoc attached to the socket since 3083 * the sctp_inpcb_free() call will send 3084 * an abort for us. 3085 */ 3086 SCTP_INP_DECR_REF(inp); 3087 return (NULL); 3088 } 3089 SCTP_INP_DECR_REF(inp); 3090 /* Switch over to the new guy */ 3091 *inp_p = inp; 3092 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 3093 if (send_int_conf) { 3094 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 3095 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 3096 } 3097 3098 /* Pull it from the incomplete queue and wake the guy */ 3099 #if defined(__APPLE__) && !defined(__Userspace__) 3100 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 3101 SCTP_TCB_UNLOCK((*stcb)); 3102 SCTP_SOCKET_LOCK(so, 1); 3103 #endif 3104 soisconnected(so); 3105 #if defined(__APPLE__) && !defined(__Userspace__) 3106 SCTP_TCB_LOCK((*stcb)); 3107 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 3108 SCTP_SOCKET_UNLOCK(so, 1); 3109 #endif 3110 return (m); 3111 } 3112 } 3113 if (notification) { 3114 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 3115 } 3116 if (send_int_conf) { 3117 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 3118 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 3119 } 3120 return (m); 3121 } 3122 3123 static void 3124 sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED, 3125 struct sctp_tcb *stcb, struct sctp_nets *net) 3126 { 3127 /* cp must not be used, others call this without a c-ack :-) */ 3128 struct sctp_association *asoc; 3129 struct sctp_tmit_chunk *chk; 3130 3131 SCTPDBG(SCTP_DEBUG_INPUT2, 3132 "sctp_handle_cookie_ack: handling COOKIE-ACK\n"); 3133 if ((stcb == NULL) || (net == NULL)) { 3134 return; 3135 } 3136 3137 asoc = &stcb->asoc; 3138 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 3139 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 3140 asoc->overall_error_count, 3141 0, 3142 SCTP_FROM_SCTP_INPUT, 3143 __LINE__); 3144 } 3145 sctp_stop_all_cookie_timers(stcb); 3146 sctp_toss_old_cookies(stcb, asoc); 3147 /* process according to association state */ 3148 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) { 3149 /* state change only needed when I am in right state */ 3150 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 3151 SCTP_SET_STATE(stcb, SCTP_STATE_OPEN); 3152 sctp_start_net_timers(stcb); 3153 /* update RTO */ 3154 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 3155 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 3156 if (asoc->overall_error_count == 0) { 3157 sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, 3158 SCTP_RTT_FROM_NON_DATA); 3159 } 3160 /* 3161 * Since we did not send a HB make sure we don't double 3162 * things. 3163 */ 3164 asoc->overall_error_count = 0; 3165 net->hb_responded = 1; 3166 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 3167 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 3168 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3169 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3170 #if defined(__APPLE__) && !defined(__Userspace__) 3171 struct socket *so; 3172 3173 #endif 3174 sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_CONNECTED); 3175 #if defined(__APPLE__) && !defined(__Userspace__) 3176 so = SCTP_INP_SO(stcb->sctp_ep); 3177 atomic_add_int(&stcb->asoc.refcnt, 1); 3178 SCTP_TCB_UNLOCK(stcb); 3179 SCTP_SOCKET_LOCK(so, 1); 3180 SCTP_TCB_LOCK(stcb); 3181 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3182 #endif 3183 if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) { 3184 soisconnected(stcb->sctp_socket); 3185 } 3186 #if defined(__APPLE__) && !defined(__Userspace__) 3187 SCTP_SOCKET_UNLOCK(so, 1); 3188 #endif 3189 } 3190 3191 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 3192 TAILQ_EMPTY(&asoc->send_queue) && 3193 TAILQ_EMPTY(&asoc->sent_queue) && 3194 (asoc->stream_queue_cnt == 0)) { 3195 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 3196 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); 3197 sctp_stop_timers_for_shutdown(stcb); 3198 sctp_send_shutdown(stcb, net); 3199 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 3200 stcb->sctp_ep, stcb, net); 3201 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 3202 stcb->sctp_ep, stcb, NULL); 3203 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED); 3204 } 3205 3206 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3207 /* We don't need to do the asconf thing, 3208 * nor hb or autoclose if the socket is closed. 3209 */ 3210 goto closed_socket; 3211 } 3212 3213 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, 3214 stcb, net); 3215 3216 if (stcb->asoc.sctp_autoclose_ticks && 3217 sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) { 3218 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, 3219 stcb->sctp_ep, stcb, NULL); 3220 } 3221 /* 3222 * send ASCONF if parameters are pending and ASCONFs are 3223 * allowed (eg. addresses changed when init/cookie echo were 3224 * in flight) 3225 */ 3226 if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) && 3227 (stcb->asoc.asconf_supported == 1) && 3228 (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) { 3229 #ifdef SCTP_TIMER_BASED_ASCONF 3230 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, 3231 stcb->sctp_ep, stcb, 3232 stcb->asoc.primary_destination); 3233 #else 3234 sctp_send_asconf(stcb, stcb->asoc.primary_destination, 3235 SCTP_ADDR_NOT_LOCKED); 3236 #endif 3237 } 3238 } 3239 closed_socket: 3240 /* Restart the timer if we have pending data */ 3241 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3242 if (chk->whoTo != NULL) { 3243 break; 3244 } 3245 } 3246 if (chk != NULL) { 3247 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 3248 } 3249 } 3250 3251 static void 3252 sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp, 3253 struct sctp_tcb *stcb) 3254 { 3255 struct sctp_nets *net; 3256 struct sctp_tmit_chunk *lchk; 3257 struct sctp_ecne_chunk bkup; 3258 uint8_t override_bit; 3259 uint32_t tsn, window_data_tsn; 3260 int len; 3261 unsigned int pkt_cnt; 3262 3263 len = ntohs(cp->ch.chunk_length); 3264 if (len == sizeof(struct old_sctp_ecne_chunk)) { 3265 /* Its the old format */ 3266 memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk)); 3267 bkup.num_pkts_since_cwr = htonl(1); 3268 cp = &bkup; 3269 } 3270 SCTP_STAT_INCR(sctps_recvecne); 3271 tsn = ntohl(cp->tsn); 3272 pkt_cnt = ntohl(cp->num_pkts_since_cwr); 3273 lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead); 3274 if (lchk == NULL) { 3275 window_data_tsn = stcb->asoc.sending_seq - 1; 3276 } else { 3277 window_data_tsn = lchk->rec.data.tsn; 3278 } 3279 3280 /* Find where it was sent to if possible. */ 3281 net = NULL; 3282 TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) { 3283 if (lchk->rec.data.tsn == tsn) { 3284 net = lchk->whoTo; 3285 net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send; 3286 break; 3287 } 3288 if (SCTP_TSN_GT(lchk->rec.data.tsn, tsn)) { 3289 break; 3290 } 3291 } 3292 if (net == NULL) { 3293 /* 3294 * What to do. A previous send of a 3295 * CWR was possibly lost. See how old it is, we 3296 * may have it marked on the actual net. 3297 */ 3298 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3299 if (tsn == net->last_cwr_tsn) { 3300 /* Found him, send it off */ 3301 break; 3302 } 3303 } 3304 if (net == NULL) { 3305 /* 3306 * If we reach here, we need to send a special 3307 * CWR that says hey, we did this a long time 3308 * ago and you lost the response. 3309 */ 3310 net = TAILQ_FIRST(&stcb->asoc.nets); 3311 if (net == NULL) { 3312 /* TSNH */ 3313 return; 3314 } 3315 override_bit = SCTP_CWR_REDUCE_OVERRIDE; 3316 } else { 3317 override_bit = 0; 3318 } 3319 } else { 3320 override_bit = 0; 3321 } 3322 if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) && 3323 ((override_bit&SCTP_CWR_REDUCE_OVERRIDE) == 0)) { 3324 /* JRS - Use the congestion control given in the pluggable CC module */ 3325 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt); 3326 /* 3327 * We reduce once every RTT. So we will only lower cwnd at 3328 * the next sending seq i.e. the window_data_tsn 3329 */ 3330 net->cwr_window_tsn = window_data_tsn; 3331 net->ecn_ce_pkt_cnt += pkt_cnt; 3332 net->lost_cnt = pkt_cnt; 3333 net->last_cwr_tsn = tsn; 3334 } else { 3335 override_bit |= SCTP_CWR_IN_SAME_WINDOW; 3336 if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) && 3337 ((override_bit&SCTP_CWR_REDUCE_OVERRIDE) == 0)) { 3338 /* 3339 * Another loss in the same window update how 3340 * many marks/packets lost we have had. 3341 */ 3342 int cnt = 1; 3343 if (pkt_cnt > net->lost_cnt) { 3344 /* Should be the case */ 3345 cnt = (pkt_cnt - net->lost_cnt); 3346 net->ecn_ce_pkt_cnt += cnt; 3347 } 3348 net->lost_cnt = pkt_cnt; 3349 net->last_cwr_tsn = tsn; 3350 /* 3351 * Most CC functions will ignore this call, since we are in-window 3352 * yet of the initial CE the peer saw. 3353 */ 3354 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt); 3355 } 3356 } 3357 /* 3358 * We always send a CWR this way if our previous one was lost our 3359 * peer will get an update, or if it is not time again to reduce we 3360 * still get the cwr to the peer. Note we set the override when we 3361 * could not find the TSN on the chunk or the destination network. 3362 */ 3363 sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit); 3364 } 3365 3366 static void 3367 sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net) 3368 { 3369 /* 3370 * Here we get a CWR from the peer. We must look in the outqueue and 3371 * make sure that we have a covered ECNE in the control chunk part. 3372 * If so remove it. 3373 */ 3374 struct sctp_tmit_chunk *chk, *nchk; 3375 struct sctp_ecne_chunk *ecne; 3376 int override; 3377 uint32_t cwr_tsn; 3378 3379 cwr_tsn = ntohl(cp->tsn); 3380 override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE; 3381 TAILQ_FOREACH_SAFE(chk, &stcb->asoc.control_send_queue, sctp_next, nchk) { 3382 if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) { 3383 continue; 3384 } 3385 if ((override == 0) && (chk->whoTo != net)) { 3386 /* Must be from the right src unless override is set */ 3387 continue; 3388 } 3389 ecne = mtod(chk->data, struct sctp_ecne_chunk *); 3390 if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) { 3391 /* this covers this ECNE, we can remove it */ 3392 stcb->asoc.ecn_echo_cnt_onq--; 3393 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, 3394 sctp_next); 3395 stcb->asoc.ctrl_queue_cnt--; 3396 sctp_m_freem(chk->data); 3397 chk->data = NULL; 3398 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 3399 if (override == 0) { 3400 break; 3401 } 3402 } 3403 } 3404 } 3405 3406 static void 3407 sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED, 3408 struct sctp_tcb *stcb, struct sctp_nets *net) 3409 { 3410 #if defined(__APPLE__) && !defined(__Userspace__) 3411 struct socket *so; 3412 #endif 3413 3414 SCTPDBG(SCTP_DEBUG_INPUT2, 3415 "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n"); 3416 if (stcb == NULL) 3417 return; 3418 3419 /* process according to association state */ 3420 if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) { 3421 /* unexpected SHUTDOWN-COMPLETE... so ignore... */ 3422 SCTPDBG(SCTP_DEBUG_INPUT2, 3423 "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n"); 3424 SCTP_TCB_UNLOCK(stcb); 3425 return; 3426 } 3427 /* notify upper layer protocol */ 3428 if (stcb->sctp_socket) { 3429 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 3430 } 3431 #ifdef INVARIANTS 3432 if (!TAILQ_EMPTY(&stcb->asoc.send_queue) || 3433 !TAILQ_EMPTY(&stcb->asoc.sent_queue) || 3434 sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) { 3435 panic("Queues are not empty when handling SHUTDOWN-COMPLETE"); 3436 } 3437 #endif 3438 /* stop the timer */ 3439 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, 3440 SCTP_FROM_SCTP_INPUT + SCTP_LOC_24); 3441 SCTP_STAT_INCR_COUNTER32(sctps_shutdown); 3442 /* free the TCB */ 3443 SCTPDBG(SCTP_DEBUG_INPUT2, 3444 "sctp_handle_shutdown_complete: calls free-asoc\n"); 3445 #if defined(__APPLE__) && !defined(__Userspace__) 3446 so = SCTP_INP_SO(stcb->sctp_ep); 3447 atomic_add_int(&stcb->asoc.refcnt, 1); 3448 SCTP_TCB_UNLOCK(stcb); 3449 SCTP_SOCKET_LOCK(so, 1); 3450 SCTP_TCB_LOCK(stcb); 3451 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3452 #endif 3453 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 3454 SCTP_FROM_SCTP_INPUT + SCTP_LOC_25); 3455 #if defined(__APPLE__) && !defined(__Userspace__) 3456 SCTP_SOCKET_UNLOCK(so, 1); 3457 #endif 3458 return; 3459 } 3460 3461 static int 3462 process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc, 3463 struct sctp_nets *net, uint8_t flg) 3464 { 3465 switch (desc->chunk_type) { 3466 case SCTP_DATA: 3467 case SCTP_IDATA: 3468 /* find the tsn to resend (possibly) */ 3469 { 3470 uint32_t tsn; 3471 struct sctp_tmit_chunk *tp1; 3472 3473 tsn = ntohl(desc->tsn_ifany); 3474 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) { 3475 if (tp1->rec.data.tsn == tsn) { 3476 /* found it */ 3477 break; 3478 } 3479 if (SCTP_TSN_GT(tp1->rec.data.tsn, tsn)) { 3480 /* not found */ 3481 tp1 = NULL; 3482 break; 3483 } 3484 } 3485 if (tp1 == NULL) { 3486 /* 3487 * Do it the other way , aka without paying 3488 * attention to queue seq order. 3489 */ 3490 SCTP_STAT_INCR(sctps_pdrpdnfnd); 3491 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) { 3492 if (tp1->rec.data.tsn == tsn) { 3493 /* found it */ 3494 break; 3495 } 3496 } 3497 } 3498 if (tp1 == NULL) { 3499 SCTP_STAT_INCR(sctps_pdrptsnnf); 3500 } 3501 if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) { 3502 if (((flg & SCTP_BADCRC) == 0) && 3503 ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) { 3504 return (0); 3505 } 3506 if ((stcb->asoc.peers_rwnd == 0) && 3507 ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) { 3508 SCTP_STAT_INCR(sctps_pdrpdiwnp); 3509 return (0); 3510 } 3511 if (stcb->asoc.peers_rwnd == 0 && 3512 (flg & SCTP_FROM_MIDDLE_BOX)) { 3513 SCTP_STAT_INCR(sctps_pdrpdizrw); 3514 return (0); 3515 } 3516 if ((uint32_t)SCTP_BUF_LEN(tp1->data) < 3517 SCTP_DATA_CHUNK_OVERHEAD(stcb) + SCTP_NUM_DB_TO_VERIFY) { 3518 /* Payload not matching. */ 3519 SCTP_STAT_INCR(sctps_pdrpbadd); 3520 return (-1); 3521 } 3522 if (memcmp(mtod(tp1->data, caddr_t) + SCTP_DATA_CHUNK_OVERHEAD(stcb), 3523 desc->data_bytes, SCTP_NUM_DB_TO_VERIFY) != 0) { 3524 /* Payload not matching. */ 3525 SCTP_STAT_INCR(sctps_pdrpbadd); 3526 return (-1); 3527 } 3528 if (tp1->do_rtt) { 3529 /* 3530 * this guy had a RTO calculation 3531 * pending on it, cancel it 3532 */ 3533 if (tp1->whoTo->rto_needed == 0) { 3534 tp1->whoTo->rto_needed = 1; 3535 } 3536 tp1->do_rtt = 0; 3537 } 3538 SCTP_STAT_INCR(sctps_pdrpmark); 3539 if (tp1->sent != SCTP_DATAGRAM_RESEND) 3540 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3541 /* 3542 * mark it as if we were doing a FR, since 3543 * we will be getting gap ack reports behind 3544 * the info from the router. 3545 */ 3546 tp1->rec.data.doing_fast_retransmit = 1; 3547 /* 3548 * mark the tsn with what sequences can 3549 * cause a new FR. 3550 */ 3551 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { 3552 tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; 3553 } else { 3554 tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn; 3555 } 3556 3557 /* restart the timer */ 3558 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 3559 stcb, tp1->whoTo, 3560 SCTP_FROM_SCTP_INPUT + SCTP_LOC_26); 3561 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 3562 stcb, tp1->whoTo); 3563 3564 /* fix counts and things */ 3565 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3566 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP, 3567 tp1->whoTo->flight_size, 3568 tp1->book_size, 3569 (uint32_t)(uintptr_t)stcb, 3570 tp1->rec.data.tsn); 3571 } 3572 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3573 sctp_flight_size_decrease(tp1); 3574 sctp_total_flight_decrease(stcb, tp1); 3575 } 3576 tp1->sent = SCTP_DATAGRAM_RESEND; 3577 } { 3578 /* audit code */ 3579 unsigned int audit; 3580 3581 audit = 0; 3582 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) { 3583 if (tp1->sent == SCTP_DATAGRAM_RESEND) 3584 audit++; 3585 } 3586 TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue, 3587 sctp_next) { 3588 if (tp1->sent == SCTP_DATAGRAM_RESEND) 3589 audit++; 3590 } 3591 if (audit != stcb->asoc.sent_queue_retran_cnt) { 3592 SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n", 3593 audit, stcb->asoc.sent_queue_retran_cnt); 3594 #ifndef SCTP_AUDITING_ENABLED 3595 stcb->asoc.sent_queue_retran_cnt = audit; 3596 #endif 3597 } 3598 } 3599 } 3600 break; 3601 case SCTP_ASCONF: 3602 { 3603 struct sctp_tmit_chunk *asconf; 3604 3605 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue, 3606 sctp_next) { 3607 if (asconf->rec.chunk_id.id == SCTP_ASCONF) { 3608 break; 3609 } 3610 } 3611 if (asconf) { 3612 if (asconf->sent != SCTP_DATAGRAM_RESEND) 3613 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3614 asconf->sent = SCTP_DATAGRAM_RESEND; 3615 asconf->snd_count--; 3616 } 3617 } 3618 break; 3619 case SCTP_INITIATION: 3620 /* resend the INIT */ 3621 stcb->asoc.dropped_special_cnt++; 3622 if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) { 3623 /* 3624 * If we can get it in, in a few attempts we do 3625 * this, otherwise we let the timer fire. 3626 */ 3627 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, 3628 stcb, net, 3629 SCTP_FROM_SCTP_INPUT + SCTP_LOC_27); 3630 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 3631 } 3632 break; 3633 case SCTP_SELECTIVE_ACK: 3634 case SCTP_NR_SELECTIVE_ACK: 3635 /* resend the sack */ 3636 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 3637 break; 3638 case SCTP_HEARTBEAT_REQUEST: 3639 /* resend a demand HB */ 3640 if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) { 3641 /* Only retransmit if we KNOW we wont destroy the tcb */ 3642 sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED); 3643 } 3644 break; 3645 case SCTP_SHUTDOWN: 3646 sctp_send_shutdown(stcb, net); 3647 break; 3648 case SCTP_SHUTDOWN_ACK: 3649 sctp_send_shutdown_ack(stcb, net); 3650 break; 3651 case SCTP_COOKIE_ECHO: 3652 { 3653 struct sctp_tmit_chunk *cookie; 3654 3655 cookie = NULL; 3656 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, 3657 sctp_next) { 3658 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 3659 break; 3660 } 3661 } 3662 if (cookie) { 3663 if (cookie->sent != SCTP_DATAGRAM_RESEND) 3664 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3665 cookie->sent = SCTP_DATAGRAM_RESEND; 3666 sctp_stop_all_cookie_timers(stcb); 3667 } 3668 } 3669 break; 3670 case SCTP_COOKIE_ACK: 3671 sctp_send_cookie_ack(stcb); 3672 break; 3673 case SCTP_ASCONF_ACK: 3674 /* resend last asconf ack */ 3675 sctp_send_asconf_ack(stcb); 3676 break; 3677 case SCTP_IFORWARD_CUM_TSN: 3678 case SCTP_FORWARD_CUM_TSN: 3679 send_forward_tsn(stcb, &stcb->asoc); 3680 break; 3681 /* can't do anything with these */ 3682 case SCTP_PACKET_DROPPED: 3683 case SCTP_INITIATION_ACK: /* this should not happen */ 3684 case SCTP_HEARTBEAT_ACK: 3685 case SCTP_ABORT_ASSOCIATION: 3686 case SCTP_OPERATION_ERROR: 3687 case SCTP_SHUTDOWN_COMPLETE: 3688 case SCTP_ECN_ECHO: 3689 case SCTP_ECN_CWR: 3690 default: 3691 break; 3692 } 3693 return (0); 3694 } 3695 3696 void 3697 sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list) 3698 { 3699 uint32_t i; 3700 uint16_t temp; 3701 3702 /* 3703 * We set things to 0xffffffff since this is the last delivered sequence 3704 * and we will be sending in 0 after the reset. 3705 */ 3706 3707 if (number_entries) { 3708 for (i = 0; i < number_entries; i++) { 3709 temp = ntohs(list[i]); 3710 if (temp >= stcb->asoc.streamincnt) { 3711 continue; 3712 } 3713 stcb->asoc.strmin[temp].last_mid_delivered = 0xffffffff; 3714 } 3715 } else { 3716 list = NULL; 3717 for (i = 0; i < stcb->asoc.streamincnt; i++) { 3718 stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff; 3719 } 3720 } 3721 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3722 } 3723 3724 static void 3725 sctp_reset_out_streams(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list) 3726 { 3727 uint32_t i; 3728 uint16_t temp; 3729 3730 if (number_entries > 0) { 3731 for (i = 0; i < number_entries; i++) { 3732 temp = ntohs(list[i]); 3733 if (temp >= stcb->asoc.streamoutcnt) { 3734 /* no such stream */ 3735 continue; 3736 } 3737 stcb->asoc.strmout[temp].next_mid_ordered = 0; 3738 stcb->asoc.strmout[temp].next_mid_unordered = 0; 3739 } 3740 } else { 3741 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3742 stcb->asoc.strmout[i].next_mid_ordered = 0; 3743 stcb->asoc.strmout[i].next_mid_unordered = 0; 3744 } 3745 } 3746 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3747 } 3748 3749 static void 3750 sctp_reset_clear_pending(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list) 3751 { 3752 uint32_t i; 3753 uint16_t temp; 3754 3755 if (number_entries > 0) { 3756 for (i = 0; i < number_entries; i++) { 3757 temp = ntohs(list[i]); 3758 if (temp >= stcb->asoc.streamoutcnt) { 3759 /* no such stream */ 3760 continue; 3761 } 3762 stcb->asoc.strmout[temp].state = SCTP_STREAM_OPEN; 3763 } 3764 } else { 3765 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3766 stcb->asoc.strmout[i].state = SCTP_STREAM_OPEN; 3767 } 3768 } 3769 } 3770 3771 struct sctp_stream_reset_request * 3772 sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk) 3773 { 3774 struct sctp_association *asoc; 3775 struct sctp_chunkhdr *ch; 3776 struct sctp_stream_reset_request *r; 3777 struct sctp_tmit_chunk *chk; 3778 int len, clen; 3779 3780 asoc = &stcb->asoc; 3781 chk = asoc->str_reset; 3782 if (TAILQ_EMPTY(&asoc->control_send_queue) || 3783 (chk == NULL)) { 3784 asoc->stream_reset_outstanding = 0; 3785 return (NULL); 3786 } 3787 if (chk->data == NULL) { 3788 return (NULL); 3789 } 3790 if (bchk != NULL) { 3791 /* he wants a copy of the chk pointer */ 3792 *bchk = chk; 3793 } 3794 clen = chk->send_size; 3795 ch = mtod(chk->data, struct sctp_chunkhdr *); 3796 r = (struct sctp_stream_reset_request *)(ch + 1); 3797 if (ntohl(r->request_seq) == seq) { 3798 /* found it */ 3799 return (r); 3800 } 3801 len = SCTP_SIZE32(ntohs(r->ph.param_length)); 3802 if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) { 3803 /* move to the next one, there can only be a max of two */ 3804 r = (struct sctp_stream_reset_request *)((caddr_t)r + len); 3805 if (ntohl(r->request_seq) == seq) { 3806 return (r); 3807 } 3808 } 3809 /* that seq is not here */ 3810 return (NULL); 3811 } 3812 3813 static void 3814 sctp_clean_up_stream_reset(struct sctp_tcb *stcb) 3815 { 3816 struct sctp_association *asoc; 3817 struct sctp_tmit_chunk *chk; 3818 3819 asoc = &stcb->asoc; 3820 chk = asoc->str_reset; 3821 if (chk == NULL) { 3822 return; 3823 } 3824 asoc->str_reset = NULL; 3825 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, 3826 NULL, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28); 3827 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 3828 asoc->ctrl_queue_cnt--; 3829 if (chk->data) { 3830 sctp_m_freem(chk->data); 3831 chk->data = NULL; 3832 } 3833 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 3834 } 3835 3836 static int 3837 sctp_handle_stream_reset_response(struct sctp_tcb *stcb, 3838 uint32_t seq, uint32_t action, 3839 struct sctp_stream_reset_response *respin) 3840 { 3841 uint16_t type; 3842 int lparam_len; 3843 struct sctp_association *asoc = &stcb->asoc; 3844 struct sctp_tmit_chunk *chk; 3845 struct sctp_stream_reset_request *req_param; 3846 struct sctp_stream_reset_out_request *req_out_param; 3847 struct sctp_stream_reset_in_request *req_in_param; 3848 uint32_t number_entries; 3849 3850 if (asoc->stream_reset_outstanding == 0) { 3851 /* duplicate */ 3852 return (0); 3853 } 3854 if (seq == stcb->asoc.str_reset_seq_out) { 3855 req_param = sctp_find_stream_reset(stcb, seq, &chk); 3856 if (req_param != NULL) { 3857 stcb->asoc.str_reset_seq_out++; 3858 type = ntohs(req_param->ph.param_type); 3859 lparam_len = ntohs(req_param->ph.param_length); 3860 if (type == SCTP_STR_RESET_OUT_REQUEST) { 3861 int no_clear = 0; 3862 3863 req_out_param = (struct sctp_stream_reset_out_request *)req_param; 3864 number_entries = (lparam_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t); 3865 asoc->stream_reset_out_is_outstanding = 0; 3866 if (asoc->stream_reset_outstanding) 3867 asoc->stream_reset_outstanding--; 3868 if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) { 3869 /* do it */ 3870 sctp_reset_out_streams(stcb, number_entries, req_out_param->list_of_streams); 3871 } else if (action == SCTP_STREAM_RESET_RESULT_DENIED) { 3872 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED); 3873 } else if (action == SCTP_STREAM_RESET_RESULT_IN_PROGRESS) { 3874 /* Set it up so we don't stop retransmitting */ 3875 asoc->stream_reset_outstanding++; 3876 stcb->asoc.str_reset_seq_out--; 3877 asoc->stream_reset_out_is_outstanding = 1; 3878 no_clear = 1; 3879 } else { 3880 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED); 3881 } 3882 if (no_clear == 0) { 3883 sctp_reset_clear_pending(stcb, number_entries, req_out_param->list_of_streams); 3884 } 3885 } else if (type == SCTP_STR_RESET_IN_REQUEST) { 3886 req_in_param = (struct sctp_stream_reset_in_request *)req_param; 3887 number_entries = (lparam_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t); 3888 if (asoc->stream_reset_outstanding) 3889 asoc->stream_reset_outstanding--; 3890 if (action == SCTP_STREAM_RESET_RESULT_DENIED) { 3891 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_IN, stcb, 3892 number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED); 3893 } else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) { 3894 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, 3895 number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED); 3896 } 3897 } else if (type == SCTP_STR_RESET_ADD_OUT_STREAMS) { 3898 /* Ok we now may have more streams */ 3899 int num_stream; 3900 3901 num_stream = stcb->asoc.strm_pending_add_size; 3902 if (num_stream > (stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt)) { 3903 /* TSNH */ 3904 num_stream = stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt; 3905 } 3906 stcb->asoc.strm_pending_add_size = 0; 3907 if (asoc->stream_reset_outstanding) 3908 asoc->stream_reset_outstanding--; 3909 if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) { 3910 /* Put the new streams into effect */ 3911 int i; 3912 for (i = asoc->streamoutcnt; i < (asoc->streamoutcnt + num_stream); i++) { 3913 asoc->strmout[i].state = SCTP_STREAM_OPEN; 3914 } 3915 asoc->streamoutcnt += num_stream; 3916 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 3917 } else if (action == SCTP_STREAM_RESET_RESULT_DENIED) { 3918 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD, stcb, SCTP_STREAM_CHANGE_DENIED, NULL, SCTP_SO_NOT_LOCKED); 3919 } else { 3920 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD, stcb, SCTP_STREAM_CHANGE_FAILED, NULL, SCTP_SO_NOT_LOCKED); 3921 } 3922 } else if (type == SCTP_STR_RESET_ADD_IN_STREAMS) { 3923 if (asoc->stream_reset_outstanding) 3924 asoc->stream_reset_outstanding--; 3925 if (action == SCTP_STREAM_RESET_RESULT_DENIED) { 3926 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD, stcb, SCTP_STREAM_CHANGE_DENIED, NULL, SCTP_SO_NOT_LOCKED); 3927 } else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) { 3928 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD, stcb, SCTP_STREAM_CHANGE_DENIED, NULL, SCTP_SO_NOT_LOCKED); 3929 } 3930 } else if (type == SCTP_STR_RESET_TSN_REQUEST) { 3931 /** 3932 * a) Adopt the new in tsn. 3933 * b) reset the map 3934 * c) Adopt the new out-tsn 3935 */ 3936 struct sctp_stream_reset_response_tsn *resp; 3937 struct sctp_forward_tsn_chunk fwdtsn; 3938 int abort_flag = 0; 3939 if (respin == NULL) { 3940 /* huh ? */ 3941 return (0); 3942 } 3943 if (ntohs(respin->ph.param_length) < sizeof(struct sctp_stream_reset_response_tsn)) { 3944 return (0); 3945 } 3946 if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) { 3947 resp = (struct sctp_stream_reset_response_tsn *)respin; 3948 asoc->stream_reset_outstanding--; 3949 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 3950 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 3951 fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1); 3952 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 3953 if (abort_flag) { 3954 return (1); 3955 } 3956 stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1); 3957 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 3958 sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 3959 } 3960 3961 stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 3962 stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn); 3963 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 3964 3965 stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map; 3966 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size); 3967 3968 stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn); 3969 stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn; 3970 3971 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 3972 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 3973 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_TSN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 3974 } else if (action == SCTP_STREAM_RESET_RESULT_DENIED) { 3975 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_TSN, stcb, SCTP_ASSOC_RESET_DENIED, NULL, SCTP_SO_NOT_LOCKED); 3976 } else { 3977 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_TSN, stcb, SCTP_ASSOC_RESET_FAILED, NULL, SCTP_SO_NOT_LOCKED); 3978 } 3979 } 3980 /* get rid of the request and get the request flags */ 3981 if (asoc->stream_reset_outstanding == 0) { 3982 sctp_clean_up_stream_reset(stcb); 3983 } 3984 } 3985 } 3986 if (asoc->stream_reset_outstanding == 0) { 3987 sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED); 3988 } 3989 return (0); 3990 } 3991 3992 static void 3993 sctp_handle_str_reset_request_in(struct sctp_tcb *stcb, 3994 struct sctp_tmit_chunk *chk, 3995 struct sctp_stream_reset_in_request *req, int trunc) 3996 { 3997 uint32_t seq; 3998 int len, i; 3999 int number_entries; 4000 uint16_t temp; 4001 4002 /* 4003 * peer wants me to send a str-reset to him for my outgoing seq's if 4004 * seq_in is right. 4005 */ 4006 struct sctp_association *asoc = &stcb->asoc; 4007 4008 seq = ntohl(req->request_seq); 4009 if (asoc->str_reset_seq_in == seq) { 4010 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 4011 if ((asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ) == 0) { 4012 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 4013 } else if (trunc) { 4014 /* Can't do it, since they exceeded our buffer size */ 4015 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 4016 } else if (stcb->asoc.stream_reset_out_is_outstanding == 0) { 4017 len = ntohs(req->ph.param_length); 4018 number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t)); 4019 if (number_entries) { 4020 for (i = 0; i < number_entries; i++) { 4021 temp = ntohs(req->list_of_streams[i]); 4022 if (temp >= stcb->asoc.streamoutcnt) { 4023 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 4024 goto bad_boy; 4025 } 4026 req->list_of_streams[i] = temp; 4027 } 4028 for (i = 0; i < number_entries; i++) { 4029 if (stcb->asoc.strmout[req->list_of_streams[i]].state == SCTP_STREAM_OPEN) { 4030 stcb->asoc.strmout[req->list_of_streams[i]].state = SCTP_STREAM_RESET_PENDING; 4031 } 4032 } 4033 } else { 4034 /* Its all */ 4035 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 4036 if (stcb->asoc.strmout[i].state == SCTP_STREAM_OPEN) 4037 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_PENDING; 4038 } 4039 } 4040 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED; 4041 } else { 4042 /* Can't do it, since we have sent one out */ 4043 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS; 4044 } 4045 bad_boy: 4046 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 4047 asoc->str_reset_seq_in++; 4048 } else if (asoc->str_reset_seq_in - 1 == seq) { 4049 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 4050 } else if (asoc->str_reset_seq_in - 2 == seq) { 4051 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 4052 } else { 4053 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO); 4054 } 4055 sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED); 4056 } 4057 4058 static int 4059 sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb, 4060 struct sctp_tmit_chunk *chk, 4061 struct sctp_stream_reset_tsn_request *req) 4062 { 4063 /* reset all in and out and update the tsn */ 4064 /* 4065 * A) reset my str-seq's on in and out. B) Select a receive next, 4066 * and set cum-ack to it. Also process this selected number as a 4067 * fwd-tsn as well. C) set in the response my next sending seq. 4068 */ 4069 struct sctp_forward_tsn_chunk fwdtsn; 4070 struct sctp_association *asoc = &stcb->asoc; 4071 int abort_flag = 0; 4072 uint32_t seq; 4073 4074 seq = ntohl(req->request_seq); 4075 if (asoc->str_reset_seq_in == seq) { 4076 asoc->last_reset_action[1] = stcb->asoc.last_reset_action[0]; 4077 if ((asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ) == 0) { 4078 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 4079 } else { 4080 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 4081 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 4082 fwdtsn.ch.chunk_flags = 0; 4083 fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1); 4084 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 4085 if (abort_flag) { 4086 return (1); 4087 } 4088 asoc->highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA; 4089 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 4090 sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 4091 } 4092 asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 4093 asoc->mapping_array_base_tsn = asoc->highest_tsn_inside_map + 1; 4094 memset(asoc->mapping_array, 0, asoc->mapping_array_size); 4095 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map; 4096 memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size); 4097 atomic_add_int(&asoc->sending_seq, 1); 4098 /* save off historical data for retrans */ 4099 asoc->last_sending_seq[1] = asoc->last_sending_seq[0]; 4100 asoc->last_sending_seq[0] = asoc->sending_seq; 4101 asoc->last_base_tsnsent[1] = asoc->last_base_tsnsent[0]; 4102 asoc->last_base_tsnsent[0] = asoc->mapping_array_base_tsn; 4103 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 4104 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 4105 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED; 4106 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_TSN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 4107 } 4108 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0], 4109 asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]); 4110 asoc->str_reset_seq_in++; 4111 } else if (asoc->str_reset_seq_in - 1 == seq) { 4112 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0], 4113 asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]); 4114 } else if (asoc->str_reset_seq_in - 2 == seq) { 4115 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1], 4116 asoc->last_sending_seq[1], asoc->last_base_tsnsent[1]); 4117 } else { 4118 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO); 4119 } 4120 return (0); 4121 } 4122 4123 static void 4124 sctp_handle_str_reset_request_out(struct sctp_tcb *stcb, 4125 struct sctp_tmit_chunk *chk, 4126 struct sctp_stream_reset_out_request *req, int trunc) 4127 { 4128 uint32_t seq, tsn; 4129 int number_entries, len; 4130 struct sctp_association *asoc = &stcb->asoc; 4131 4132 seq = ntohl(req->request_seq); 4133 4134 /* now if its not a duplicate we process it */ 4135 if (asoc->str_reset_seq_in == seq) { 4136 len = ntohs(req->ph.param_length); 4137 number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t)); 4138 /* 4139 * the sender is resetting, handle the list issue.. we must 4140 * a) verify if we can do the reset, if so no problem b) If 4141 * we can't do the reset we must copy the request. c) queue 4142 * it, and setup the data in processor to trigger it off 4143 * when needed and dequeue all the queued data. 4144 */ 4145 tsn = ntohl(req->send_reset_at_tsn); 4146 4147 /* move the reset action back one */ 4148 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 4149 if ((asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ) == 0) { 4150 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 4151 } else if (trunc) { 4152 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 4153 } else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) { 4154 /* we can do it now */ 4155 sctp_reset_in_stream(stcb, number_entries, req->list_of_streams); 4156 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED; 4157 } else { 4158 /* 4159 * we must queue it up and thus wait for the TSN's 4160 * to arrive that are at or before tsn 4161 */ 4162 struct sctp_stream_reset_list *liste; 4163 int siz; 4164 4165 siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t)); 4166 SCTP_MALLOC(liste, struct sctp_stream_reset_list *, 4167 siz, SCTP_M_STRESET); 4168 if (liste == NULL) { 4169 /* gak out of memory */ 4170 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 4171 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 4172 return; 4173 } 4174 liste->seq = seq; 4175 liste->tsn = tsn; 4176 liste->number_entries = number_entries; 4177 memcpy(&liste->list_of_streams, req->list_of_streams, number_entries * sizeof(uint16_t)); 4178 TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp); 4179 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_IN_PROGRESS; 4180 } 4181 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 4182 asoc->str_reset_seq_in++; 4183 } else if ((asoc->str_reset_seq_in - 1) == seq) { 4184 /* 4185 * one seq back, just echo back last action since my 4186 * response was lost. 4187 */ 4188 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 4189 } else if ((asoc->str_reset_seq_in - 2) == seq) { 4190 /* 4191 * two seq back, just echo back last action since my 4192 * response was lost. 4193 */ 4194 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 4195 } else { 4196 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO); 4197 } 4198 } 4199 4200 static void 4201 sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk, 4202 struct sctp_stream_reset_add_strm *str_add) 4203 { 4204 /* 4205 * Peer is requesting to add more streams. 4206 * If its within our max-streams we will 4207 * allow it. 4208 */ 4209 uint32_t num_stream, i; 4210 uint32_t seq; 4211 struct sctp_association *asoc = &stcb->asoc; 4212 struct sctp_queued_to_read *ctl, *nctl; 4213 4214 /* Get the number. */ 4215 seq = ntohl(str_add->request_seq); 4216 num_stream = ntohs(str_add->number_of_streams); 4217 /* Now what would be the new total? */ 4218 if (asoc->str_reset_seq_in == seq) { 4219 num_stream += stcb->asoc.streamincnt; 4220 stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0]; 4221 if ((asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ) == 0) { 4222 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 4223 } else if ((num_stream > stcb->asoc.max_inbound_streams) || 4224 (num_stream > 0xffff)) { 4225 /* We must reject it they ask for to many */ 4226 denied: 4227 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 4228 } else { 4229 /* Ok, we can do that :-) */ 4230 struct sctp_stream_in *oldstrm; 4231 4232 /* save off the old */ 4233 oldstrm = stcb->asoc.strmin; 4234 SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *, 4235 (num_stream * sizeof(struct sctp_stream_in)), 4236 SCTP_M_STRMI); 4237 if (stcb->asoc.strmin == NULL) { 4238 stcb->asoc.strmin = oldstrm; 4239 goto denied; 4240 } 4241 /* copy off the old data */ 4242 for (i = 0; i < stcb->asoc.streamincnt; i++) { 4243 TAILQ_INIT(&stcb->asoc.strmin[i].inqueue); 4244 TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue); 4245 stcb->asoc.strmin[i].sid = i; 4246 stcb->asoc.strmin[i].last_mid_delivered = oldstrm[i].last_mid_delivered; 4247 stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started; 4248 stcb->asoc.strmin[i].pd_api_started = oldstrm[i].pd_api_started; 4249 /* now anything on those queues? */ 4250 TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next_instrm, nctl) { 4251 TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next_instrm); 4252 TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next_instrm); 4253 } 4254 TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].uno_inqueue, next_instrm, nctl) { 4255 TAILQ_REMOVE(&oldstrm[i].uno_inqueue, ctl, next_instrm); 4256 TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].uno_inqueue, ctl, next_instrm); 4257 } 4258 } 4259 /* Init the new streams */ 4260 for (i = stcb->asoc.streamincnt; i < num_stream; i++) { 4261 TAILQ_INIT(&stcb->asoc.strmin[i].inqueue); 4262 TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue); 4263 stcb->asoc.strmin[i].sid = i; 4264 stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff; 4265 stcb->asoc.strmin[i].pd_api_started = 0; 4266 stcb->asoc.strmin[i].delivery_started = 0; 4267 } 4268 SCTP_FREE(oldstrm, SCTP_M_STRMI); 4269 /* update the size */ 4270 stcb->asoc.streamincnt = num_stream; 4271 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED; 4272 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 4273 } 4274 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 4275 asoc->str_reset_seq_in++; 4276 } else if ((asoc->str_reset_seq_in - 1) == seq) { 4277 /* 4278 * one seq back, just echo back last action since my 4279 * response was lost. 4280 */ 4281 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 4282 } else if ((asoc->str_reset_seq_in - 2) == seq) { 4283 /* 4284 * two seq back, just echo back last action since my 4285 * response was lost. 4286 */ 4287 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 4288 } else { 4289 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO); 4290 } 4291 } 4292 4293 static void 4294 sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk, 4295 struct sctp_stream_reset_add_strm *str_add) 4296 { 4297 /* 4298 * Peer is requesting to add more streams. 4299 * If its within our max-streams we will 4300 * allow it. 4301 */ 4302 uint16_t num_stream; 4303 uint32_t seq; 4304 struct sctp_association *asoc = &stcb->asoc; 4305 4306 /* Get the number. */ 4307 seq = ntohl(str_add->request_seq); 4308 num_stream = ntohs(str_add->number_of_streams); 4309 /* Now what would be the new total? */ 4310 if (asoc->str_reset_seq_in == seq) { 4311 stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0]; 4312 if ((asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ) == 0) { 4313 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 4314 } else if (stcb->asoc.stream_reset_outstanding) { 4315 /* We must reject it we have something pending */ 4316 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS; 4317 } else { 4318 /* Ok, we can do that :-) */ 4319 int mychk; 4320 mychk = stcb->asoc.streamoutcnt; 4321 mychk += num_stream; 4322 if (mychk < 0x10000) { 4323 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED; 4324 if (sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, 1, num_stream, 0, 1)) { 4325 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 4326 } 4327 } else { 4328 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 4329 } 4330 } 4331 sctp_add_stream_reset_result(chk, seq, stcb->asoc.last_reset_action[0]); 4332 asoc->str_reset_seq_in++; 4333 } else if ((asoc->str_reset_seq_in - 1) == seq) { 4334 /* 4335 * one seq back, just echo back last action since my 4336 * response was lost. 4337 */ 4338 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 4339 } else if ((asoc->str_reset_seq_in - 2) == seq) { 4340 /* 4341 * two seq back, just echo back last action since my 4342 * response was lost. 4343 */ 4344 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 4345 } else { 4346 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO); 4347 } 4348 } 4349 4350 #ifdef __GNUC__ 4351 __attribute__ ((noinline)) 4352 #endif 4353 static int 4354 sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset, 4355 struct sctp_chunkhdr *ch_req) 4356 { 4357 uint16_t remaining_length, param_len, ptype; 4358 struct sctp_paramhdr pstore; 4359 uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE]; 4360 uint32_t seq = 0; 4361 int num_req = 0; 4362 int trunc = 0; 4363 struct sctp_tmit_chunk *chk; 4364 struct sctp_chunkhdr *ch; 4365 struct sctp_paramhdr *ph; 4366 int ret_code = 0; 4367 int num_param = 0; 4368 4369 /* now it may be a reset or a reset-response */ 4370 remaining_length = ntohs(ch_req->chunk_length) - sizeof(struct sctp_chunkhdr); 4371 4372 /* setup for adding the response */ 4373 sctp_alloc_a_chunk(stcb, chk); 4374 if (chk == NULL) { 4375 return (ret_code); 4376 } 4377 chk->copy_by_ref = 0; 4378 chk->rec.chunk_id.id = SCTP_STREAM_RESET; 4379 chk->rec.chunk_id.can_take_data = 0; 4380 chk->flags = 0; 4381 chk->asoc = &stcb->asoc; 4382 chk->no_fr_allowed = 0; 4383 chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr); 4384 chk->book_size_scale = 0; 4385 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 4386 if (chk->data == NULL) { 4387 strres_nochunk: 4388 if (chk->data) { 4389 sctp_m_freem(chk->data); 4390 chk->data = NULL; 4391 } 4392 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 4393 return (ret_code); 4394 } 4395 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 4396 4397 /* setup chunk parameters */ 4398 chk->sent = SCTP_DATAGRAM_UNSENT; 4399 chk->snd_count = 0; 4400 chk->whoTo = NULL; 4401 4402 ch = mtod(chk->data, struct sctp_chunkhdr *); 4403 ch->chunk_type = SCTP_STREAM_RESET; 4404 ch->chunk_flags = 0; 4405 ch->chunk_length = htons(chk->send_size); 4406 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 4407 offset += sizeof(struct sctp_chunkhdr); 4408 while (remaining_length >= sizeof(struct sctp_paramhdr)) { 4409 ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *)&pstore); 4410 if (ph == NULL) { 4411 /* TSNH */ 4412 break; 4413 } 4414 param_len = ntohs(ph->param_length); 4415 if ((param_len > remaining_length) || 4416 (param_len < (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)))) { 4417 /* bad parameter length */ 4418 break; 4419 } 4420 ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, sizeof(cstore)), 4421 (uint8_t *)&cstore); 4422 if (ph == NULL) { 4423 /* TSNH */ 4424 break; 4425 } 4426 ptype = ntohs(ph->param_type); 4427 num_param++; 4428 if (param_len > sizeof(cstore)) { 4429 trunc = 1; 4430 } else { 4431 trunc = 0; 4432 } 4433 if (num_param > SCTP_MAX_RESET_PARAMS) { 4434 /* hit the max of parameters already sorry.. */ 4435 break; 4436 } 4437 if (ptype == SCTP_STR_RESET_OUT_REQUEST) { 4438 struct sctp_stream_reset_out_request *req_out; 4439 4440 if (param_len < sizeof(struct sctp_stream_reset_out_request)) { 4441 break; 4442 } 4443 req_out = (struct sctp_stream_reset_out_request *)ph; 4444 num_req++; 4445 if (stcb->asoc.stream_reset_outstanding) { 4446 seq = ntohl(req_out->response_seq); 4447 if (seq == stcb->asoc.str_reset_seq_out) { 4448 /* implicit ack */ 4449 (void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_RESULT_PERFORMED, NULL); 4450 } 4451 } 4452 sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc); 4453 } else if (ptype == SCTP_STR_RESET_ADD_OUT_STREAMS) { 4454 struct sctp_stream_reset_add_strm *str_add; 4455 4456 if (param_len < sizeof(struct sctp_stream_reset_add_strm)) { 4457 break; 4458 } 4459 str_add = (struct sctp_stream_reset_add_strm *)ph; 4460 num_req++; 4461 sctp_handle_str_reset_add_strm(stcb, chk, str_add); 4462 } else if (ptype == SCTP_STR_RESET_ADD_IN_STREAMS) { 4463 struct sctp_stream_reset_add_strm *str_add; 4464 4465 if (param_len < sizeof(struct sctp_stream_reset_add_strm)) { 4466 break; 4467 } 4468 str_add = (struct sctp_stream_reset_add_strm *)ph; 4469 num_req++; 4470 sctp_handle_str_reset_add_out_strm(stcb, chk, str_add); 4471 } else if (ptype == SCTP_STR_RESET_IN_REQUEST) { 4472 struct sctp_stream_reset_in_request *req_in; 4473 4474 num_req++; 4475 req_in = (struct sctp_stream_reset_in_request *)ph; 4476 sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc); 4477 } else if (ptype == SCTP_STR_RESET_TSN_REQUEST) { 4478 struct sctp_stream_reset_tsn_request *req_tsn; 4479 4480 num_req++; 4481 req_tsn = (struct sctp_stream_reset_tsn_request *)ph; 4482 if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) { 4483 ret_code = 1; 4484 goto strres_nochunk; 4485 } 4486 /* no more */ 4487 break; 4488 } else if (ptype == SCTP_STR_RESET_RESPONSE) { 4489 struct sctp_stream_reset_response *resp; 4490 uint32_t result; 4491 4492 if (param_len < sizeof(struct sctp_stream_reset_response)) { 4493 break; 4494 } 4495 resp = (struct sctp_stream_reset_response *)ph; 4496 seq = ntohl(resp->response_seq); 4497 result = ntohl(resp->result); 4498 if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) { 4499 ret_code = 1; 4500 goto strres_nochunk; 4501 } 4502 } else { 4503 break; 4504 } 4505 offset += SCTP_SIZE32(param_len); 4506 if (remaining_length >= SCTP_SIZE32(param_len)) { 4507 remaining_length -= SCTP_SIZE32(param_len); 4508 } else { 4509 remaining_length = 0; 4510 } 4511 } 4512 if (num_req == 0) { 4513 /* we have no response free the stuff */ 4514 goto strres_nochunk; 4515 } 4516 /* ok we have a chunk to link in */ 4517 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, 4518 chk, 4519 sctp_next); 4520 stcb->asoc.ctrl_queue_cnt++; 4521 return (ret_code); 4522 } 4523 4524 /* 4525 * Handle a router or endpoints report of a packet loss, there are two ways 4526 * to handle this, either we get the whole packet and must disect it 4527 * ourselves (possibly with truncation and or corruption) or it is a summary 4528 * from a middle box that did the disecting for us. 4529 */ 4530 static void 4531 sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp, 4532 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit) 4533 { 4534 struct sctp_chunk_desc desc; 4535 struct sctp_chunkhdr *chk_hdr; 4536 struct sctp_data_chunk *data_chunk; 4537 struct sctp_idata_chunk *idata_chunk; 4538 uint32_t bottle_bw, on_queue; 4539 uint32_t offset, chk_len; 4540 uint16_t pktdrp_len; 4541 uint8_t pktdrp_flags; 4542 4543 KASSERT(sizeof(struct sctp_pktdrop_chunk) <= limit, 4544 ("PKTDROP chunk too small")); 4545 pktdrp_flags = cp->ch.chunk_flags; 4546 pktdrp_len = ntohs(cp->ch.chunk_length); 4547 KASSERT(limit <= pktdrp_len, ("Inconsistent limit")); 4548 if (pktdrp_flags & SCTP_PACKET_TRUNCATED) { 4549 if (ntohs(cp->trunc_len) <= pktdrp_len - sizeof(struct sctp_pktdrop_chunk)) { 4550 /* The peer plays games with us. */ 4551 return; 4552 } 4553 } 4554 limit -= sizeof(struct sctp_pktdrop_chunk); 4555 offset = 0; 4556 if (offset == limit) { 4557 if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) { 4558 SCTP_STAT_INCR(sctps_pdrpbwrpt); 4559 } 4560 } else if (offset + sizeof(struct sctphdr) > limit) { 4561 /* Only a partial SCTP common header. */ 4562 SCTP_STAT_INCR(sctps_pdrpcrupt); 4563 offset = limit; 4564 } else { 4565 /* XXX: Check embedded SCTP common header. */ 4566 offset += sizeof(struct sctphdr); 4567 } 4568 /* Now parse through the chunks themselves. */ 4569 while (offset < limit) { 4570 if (offset + sizeof(struct sctp_chunkhdr) > limit) { 4571 SCTP_STAT_INCR(sctps_pdrpcrupt); 4572 break; 4573 } 4574 chk_hdr = (struct sctp_chunkhdr *)(cp->data + offset); 4575 desc.chunk_type = chk_hdr->chunk_type; 4576 /* get amount we need to move */ 4577 chk_len = (uint32_t)ntohs(chk_hdr->chunk_length); 4578 if (chk_len < sizeof(struct sctp_chunkhdr)) { 4579 /* Someone is lying... */ 4580 break; 4581 } 4582 if (desc.chunk_type == SCTP_DATA) { 4583 if (stcb->asoc.idata_supported) { 4584 /* Some is playing games with us. */ 4585 break; 4586 } 4587 if (chk_len <= sizeof(struct sctp_data_chunk)) { 4588 /* Some is playing games with us. */ 4589 break; 4590 } 4591 if (chk_len < sizeof(struct sctp_data_chunk) + SCTP_NUM_DB_TO_VERIFY) { 4592 /* Not enough data bytes available in the chunk. */ 4593 SCTP_STAT_INCR(sctps_pdrpnedat); 4594 goto next_chunk; 4595 } 4596 if (offset + sizeof(struct sctp_data_chunk) + SCTP_NUM_DB_TO_VERIFY > limit) { 4597 /* Not enough data in buffer. */ 4598 break; 4599 } 4600 data_chunk = (struct sctp_data_chunk *)(cp->data + offset); 4601 memcpy(desc.data_bytes, data_chunk + 1, SCTP_NUM_DB_TO_VERIFY); 4602 desc.tsn_ifany = data_chunk->dp.tsn; 4603 if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) { 4604 SCTP_STAT_INCR(sctps_pdrpmbda); 4605 } 4606 } else if (desc.chunk_type == SCTP_IDATA) { 4607 if (!stcb->asoc.idata_supported) { 4608 /* Some is playing games with us. */ 4609 break; 4610 } 4611 if (chk_len <= sizeof(struct sctp_idata_chunk)) { 4612 /* Some is playing games with us. */ 4613 break; 4614 } 4615 if (chk_len < sizeof(struct sctp_idata_chunk) + SCTP_NUM_DB_TO_VERIFY) { 4616 /* Not enough data bytes available in the chunk. */ 4617 SCTP_STAT_INCR(sctps_pdrpnedat); 4618 goto next_chunk; 4619 } 4620 if (offset + sizeof(struct sctp_idata_chunk) + SCTP_NUM_DB_TO_VERIFY > limit) { 4621 /* Not enough data in buffer. */ 4622 break; 4623 } 4624 idata_chunk = (struct sctp_idata_chunk *)(cp->data + offset); 4625 memcpy(desc.data_bytes, idata_chunk + 1, SCTP_NUM_DB_TO_VERIFY); 4626 desc.tsn_ifany = idata_chunk->dp.tsn; 4627 if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) { 4628 SCTP_STAT_INCR(sctps_pdrpmbda); 4629 } 4630 } else { 4631 desc.tsn_ifany = htonl(0); 4632 memset(desc.data_bytes, 0, SCTP_NUM_DB_TO_VERIFY); 4633 if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) { 4634 SCTP_STAT_INCR(sctps_pdrpmbct); 4635 } 4636 } 4637 if (process_chunk_drop(stcb, &desc, net, pktdrp_flags)) { 4638 SCTP_STAT_INCR(sctps_pdrppdbrk); 4639 break; 4640 } 4641 next_chunk: 4642 offset += SCTP_SIZE32(chk_len); 4643 } 4644 /* Now update any rwnd --- possibly */ 4645 if ((pktdrp_flags & SCTP_FROM_MIDDLE_BOX) == 0) { 4646 /* From a peer, we get a rwnd report */ 4647 uint32_t a_rwnd; 4648 4649 SCTP_STAT_INCR(sctps_pdrpfehos); 4650 4651 bottle_bw = ntohl(cp->bottle_bw); 4652 on_queue = ntohl(cp->current_onq); 4653 if (bottle_bw && on_queue) { 4654 /* a rwnd report is in here */ 4655 if (bottle_bw > on_queue) 4656 a_rwnd = bottle_bw - on_queue; 4657 else 4658 a_rwnd = 0; 4659 4660 if (a_rwnd == 0) 4661 stcb->asoc.peers_rwnd = 0; 4662 else { 4663 if (a_rwnd > stcb->asoc.total_flight) { 4664 stcb->asoc.peers_rwnd = 4665 a_rwnd - stcb->asoc.total_flight; 4666 } else { 4667 stcb->asoc.peers_rwnd = 0; 4668 } 4669 if (stcb->asoc.peers_rwnd < 4670 stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4671 /* SWS sender side engages */ 4672 stcb->asoc.peers_rwnd = 0; 4673 } 4674 } 4675 } 4676 } else { 4677 SCTP_STAT_INCR(sctps_pdrpfmbox); 4678 } 4679 4680 /* now middle boxes in sat networks get a cwnd bump */ 4681 if ((pktdrp_flags & SCTP_FROM_MIDDLE_BOX) && 4682 (stcb->asoc.sat_t3_loss_recovery == 0) && 4683 (stcb->asoc.sat_network)) { 4684 /* 4685 * This is debatable but for sat networks it makes sense 4686 * Note if a T3 timer has went off, we will prohibit any 4687 * changes to cwnd until we exit the t3 loss recovery. 4688 */ 4689 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb, 4690 net, cp, &bottle_bw, &on_queue); 4691 } 4692 } 4693 4694 /* 4695 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to 4696 * still contain IP/SCTP header - stcb: is the tcb found for this packet - 4697 * offset: offset into the mbuf chain to first chunkhdr - length: is the 4698 * length of the complete packet outputs: - length: modified to remaining 4699 * length after control processing - netp: modified to new sctp_nets after 4700 * cookie-echo processing - return NULL to discard the packet (ie. no asoc, 4701 * bad packet,...) otherwise return the tcb for this packet 4702 */ 4703 #ifdef __GNUC__ 4704 __attribute__ ((noinline)) 4705 #endif 4706 static struct sctp_tcb * 4707 sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, 4708 struct sockaddr *src, struct sockaddr *dst, 4709 struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp, 4710 struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen, 4711 #if defined(__FreeBSD__) && !defined(__Userspace__) 4712 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4713 #endif 4714 uint32_t vrf_id, uint16_t port) 4715 { 4716 struct sctp_association *asoc; 4717 struct mbuf *op_err; 4718 char msg[SCTP_DIAG_INFO_LEN]; 4719 uint32_t vtag_in; 4720 int num_chunks = 0; /* number of control chunks processed */ 4721 uint32_t chk_length, contiguous; 4722 int ret; 4723 int abort_no_unlock = 0; 4724 int ecne_seen = 0; 4725 int abort_flag; 4726 /* 4727 * How big should this be, and should it be alloc'd? Lets try the 4728 * d-mtu-ceiling for now (2k) and that should hopefully work ... 4729 * until we get into jumbo grams and such.. 4730 */ 4731 uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE]; 4732 int got_auth = 0; 4733 uint32_t auth_offset = 0, auth_len = 0; 4734 int auth_skipped = 0; 4735 int asconf_cnt = 0; 4736 #if defined(__APPLE__) && !defined(__Userspace__) 4737 struct socket *so; 4738 #endif 4739 4740 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n", 4741 iphlen, *offset, length, (void *)stcb); 4742 4743 if (stcb) { 4744 SCTP_TCB_LOCK_ASSERT(stcb); 4745 } 4746 /* validate chunk header length... */ 4747 if (ntohs(ch->chunk_length) < sizeof(*ch)) { 4748 SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n", 4749 ntohs(ch->chunk_length)); 4750 *offset = length; 4751 return (stcb); 4752 } 4753 /* 4754 * validate the verification tag 4755 */ 4756 vtag_in = ntohl(sh->v_tag); 4757 4758 if (ch->chunk_type == SCTP_INITIATION) { 4759 SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n", 4760 ntohs(ch->chunk_length), vtag_in); 4761 if (vtag_in != 0) { 4762 /* protocol error- silently discard... */ 4763 SCTP_STAT_INCR(sctps_badvtag); 4764 if (stcb != NULL) { 4765 SCTP_TCB_UNLOCK(stcb); 4766 } 4767 return (NULL); 4768 } 4769 } else if (ch->chunk_type != SCTP_COOKIE_ECHO) { 4770 /* 4771 * If there is no stcb, skip the AUTH chunk and process 4772 * later after a stcb is found (to validate the lookup was 4773 * valid. 4774 */ 4775 if ((ch->chunk_type == SCTP_AUTHENTICATION) && 4776 (stcb == NULL) && 4777 (inp->auth_supported == 1)) { 4778 /* save this chunk for later processing */ 4779 auth_skipped = 1; 4780 auth_offset = *offset; 4781 auth_len = ntohs(ch->chunk_length); 4782 4783 /* (temporarily) move past this chunk */ 4784 *offset += SCTP_SIZE32(auth_len); 4785 if (*offset >= length) { 4786 /* no more data left in the mbuf chain */ 4787 *offset = length; 4788 return (NULL); 4789 } 4790 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4791 sizeof(struct sctp_chunkhdr), chunk_buf); 4792 } 4793 if (ch == NULL) { 4794 /* Help */ 4795 *offset = length; 4796 return (stcb); 4797 } 4798 if (ch->chunk_type == SCTP_COOKIE_ECHO) { 4799 goto process_control_chunks; 4800 } 4801 /* 4802 * first check if it's an ASCONF with an unknown src addr we 4803 * need to look inside to find the association 4804 */ 4805 if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) { 4806 struct sctp_chunkhdr *asconf_ch = ch; 4807 uint32_t asconf_offset = 0, asconf_len = 0; 4808 4809 /* inp's refcount may be reduced */ 4810 SCTP_INP_INCR_REF(inp); 4811 4812 asconf_offset = *offset; 4813 do { 4814 asconf_len = ntohs(asconf_ch->chunk_length); 4815 if (asconf_len < sizeof(struct sctp_asconf_paramhdr)) 4816 break; 4817 stcb = sctp_findassociation_ep_asconf(m, 4818 *offset, 4819 dst, 4820 sh, &inp, netp, vrf_id); 4821 if (stcb != NULL) 4822 break; 4823 asconf_offset += SCTP_SIZE32(asconf_len); 4824 asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset, 4825 sizeof(struct sctp_chunkhdr), chunk_buf); 4826 } while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF); 4827 if (stcb == NULL) { 4828 /* 4829 * reduce inp's refcount if not reduced in 4830 * sctp_findassociation_ep_asconf(). 4831 */ 4832 SCTP_INP_DECR_REF(inp); 4833 } 4834 4835 /* now go back and verify any auth chunk to be sure */ 4836 if (auth_skipped && (stcb != NULL)) { 4837 struct sctp_auth_chunk *auth; 4838 4839 if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) { 4840 auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, chunk_buf); 4841 got_auth = 1; 4842 auth_skipped = 0; 4843 } else { 4844 auth = NULL; 4845 } 4846 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, 4847 auth_offset)) { 4848 /* auth HMAC failed so dump it */ 4849 *offset = length; 4850 return (stcb); 4851 } else { 4852 /* remaining chunks are HMAC checked */ 4853 stcb->asoc.authenticated = 1; 4854 } 4855 } 4856 } 4857 if (stcb == NULL) { 4858 SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); 4859 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 4860 msg); 4861 /* no association, so it's out of the blue... */ 4862 sctp_handle_ootb(m, iphlen, *offset, src, dst, sh, inp, op_err, 4863 #if defined(__FreeBSD__) && !defined(__Userspace__) 4864 mflowtype, mflowid, inp->fibnum, 4865 #endif 4866 vrf_id, port); 4867 *offset = length; 4868 return (NULL); 4869 } 4870 asoc = &stcb->asoc; 4871 /* ABORT and SHUTDOWN can use either v_tag... */ 4872 if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) || 4873 (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) || 4874 (ch->chunk_type == SCTP_PACKET_DROPPED)) { 4875 /* Take the T-bit always into account. */ 4876 if ((((ch->chunk_flags & SCTP_HAD_NO_TCB) == 0) && 4877 (vtag_in == asoc->my_vtag)) || 4878 (((ch->chunk_flags & SCTP_HAD_NO_TCB) == SCTP_HAD_NO_TCB) && 4879 (asoc->peer_vtag != htonl(0)) && 4880 (vtag_in == asoc->peer_vtag))) { 4881 /* this is valid */ 4882 } else { 4883 /* drop this packet... */ 4884 SCTP_STAT_INCR(sctps_badvtag); 4885 if (stcb != NULL) { 4886 SCTP_TCB_UNLOCK(stcb); 4887 } 4888 return (NULL); 4889 } 4890 } else { 4891 /* for all other chunks, vtag must match */ 4892 if (vtag_in != asoc->my_vtag) { 4893 /* invalid vtag... */ 4894 SCTPDBG(SCTP_DEBUG_INPUT3, 4895 "invalid vtag: %xh, expect %xh\n", 4896 vtag_in, asoc->my_vtag); 4897 SCTP_STAT_INCR(sctps_badvtag); 4898 if (stcb != NULL) { 4899 SCTP_TCB_UNLOCK(stcb); 4900 } 4901 *offset = length; 4902 return (NULL); 4903 } 4904 } 4905 } /* end if !SCTP_COOKIE_ECHO */ 4906 /* 4907 * process all control chunks... 4908 */ 4909 if (((ch->chunk_type == SCTP_SELECTIVE_ACK) || 4910 (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) || 4911 (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) && 4912 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4913 /* implied cookie-ack.. we must have lost the ack */ 4914 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, 4915 *netp); 4916 } 4917 4918 process_control_chunks: 4919 while (IS_SCTP_CONTROL(ch)) { 4920 /* validate chunk length */ 4921 chk_length = ntohs(ch->chunk_length); 4922 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n", 4923 ch->chunk_type, chk_length); 4924 SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length); 4925 if (chk_length < sizeof(*ch) || 4926 (*offset + (int)chk_length) > length) { 4927 *offset = length; 4928 return (stcb); 4929 } 4930 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks); 4931 /* 4932 * INIT and INIT-ACK only gets the init ack "header" portion 4933 * only because we don't have to process the peer's COOKIE. 4934 * All others get a complete chunk. 4935 */ 4936 switch (ch->chunk_type) { 4937 case SCTP_INITIATION: 4938 contiguous = sizeof(struct sctp_init_chunk); 4939 break; 4940 case SCTP_INITIATION_ACK: 4941 contiguous = sizeof(struct sctp_init_ack_chunk); 4942 break; 4943 default: 4944 contiguous = min(chk_length, sizeof(chunk_buf)); 4945 break; 4946 } 4947 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4948 contiguous, 4949 chunk_buf); 4950 if (ch == NULL) { 4951 *offset = length; 4952 return (stcb); 4953 } 4954 4955 num_chunks++; 4956 /* Save off the last place we got a control from */ 4957 if (stcb != NULL) { 4958 if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) { 4959 /* 4960 * allow last_control to be NULL if 4961 * ASCONF... ASCONF processing will find the 4962 * right net later 4963 */ 4964 if ((netp != NULL) && (*netp != NULL)) 4965 stcb->asoc.last_control_chunk_from = *netp; 4966 } 4967 } 4968 #ifdef SCTP_AUDITING_ENABLED 4969 sctp_audit_log(0xB0, ch->chunk_type); 4970 #endif 4971 4972 /* check to see if this chunk required auth, but isn't */ 4973 if ((stcb != NULL) && 4974 sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) && 4975 !stcb->asoc.authenticated) { 4976 /* "silently" ignore */ 4977 SCTP_STAT_INCR(sctps_recvauthmissing); 4978 goto next_chunk; 4979 } 4980 switch (ch->chunk_type) { 4981 case SCTP_INITIATION: 4982 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n"); 4983 /* The INIT chunk must be the only chunk. */ 4984 if ((num_chunks > 1) || 4985 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 4986 /* 4987 * RFC 4960bis requires stopping the 4988 * processing of the packet. 4989 */ 4990 *offset = length; 4991 return (stcb); 4992 } 4993 /* Honor our resource limit. */ 4994 if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) { 4995 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 4996 sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err, 4997 #if defined(__FreeBSD__) && !defined(__Userspace__) 4998 mflowtype, mflowid, inp->fibnum, 4999 #endif 5000 vrf_id, port); 5001 *offset = length; 5002 if (stcb != NULL) { 5003 SCTP_TCB_UNLOCK(stcb); 5004 } 5005 return (NULL); 5006 } 5007 sctp_handle_init(m, iphlen, *offset, src, dst, sh, 5008 (struct sctp_init_chunk *)ch, inp, 5009 stcb, *netp, 5010 #if defined(__FreeBSD__) && !defined(__Userspace__) 5011 mflowtype, mflowid, 5012 #endif 5013 vrf_id, port); 5014 *offset = length; 5015 if (stcb != NULL) { 5016 SCTP_TCB_UNLOCK(stcb); 5017 } 5018 return (NULL); 5019 break; 5020 case SCTP_PAD_CHUNK: 5021 break; 5022 case SCTP_INITIATION_ACK: 5023 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT_ACK\n"); 5024 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5025 /* We are not interested anymore */ 5026 if ((stcb != NULL) && (stcb->asoc.total_output_queue_size)) { 5027 ; 5028 } else { 5029 *offset = length; 5030 if (stcb != NULL) { 5031 #if defined(__APPLE__) && !defined(__Userspace__) 5032 so = SCTP_INP_SO(inp); 5033 atomic_add_int(&stcb->asoc.refcnt, 1); 5034 SCTP_TCB_UNLOCK(stcb); 5035 SCTP_SOCKET_LOCK(so, 1); 5036 SCTP_TCB_LOCK(stcb); 5037 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5038 #endif 5039 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 5040 SCTP_FROM_SCTP_INPUT + SCTP_LOC_29); 5041 #if defined(__APPLE__) && !defined(__Userspace__) 5042 SCTP_SOCKET_UNLOCK(so, 1); 5043 #endif 5044 } 5045 return (NULL); 5046 } 5047 } 5048 /* The INIT-ACK chunk must be the only chunk. */ 5049 if ((num_chunks > 1) || 5050 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 5051 *offset = length; 5052 return (stcb); 5053 } 5054 if ((netp != NULL) && (*netp != NULL)) { 5055 ret = sctp_handle_init_ack(m, iphlen, *offset, 5056 src, dst, sh, 5057 (struct sctp_init_ack_chunk *)ch, 5058 stcb, *netp, 5059 &abort_no_unlock, 5060 #if defined(__FreeBSD__) && !defined(__Userspace__) 5061 mflowtype, mflowid, 5062 #endif 5063 vrf_id); 5064 } else { 5065 ret = -1; 5066 } 5067 *offset = length; 5068 if (abort_no_unlock) { 5069 return (NULL); 5070 } 5071 /* 5072 * Special case, I must call the output routine to 5073 * get the cookie echoed 5074 */ 5075 if ((stcb != NULL) && (ret == 0)) { 5076 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 5077 } 5078 return (stcb); 5079 break; 5080 case SCTP_SELECTIVE_ACK: 5081 case SCTP_NR_SELECTIVE_ACK: 5082 { 5083 int abort_now = 0; 5084 uint32_t a_rwnd, cum_ack; 5085 uint16_t num_seg, num_nr_seg, num_dup; 5086 uint8_t flags; 5087 int offset_seg, offset_dup; 5088 5089 SCTPDBG(SCTP_DEBUG_INPUT3, "%s\n", 5090 ch->chunk_type == SCTP_SELECTIVE_ACK ? "SCTP_SACK" : "SCTP_NR_SACK"); 5091 SCTP_STAT_INCR(sctps_recvsacks); 5092 if (stcb == NULL) { 5093 SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing %s chunk\n", 5094 (ch->chunk_type == SCTP_SELECTIVE_ACK) ? "SCTP_SACK" : "SCTP_NR_SACK"); 5095 break; 5096 } 5097 if (ch->chunk_type == SCTP_SELECTIVE_ACK) { 5098 if (chk_length < sizeof(struct sctp_sack_chunk)) { 5099 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n"); 5100 break; 5101 } 5102 } else { 5103 if (stcb->asoc.nrsack_supported == 0) { 5104 goto unknown_chunk; 5105 } 5106 if (chk_length < sizeof(struct sctp_nr_sack_chunk)) { 5107 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR_SACK chunk, too small\n"); 5108 break; 5109 } 5110 } 5111 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 5112 /*- 5113 * If we have sent a shutdown-ack, we will pay no 5114 * attention to a sack sent in to us since 5115 * we don't care anymore. 5116 */ 5117 break; 5118 } 5119 flags = ch->chunk_flags; 5120 if (ch->chunk_type == SCTP_SELECTIVE_ACK) { 5121 struct sctp_sack_chunk *sack; 5122 5123 sack = (struct sctp_sack_chunk *)ch; 5124 cum_ack = ntohl(sack->sack.cum_tsn_ack); 5125 num_seg = ntohs(sack->sack.num_gap_ack_blks); 5126 num_nr_seg = 0; 5127 num_dup = ntohs(sack->sack.num_dup_tsns); 5128 a_rwnd = ntohl(sack->sack.a_rwnd); 5129 if (sizeof(struct sctp_sack_chunk) + 5130 num_seg * sizeof(struct sctp_gap_ack_block) + 5131 num_dup * sizeof(uint32_t) != chk_length) { 5132 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n"); 5133 break; 5134 } 5135 offset_seg = *offset + sizeof(struct sctp_sack_chunk); 5136 offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block); 5137 } else { 5138 struct sctp_nr_sack_chunk *nr_sack; 5139 5140 nr_sack = (struct sctp_nr_sack_chunk *)ch; 5141 cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack); 5142 num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks); 5143 num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks); 5144 num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns); 5145 a_rwnd = ntohl(nr_sack->nr_sack.a_rwnd); 5146 if (sizeof(struct sctp_nr_sack_chunk) + 5147 (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) + 5148 num_dup * sizeof(uint32_t) != chk_length) { 5149 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n"); 5150 break; 5151 } 5152 offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk); 5153 offset_dup = offset_seg + (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block); 5154 } 5155 SCTPDBG(SCTP_DEBUG_INPUT3, "%s process cum_ack:%x num_seg:%d a_rwnd:%d\n", 5156 (ch->chunk_type == SCTP_SELECTIVE_ACK) ? "SCTP_SACK" : "SCTP_NR_SACK", 5157 cum_ack, num_seg, a_rwnd); 5158 stcb->asoc.seen_a_sack_this_pkt = 1; 5159 if ((stcb->asoc.pr_sctp_cnt == 0) && 5160 (num_seg == 0) && (num_nr_seg == 0) && 5161 SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) && 5162 (stcb->asoc.saw_sack_with_frags == 0) && 5163 (stcb->asoc.saw_sack_with_nr_frags == 0) && 5164 (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 5165 /* 5166 * We have a SIMPLE sack having no 5167 * prior segments and data on sent 5168 * queue to be acked. Use the 5169 * faster path sack processing. We 5170 * also allow window update sacks 5171 * with no missing segments to go 5172 * this way too. 5173 */ 5174 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 5175 &abort_now, ecne_seen); 5176 } else { 5177 if ((netp != NULL) && (*netp != NULL)) { 5178 sctp_handle_sack(m, offset_seg, offset_dup, stcb, 5179 num_seg, num_nr_seg, num_dup, &abort_now, flags, 5180 cum_ack, a_rwnd, ecne_seen); 5181 } 5182 } 5183 if (abort_now) { 5184 /* ABORT signal from sack processing */ 5185 *offset = length; 5186 return (NULL); 5187 } 5188 if (TAILQ_EMPTY(&stcb->asoc.send_queue) && 5189 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 5190 (stcb->asoc.stream_queue_cnt == 0)) { 5191 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 5192 } 5193 break; 5194 } 5195 case SCTP_HEARTBEAT_REQUEST: 5196 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n"); 5197 if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) { 5198 SCTP_STAT_INCR(sctps_recvheartbeat); 5199 sctp_send_heartbeat_ack(stcb, m, *offset, 5200 chk_length, *netp); 5201 } 5202 break; 5203 case SCTP_HEARTBEAT_ACK: 5204 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT_ACK\n"); 5205 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) { 5206 /* Its not ours */ 5207 break; 5208 } 5209 SCTP_STAT_INCR(sctps_recvheartbeatack); 5210 if ((netp != NULL) && (*netp != NULL)) { 5211 sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch, 5212 stcb, *netp); 5213 } 5214 break; 5215 case SCTP_ABORT_ASSOCIATION: 5216 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n", 5217 (void *)stcb); 5218 *offset = length; 5219 if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) { 5220 if (sctp_handle_abort((struct sctp_abort_chunk *)ch, stcb, *netp)) { 5221 return (NULL); 5222 } else { 5223 return (stcb); 5224 } 5225 } else { 5226 return (NULL); 5227 } 5228 break; 5229 case SCTP_SHUTDOWN: 5230 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n", 5231 (void *)stcb); 5232 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) { 5233 break; 5234 } 5235 if ((netp != NULL) && (*netp != NULL)) { 5236 abort_flag = 0; 5237 sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch, 5238 stcb, *netp, &abort_flag); 5239 if (abort_flag) { 5240 *offset = length; 5241 return (NULL); 5242 } 5243 } 5244 break; 5245 case SCTP_SHUTDOWN_ACK: 5246 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN_ACK, stcb %p\n", (void *)stcb); 5247 if ((chk_length == sizeof(struct sctp_shutdown_ack_chunk)) && 5248 (stcb != NULL) && (netp != NULL) && (*netp != NULL)) { 5249 sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp); 5250 *offset = length; 5251 return (NULL); 5252 } 5253 break; 5254 case SCTP_OPERATION_ERROR: 5255 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP_ERR\n"); 5256 if ((stcb != NULL) && (netp != NULL) && (*netp != NULL) && 5257 sctp_handle_error(ch, stcb, *netp, contiguous) < 0) { 5258 *offset = length; 5259 return (NULL); 5260 } 5261 break; 5262 case SCTP_COOKIE_ECHO: 5263 SCTPDBG(SCTP_DEBUG_INPUT3, 5264 "SCTP_COOKIE_ECHO, stcb %p\n", (void *)stcb); 5265 if ((stcb != NULL) && (stcb->asoc.total_output_queue_size > 0)) { 5266 ; 5267 } else { 5268 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5269 /* We are not interested anymore */ 5270 abend: 5271 if (stcb != NULL) { 5272 SCTP_TCB_UNLOCK(stcb); 5273 } 5274 *offset = length; 5275 return (NULL); 5276 } 5277 } 5278 /*- 5279 * First are we accepting? We do this again here 5280 * since it is possible that a previous endpoint WAS 5281 * listening responded to a INIT-ACK and then 5282 * closed. We opened and bound.. and are now no 5283 * longer listening. 5284 * 5285 * XXXGL: notes on checking listen queue length. 5286 * 1) SCTP_IS_LISTENING() doesn't necessarily mean 5287 * SOLISTENING(), because a listening "UDP type" 5288 * socket isn't listening in terms of the socket 5289 * layer. It is a normal data flow socket, that 5290 * can fork off new connections. Thus, we should 5291 * look into sol_qlen only in case we are !UDP. 5292 * 2) Checking sol_qlen in general requires locking 5293 * the socket, and this code lacks that. 5294 */ 5295 if ((stcb == NULL) && 5296 (!SCTP_IS_LISTENING(inp) || 5297 (((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) == 0) && 5298 #if defined(__FreeBSD__) && !defined(__Userspace__) 5299 inp->sctp_socket->sol_qlen >= inp->sctp_socket->sol_qlimit))) { 5300 #else 5301 inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit))) { 5302 #endif 5303 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 5304 (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) { 5305 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 5306 sctp_abort_association(inp, stcb, m, iphlen, 5307 src, dst, sh, op_err, 5308 #if defined(__FreeBSD__) && !defined(__Userspace__) 5309 mflowtype, mflowid, 5310 #endif 5311 vrf_id, port); 5312 } 5313 *offset = length; 5314 return (NULL); 5315 } else { 5316 struct mbuf *ret_buf; 5317 struct sctp_inpcb *linp; 5318 struct sctp_tmit_chunk *chk; 5319 5320 if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | 5321 SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5322 goto abend; 5323 } 5324 5325 if (stcb) { 5326 linp = NULL; 5327 } else { 5328 linp = inp; 5329 } 5330 5331 if (linp != NULL) { 5332 SCTP_ASOC_CREATE_LOCK(linp); 5333 } 5334 5335 if (netp != NULL) { 5336 struct sctp_tcb *locked_stcb; 5337 5338 locked_stcb = stcb; 5339 ret_buf = 5340 sctp_handle_cookie_echo(m, iphlen, 5341 *offset, 5342 src, dst, 5343 sh, 5344 (struct sctp_cookie_echo_chunk *)ch, 5345 &inp, &stcb, netp, 5346 auth_skipped, 5347 auth_offset, 5348 auth_len, 5349 &locked_stcb, 5350 #if defined(__FreeBSD__) && !defined(__Userspace__) 5351 mflowtype, 5352 mflowid, 5353 #endif 5354 vrf_id, 5355 port); 5356 if ((locked_stcb != NULL) && (locked_stcb != stcb)) { 5357 SCTP_TCB_UNLOCK(locked_stcb); 5358 } 5359 if (stcb != NULL) { 5360 SCTP_TCB_LOCK_ASSERT(stcb); 5361 } 5362 } else { 5363 ret_buf = NULL; 5364 } 5365 if (linp != NULL) { 5366 SCTP_ASOC_CREATE_UNLOCK(linp); 5367 } 5368 if (ret_buf == NULL) { 5369 if (stcb != NULL) { 5370 SCTP_TCB_UNLOCK(stcb); 5371 } 5372 SCTPDBG(SCTP_DEBUG_INPUT3, 5373 "GAK, null buffer\n"); 5374 *offset = length; 5375 return (NULL); 5376 } 5377 /* if AUTH skipped, see if it verified... */ 5378 if (auth_skipped) { 5379 got_auth = 1; 5380 auth_skipped = 0; 5381 } 5382 /* Restart the timer if we have pending data */ 5383 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 5384 if (chk->whoTo != NULL) { 5385 break; 5386 } 5387 } 5388 if (chk != NULL) { 5389 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 5390 } 5391 } 5392 break; 5393 case SCTP_COOKIE_ACK: 5394 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE_ACK, stcb %p\n", (void *)stcb); 5395 if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) { 5396 break; 5397 } 5398 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5399 /* We are not interested anymore */ 5400 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 5401 ; 5402 } else if (stcb) { 5403 #if defined(__APPLE__) && !defined(__Userspace__) 5404 so = SCTP_INP_SO(inp); 5405 atomic_add_int(&stcb->asoc.refcnt, 1); 5406 SCTP_TCB_UNLOCK(stcb); 5407 SCTP_SOCKET_LOCK(so, 1); 5408 SCTP_TCB_LOCK(stcb); 5409 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5410 #endif 5411 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 5412 SCTP_FROM_SCTP_INPUT + SCTP_LOC_30); 5413 #if defined(__APPLE__) && !defined(__Userspace__) 5414 SCTP_SOCKET_UNLOCK(so, 1); 5415 #endif 5416 *offset = length; 5417 return (NULL); 5418 } 5419 } 5420 if ((netp != NULL) && (*netp != NULL)) { 5421 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp); 5422 } 5423 break; 5424 case SCTP_ECN_ECHO: 5425 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_ECHO\n"); 5426 if (stcb == NULL) { 5427 break; 5428 } 5429 if (stcb->asoc.ecn_supported == 0) { 5430 goto unknown_chunk; 5431 } 5432 if ((chk_length != sizeof(struct sctp_ecne_chunk)) && 5433 (chk_length != sizeof(struct old_sctp_ecne_chunk))) { 5434 break; 5435 } 5436 sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, stcb); 5437 ecne_seen = 1; 5438 break; 5439 case SCTP_ECN_CWR: 5440 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_CWR\n"); 5441 if (stcb == NULL) { 5442 break; 5443 } 5444 if (stcb->asoc.ecn_supported == 0) { 5445 goto unknown_chunk; 5446 } 5447 if (chk_length != sizeof(struct sctp_cwr_chunk)) { 5448 break; 5449 } 5450 sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp); 5451 break; 5452 case SCTP_SHUTDOWN_COMPLETE: 5453 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN_COMPLETE, stcb %p\n", (void *)stcb); 5454 /* must be first and only chunk */ 5455 if ((num_chunks > 1) || 5456 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 5457 *offset = length; 5458 return (stcb); 5459 } 5460 if ((chk_length == sizeof(struct sctp_shutdown_complete_chunk)) && 5461 (stcb != NULL) && (netp != NULL) && (*netp != NULL)) { 5462 sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch, 5463 stcb, *netp); 5464 *offset = length; 5465 return (NULL); 5466 } 5467 break; 5468 case SCTP_ASCONF: 5469 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n"); 5470 if (stcb != NULL) { 5471 if (stcb->asoc.asconf_supported == 0) { 5472 goto unknown_chunk; 5473 } 5474 sctp_handle_asconf(m, *offset, src, 5475 (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0); 5476 asconf_cnt++; 5477 } 5478 break; 5479 case SCTP_ASCONF_ACK: 5480 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF_ACK\n"); 5481 if (stcb == NULL) { 5482 break; 5483 } 5484 if (stcb->asoc.asconf_supported == 0) { 5485 goto unknown_chunk; 5486 } 5487 if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) { 5488 break; 5489 } 5490 if ((netp != NULL) && (*netp != NULL)) { 5491 /* He's alive so give him credit */ 5492 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5493 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5494 stcb->asoc.overall_error_count, 5495 0, 5496 SCTP_FROM_SCTP_INPUT, 5497 __LINE__); 5498 } 5499 stcb->asoc.overall_error_count = 0; 5500 sctp_handle_asconf_ack(m, *offset, 5501 (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock); 5502 if (abort_no_unlock) 5503 return (NULL); 5504 } 5505 break; 5506 case SCTP_FORWARD_CUM_TSN: 5507 case SCTP_IFORWARD_CUM_TSN: 5508 SCTPDBG(SCTP_DEBUG_INPUT3, "%s\n", 5509 ch->chunk_type == SCTP_FORWARD_CUM_TSN ? "FORWARD_TSN" : "I_FORWARD_TSN"); 5510 if (stcb == NULL) { 5511 break; 5512 } 5513 if (stcb->asoc.prsctp_supported == 0) { 5514 goto unknown_chunk; 5515 } 5516 if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) { 5517 break; 5518 } 5519 if (((stcb->asoc.idata_supported == 1) && (ch->chunk_type == SCTP_FORWARD_CUM_TSN)) || 5520 ((stcb->asoc.idata_supported == 0) && (ch->chunk_type == SCTP_IFORWARD_CUM_TSN))) { 5521 if (ch->chunk_type == SCTP_FORWARD_CUM_TSN) { 5522 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "FORWARD-TSN chunk received when I-FORWARD-TSN was negotiated"); 5523 } else { 5524 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-FORWARD-TSN chunk received when FORWARD-TSN was negotiated"); 5525 } 5526 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 5527 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 5528 *offset = length; 5529 return (NULL); 5530 } 5531 *fwd_tsn_seen = 1; 5532 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5533 /* We are not interested anymore */ 5534 #if defined(__APPLE__) && !defined(__Userspace__) 5535 so = SCTP_INP_SO(inp); 5536 atomic_add_int(&stcb->asoc.refcnt, 1); 5537 SCTP_TCB_UNLOCK(stcb); 5538 SCTP_SOCKET_LOCK(so, 1); 5539 SCTP_TCB_LOCK(stcb); 5540 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5541 #endif 5542 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 5543 SCTP_FROM_SCTP_INPUT + SCTP_LOC_31); 5544 #if defined(__APPLE__) && !defined(__Userspace__) 5545 SCTP_SOCKET_UNLOCK(so, 1); 5546 #endif 5547 *offset = length; 5548 return (NULL); 5549 } 5550 /* 5551 * For sending a SACK this looks like DATA 5552 * chunks. 5553 */ 5554 stcb->asoc.last_data_chunk_from = stcb->asoc.last_control_chunk_from; 5555 abort_flag = 0; 5556 sctp_handle_forward_tsn(stcb, 5557 (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset); 5558 if (abort_flag) { 5559 *offset = length; 5560 return (NULL); 5561 } 5562 break; 5563 case SCTP_STREAM_RESET: 5564 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n"); 5565 if (stcb == NULL) { 5566 break; 5567 } 5568 if (stcb->asoc.reconfig_supported == 0) { 5569 goto unknown_chunk; 5570 } 5571 if (chk_length < sizeof(struct sctp_stream_reset_tsn_req)) { 5572 break; 5573 } 5574 if (sctp_handle_stream_reset(stcb, m, *offset, ch)) { 5575 /* stop processing */ 5576 *offset = length; 5577 return (NULL); 5578 } 5579 break; 5580 case SCTP_PACKET_DROPPED: 5581 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n"); 5582 if (stcb == NULL) { 5583 break; 5584 } 5585 if (stcb->asoc.pktdrop_supported == 0) { 5586 goto unknown_chunk; 5587 } 5588 if (chk_length < sizeof(struct sctp_pktdrop_chunk)) { 5589 break; 5590 } 5591 if ((netp != NULL) && (*netp != NULL)) { 5592 sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch, 5593 stcb, *netp, 5594 min(chk_length, contiguous)); 5595 } 5596 break; 5597 case SCTP_AUTHENTICATION: 5598 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n"); 5599 if (stcb == NULL) { 5600 /* save the first AUTH for later processing */ 5601 if (auth_skipped == 0) { 5602 auth_offset = *offset; 5603 auth_len = chk_length; 5604 auth_skipped = 1; 5605 } 5606 /* skip this chunk (temporarily) */ 5607 break; 5608 } 5609 if (stcb->asoc.auth_supported == 0) { 5610 goto unknown_chunk; 5611 } 5612 if ((chk_length < (sizeof(struct sctp_auth_chunk))) || 5613 (chk_length > (sizeof(struct sctp_auth_chunk) + 5614 SCTP_AUTH_DIGEST_LEN_MAX))) { 5615 /* Its not ours */ 5616 *offset = length; 5617 return (stcb); 5618 } 5619 if (got_auth == 1) { 5620 /* skip this chunk... it's already auth'd */ 5621 break; 5622 } 5623 got_auth = 1; 5624 if (sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch, m, *offset)) { 5625 /* auth HMAC failed so dump the packet */ 5626 *offset = length; 5627 return (stcb); 5628 } else { 5629 /* remaining chunks are HMAC checked */ 5630 stcb->asoc.authenticated = 1; 5631 } 5632 break; 5633 5634 default: 5635 unknown_chunk: 5636 /* it's an unknown chunk! */ 5637 if ((ch->chunk_type & 0x40) && 5638 (stcb != NULL) && 5639 (SCTP_GET_STATE(stcb) != SCTP_STATE_EMPTY) && 5640 (SCTP_GET_STATE(stcb) != SCTP_STATE_INUSE) && 5641 (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) { 5642 struct sctp_gen_error_cause *cause; 5643 int len; 5644 5645 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause), 5646 0, M_NOWAIT, 1, MT_DATA); 5647 if (op_err != NULL) { 5648 len = min(SCTP_SIZE32(chk_length), (uint32_t)(length - *offset)); 5649 cause = mtod(op_err, struct sctp_gen_error_cause *); 5650 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK); 5651 cause->length = htons((uint16_t)(len + sizeof(struct sctp_gen_error_cause))); 5652 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); 5653 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, len, M_NOWAIT); 5654 if (SCTP_BUF_NEXT(op_err) != NULL) { 5655 #ifdef SCTP_MBUF_LOGGING 5656 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 5657 sctp_log_mbc(SCTP_BUF_NEXT(op_err), SCTP_MBUF_ICOPY); 5658 } 5659 #endif 5660 sctp_queue_op_err(stcb, op_err); 5661 } else { 5662 sctp_m_freem(op_err); 5663 } 5664 } 5665 } 5666 if ((ch->chunk_type & 0x80) == 0) { 5667 /* discard this packet */ 5668 *offset = length; 5669 return (stcb); 5670 } /* else skip this bad chunk and continue... */ 5671 break; 5672 } /* switch (ch->chunk_type) */ 5673 5674 next_chunk: 5675 /* get the next chunk */ 5676 *offset += SCTP_SIZE32(chk_length); 5677 if (*offset >= length) { 5678 /* no more data left in the mbuf chain */ 5679 break; 5680 } 5681 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 5682 sizeof(struct sctp_chunkhdr), chunk_buf); 5683 if (ch == NULL) { 5684 *offset = length; 5685 return (stcb); 5686 } 5687 } /* while */ 5688 5689 if ((asconf_cnt > 0) && (stcb != NULL)) { 5690 sctp_send_asconf_ack(stcb); 5691 } 5692 return (stcb); 5693 } 5694 5695 /* 5696 * common input chunk processing (v4 and v6) 5697 */ 5698 void 5699 sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int length, 5700 struct sockaddr *src, struct sockaddr *dst, 5701 struct sctphdr *sh, struct sctp_chunkhdr *ch, 5702 uint8_t compute_crc, 5703 uint8_t ecn_bits, 5704 #if defined(__FreeBSD__) && !defined(__Userspace__) 5705 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 5706 #endif 5707 uint32_t vrf_id, uint16_t port) 5708 { 5709 char msg[SCTP_DIAG_INFO_LEN]; 5710 struct mbuf *m = *mm, *op_err; 5711 struct sctp_inpcb *inp = NULL, *inp_decr = NULL; 5712 struct sctp_tcb *stcb = NULL; 5713 struct sctp_nets *net = NULL; 5714 #if defined(__Userspace__) 5715 struct socket *upcall_socket = NULL; 5716 #endif 5717 uint32_t high_tsn; 5718 uint32_t cksum_in_hdr; 5719 int un_sent; 5720 int cnt_ctrl_ready = 0; 5721 int fwd_tsn_seen = 0, data_processed = 0; 5722 bool cksum_validated, stcb_looked_up; 5723 5724 SCTP_STAT_INCR(sctps_recvdatagrams); 5725 #ifdef SCTP_AUDITING_ENABLED 5726 sctp_audit_log(0xE0, 1); 5727 sctp_auditing(0, inp, stcb, net); 5728 #endif 5729 5730 stcb_looked_up = false; 5731 if (compute_crc != 0) { 5732 cksum_validated = false; 5733 cksum_in_hdr = sh->checksum; 5734 if (cksum_in_hdr != htonl(0)) { 5735 uint32_t cksum_calculated; 5736 5737 validate_cksum: 5738 sh->checksum = 0; 5739 cksum_calculated = sctp_calculate_cksum(m, iphlen); 5740 sh->checksum = cksum_in_hdr; 5741 if (cksum_calculated != cksum_in_hdr) { 5742 if (stcb_looked_up) { 5743 /* 5744 * The packet has a zero checksum, which 5745 * is not the correct CRC, no stcb has 5746 * been found or an stcb has been found 5747 * but an incorrect zero checksum is not 5748 * acceptable. 5749 */ 5750 KASSERT(cksum_in_hdr == htonl(0), 5751 ("cksum in header not zero: %x", 5752 ntohl(cksum_in_hdr))); 5753 if ((inp == NULL) && 5754 (SCTP_BASE_SYSCTL(sctp_ootb_with_zero_cksum) == 1)) { 5755 /* 5756 * This is an OOTB packet, 5757 * depending on the sysctl 5758 * variable, pretend that the 5759 * checksum is acceptable, 5760 * to allow an appropriate 5761 * response (ABORT, for examlpe) 5762 * to be sent. 5763 */ 5764 KASSERT(stcb == NULL, 5765 ("stcb is %p", stcb)); 5766 SCTP_STAT_INCR(sctps_recvzerocrc); 5767 goto cksum_validated; 5768 } 5769 } else { 5770 stcb = sctp_findassociation_addr(m, offset, src, dst, 5771 sh, ch, &inp, &net, vrf_id); 5772 } 5773 SCTPDBG(SCTP_DEBUG_INPUT1, "Bad cksum in SCTP packet:%x calculated:%x m:%p mlen:%d iphlen:%d\n", 5774 ntohl(cksum_in_hdr), ntohl(cksum_calculated), (void *)m, length, iphlen); 5775 #if defined(INET) || defined(INET6) 5776 if ((ch->chunk_type != SCTP_INITIATION) && 5777 (net != NULL) && (net->port != port)) { 5778 if (net->port == 0) { 5779 /* UDP encapsulation turned on. */ 5780 net->mtu -= sizeof(struct udphdr); 5781 if (stcb->asoc.smallest_mtu > net->mtu) { 5782 sctp_pathmtu_adjustment(stcb, net->mtu, true); 5783 } 5784 } else if (port == 0) { 5785 /* UDP encapsulation turned off. */ 5786 net->mtu += sizeof(struct udphdr); 5787 /* XXX Update smallest_mtu */ 5788 } 5789 net->port = port; 5790 } 5791 #endif 5792 #if defined(__FreeBSD__) && !defined(__Userspace__) 5793 if (net != NULL) { 5794 net->flowtype = mflowtype; 5795 net->flowid = mflowid; 5796 } 5797 SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh); 5798 #endif 5799 if ((inp != NULL) && (stcb != NULL)) { 5800 if (stcb->asoc.pktdrop_supported) { 5801 sctp_send_packet_dropped(stcb, net, m, length, iphlen, 1); 5802 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED); 5803 } 5804 } else if ((inp != NULL) && (stcb == NULL)) { 5805 inp_decr = inp; 5806 } 5807 SCTP_STAT_INCR(sctps_badsum); 5808 SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors); 5809 goto out; 5810 } else { 5811 cksum_validated = true; 5812 } 5813 } 5814 KASSERT(cksum_validated || cksum_in_hdr == htonl(0), 5815 ("cksum 0x%08x not zero and not validated", ntohl(cksum_in_hdr))); 5816 if (!cksum_validated) { 5817 stcb = sctp_findassociation_addr(m, offset, src, dst, 5818 sh, ch, &inp, &net, vrf_id); 5819 stcb_looked_up = true; 5820 if (stcb == NULL) { 5821 goto validate_cksum; 5822 } 5823 if (stcb->asoc.rcv_edmid == SCTP_EDMID_NONE) { 5824 goto validate_cksum; 5825 } 5826 KASSERT(stcb->asoc.rcv_edmid == SCTP_EDMID_LOWER_LAYER_DTLS, 5827 ("Unexpected EDMID %u", stcb->asoc.rcv_edmid)); 5828 SCTP_STAT_INCR(sctps_recvzerocrc); 5829 } 5830 } 5831 cksum_validated: 5832 /* Destination port of 0 is illegal, based on RFC4960. */ 5833 if (sh->dest_port == htons(0)) { 5834 SCTP_STAT_INCR(sctps_hdrops); 5835 if ((stcb == NULL) && (inp != NULL)) { 5836 inp_decr = inp; 5837 } 5838 goto out; 5839 } 5840 if (!stcb_looked_up) { 5841 stcb = sctp_findassociation_addr(m, offset, src, dst, 5842 sh, ch, &inp, &net, vrf_id); 5843 } 5844 #if defined(INET) || defined(INET6) 5845 if ((ch->chunk_type != SCTP_INITIATION) && 5846 (net != NULL) && (net->port != port)) { 5847 if (net->port == 0) { 5848 /* UDP encapsulation turned on. */ 5849 net->mtu -= sizeof(struct udphdr); 5850 if (stcb->asoc.smallest_mtu > net->mtu) { 5851 sctp_pathmtu_adjustment(stcb, net->mtu, true); 5852 } 5853 } else if (port == 0) { 5854 /* UDP encapsulation turned off. */ 5855 net->mtu += sizeof(struct udphdr); 5856 /* XXX Update smallest_mtu */ 5857 } 5858 net->port = port; 5859 } 5860 #endif 5861 #if defined(__FreeBSD__) && !defined(__Userspace__) 5862 if (net != NULL) { 5863 net->flowtype = mflowtype; 5864 net->flowid = mflowid; 5865 } 5866 #endif 5867 if (inp == NULL) { 5868 SCTP_STAT_INCR(sctps_noport); 5869 #if defined(__FreeBSD__) && !defined(__Userspace__) 5870 SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh); 5871 if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) { 5872 goto out; 5873 } 5874 #endif 5875 if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 5876 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks); 5877 sctp_send_shutdown_complete2(src, dst, sh, 5878 #if defined(__FreeBSD__) && !defined(__Userspace__) 5879 mflowtype, mflowid, fibnum, 5880 #endif 5881 vrf_id, port); 5882 goto out; 5883 } 5884 if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) { 5885 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks); 5886 goto out; 5887 } 5888 if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) { 5889 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 5890 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 5891 (ch->chunk_type != SCTP_INIT))) { 5892 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 5893 "Out of the blue"); 5894 sctp_send_abort(m, iphlen, src, dst, 5895 sh, 0, op_err, 5896 #if defined(__FreeBSD__) && !defined(__Userspace__) 5897 mflowtype, mflowid, fibnum, 5898 #endif 5899 vrf_id, port); 5900 } 5901 } 5902 goto out; 5903 } else if (stcb == NULL) { 5904 inp_decr = inp; 5905 } 5906 SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n", 5907 (void *)m, iphlen, offset, length, (void *)stcb); 5908 if (stcb) { 5909 /* always clear this before beginning a packet */ 5910 stcb->asoc.authenticated = 0; 5911 stcb->asoc.seen_a_sack_this_pkt = 0; 5912 SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n", 5913 (void *)stcb, stcb->asoc.state); 5914 5915 if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) || 5916 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5917 /*- 5918 * If we hit here, we had a ref count 5919 * up when the assoc was aborted and the 5920 * timer is clearing out the assoc, we should 5921 * NOT respond to any packet.. its OOTB. 5922 */ 5923 SCTP_TCB_UNLOCK(stcb); 5924 stcb = NULL; 5925 #if defined(__FreeBSD__) && !defined(__Userspace__) 5926 SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh); 5927 #endif 5928 SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); 5929 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 5930 msg); 5931 sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err, 5932 #if defined(__FreeBSD__) && !defined(__Userspace__) 5933 mflowtype, mflowid, inp->fibnum, 5934 #endif 5935 vrf_id, port); 5936 goto out; 5937 } 5938 } 5939 #if defined(__Userspace__) 5940 if ((stcb != NULL) && 5941 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 5942 (stcb->sctp_socket != NULL)) { 5943 if (stcb->sctp_socket->so_head != NULL) { 5944 upcall_socket = stcb->sctp_socket->so_head; 5945 } else { 5946 upcall_socket = stcb->sctp_socket; 5947 } 5948 SOCK_LOCK(upcall_socket); 5949 soref(upcall_socket); 5950 SOCK_UNLOCK(upcall_socket); 5951 } 5952 #endif 5953 if (IS_SCTP_CONTROL(ch)) { 5954 /* process the control portion of the SCTP packet */ 5955 /* sa_ignore NO_NULL_CHK */ 5956 stcb = sctp_process_control(m, iphlen, &offset, length, 5957 src, dst, sh, ch, 5958 inp, stcb, &net, &fwd_tsn_seen, 5959 #if defined(__FreeBSD__) && !defined(__Userspace__) 5960 mflowtype, mflowid, fibnum, 5961 #endif 5962 vrf_id, port); 5963 if (stcb) { 5964 /* This covers us if the cookie-echo was there 5965 * and it changes our INP. 5966 */ 5967 inp = stcb->sctp_ep; 5968 #if defined(INET) || defined(INET6) 5969 if ((ch->chunk_type != SCTP_INITIATION) && 5970 (net != NULL) && (net->port != port)) { 5971 if (net->port == 0) { 5972 /* UDP encapsulation turned on. */ 5973 net->mtu -= sizeof(struct udphdr); 5974 if (stcb->asoc.smallest_mtu > net->mtu) { 5975 sctp_pathmtu_adjustment(stcb, net->mtu, true); 5976 } 5977 } else if (port == 0) { 5978 /* UDP encapsulation turned off. */ 5979 net->mtu += sizeof(struct udphdr); 5980 /* XXX Update smallest_mtu */ 5981 } 5982 net->port = port; 5983 } 5984 #endif 5985 } 5986 } else { 5987 /* 5988 * no control chunks, so pre-process DATA chunks (these 5989 * checks are taken care of by control processing) 5990 */ 5991 5992 /* 5993 * if DATA only packet, and auth is required, then punt... 5994 * can't have authenticated without any AUTH (control) 5995 * chunks 5996 */ 5997 if ((stcb != NULL) && 5998 sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) { 5999 /* "silently" ignore */ 6000 #if defined(__FreeBSD__) && !defined(__Userspace__) 6001 SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh); 6002 #endif 6003 SCTP_STAT_INCR(sctps_recvauthmissing); 6004 goto out; 6005 } 6006 if (stcb == NULL) { 6007 /* out of the blue DATA chunk */ 6008 #if defined(__FreeBSD__) && !defined(__Userspace__) 6009 SCTP_PROBE5(receive, NULL, NULL, m, NULL, sh); 6010 #endif 6011 SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); 6012 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 6013 msg); 6014 sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err, 6015 #if defined(__FreeBSD__) && !defined(__Userspace__) 6016 mflowtype, mflowid, fibnum, 6017 #endif 6018 vrf_id, port); 6019 goto out; 6020 } 6021 if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) { 6022 /* v_tag mismatch! */ 6023 #if defined(__FreeBSD__) && !defined(__Userspace__) 6024 SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh); 6025 #endif 6026 SCTP_STAT_INCR(sctps_badvtag); 6027 goto out; 6028 } 6029 } 6030 6031 #if defined(__FreeBSD__) && !defined(__Userspace__) 6032 SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh); 6033 #endif 6034 if (stcb == NULL) { 6035 /* 6036 * no valid TCB for this packet, or we found it's a bad 6037 * packet while processing control, or we're done with this 6038 * packet (done or skip rest of data), so we drop it... 6039 */ 6040 goto out; 6041 } 6042 #if defined(__Userspace__) 6043 if ((upcall_socket == NULL) && 6044 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && 6045 (stcb->sctp_socket != NULL)) { 6046 if (stcb->sctp_socket->so_head != NULL) { 6047 upcall_socket = stcb->sctp_socket->so_head; 6048 } else { 6049 upcall_socket = stcb->sctp_socket; 6050 } 6051 SOCK_LOCK(upcall_socket); 6052 soref(upcall_socket); 6053 SOCK_UNLOCK(upcall_socket); 6054 } 6055 #endif 6056 6057 /* 6058 * DATA chunk processing 6059 */ 6060 /* plow through the data chunks while length > offset */ 6061 6062 /* 6063 * Rest should be DATA only. Check authentication state if AUTH for 6064 * DATA is required. 6065 */ 6066 if ((length > offset) && 6067 (stcb != NULL) && 6068 sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) && 6069 !stcb->asoc.authenticated) { 6070 /* "silently" ignore */ 6071 SCTP_STAT_INCR(sctps_recvauthmissing); 6072 SCTPDBG(SCTP_DEBUG_AUTH1, 6073 "Data chunk requires AUTH, skipped\n"); 6074 goto trigger_send; 6075 } 6076 if (length > offset) { 6077 int retval; 6078 6079 /* 6080 * First check to make sure our state is correct. We would 6081 * not get here unless we really did have a tag, so we don't 6082 * abort if this happens, just dump the chunk silently. 6083 */ 6084 switch (SCTP_GET_STATE(stcb)) { 6085 case SCTP_STATE_COOKIE_ECHOED: 6086 /* 6087 * we consider data with valid tags in this state 6088 * shows us the cookie-ack was lost. Imply it was 6089 * there. 6090 */ 6091 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net); 6092 break; 6093 case SCTP_STATE_COOKIE_WAIT: 6094 /* 6095 * We consider OOTB any data sent during asoc setup. 6096 */ 6097 SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); 6098 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 6099 msg); 6100 sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err, 6101 #if defined(__FreeBSD__) && !defined(__Userspace__) 6102 mflowtype, mflowid, inp->fibnum, 6103 #endif 6104 vrf_id, port); 6105 goto out; 6106 /*sa_ignore NOTREACHED*/ 6107 break; 6108 case SCTP_STATE_EMPTY: /* should not happen */ 6109 case SCTP_STATE_INUSE: /* should not happen */ 6110 case SCTP_STATE_SHUTDOWN_RECEIVED: /* This is a peer error */ 6111 case SCTP_STATE_SHUTDOWN_ACK_SENT: 6112 default: 6113 goto out; 6114 /*sa_ignore NOTREACHED*/ 6115 break; 6116 case SCTP_STATE_OPEN: 6117 case SCTP_STATE_SHUTDOWN_SENT: 6118 break; 6119 } 6120 /* plow through the data chunks while length > offset */ 6121 retval = sctp_process_data(mm, iphlen, &offset, length, 6122 inp, stcb, net, &high_tsn); 6123 if (retval == 2) { 6124 /* 6125 * The association aborted, NO UNLOCK needed since 6126 * the association is destroyed. 6127 */ 6128 stcb = NULL; 6129 goto out; 6130 } 6131 if (retval == 0) { 6132 data_processed = 1; 6133 } 6134 /* 6135 * Anything important needs to have been m_copy'ed in 6136 * process_data 6137 */ 6138 } 6139 6140 /* take care of ecn */ 6141 if ((data_processed == 1) && 6142 (stcb->asoc.ecn_supported == 1) && 6143 ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) { 6144 /* Yep, we need to add a ECNE */ 6145 sctp_send_ecn_echo(stcb, net, high_tsn); 6146 } 6147 6148 if ((data_processed == 0) && (fwd_tsn_seen)) { 6149 int was_a_gap; 6150 uint32_t highest_tsn; 6151 6152 if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) { 6153 highest_tsn = stcb->asoc.highest_tsn_inside_nr_map; 6154 } else { 6155 highest_tsn = stcb->asoc.highest_tsn_inside_map; 6156 } 6157 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 6158 stcb->asoc.send_sack = 1; 6159 sctp_sack_check(stcb, was_a_gap); 6160 } else if (fwd_tsn_seen) { 6161 stcb->asoc.send_sack = 1; 6162 } 6163 /* trigger send of any chunks in queue... */ 6164 trigger_send: 6165 #ifdef SCTP_AUDITING_ENABLED 6166 sctp_audit_log(0xE0, 2); 6167 sctp_auditing(1, inp, stcb, net); 6168 #endif 6169 SCTPDBG(SCTP_DEBUG_INPUT1, 6170 "Check for chunk output prw:%d tqe:%d tf=%d\n", 6171 stcb->asoc.peers_rwnd, 6172 TAILQ_EMPTY(&stcb->asoc.control_send_queue), 6173 stcb->asoc.total_flight); 6174 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight); 6175 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 6176 cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq; 6177 } 6178 if (!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue) || 6179 cnt_ctrl_ready || 6180 stcb->asoc.trigger_reset || 6181 ((un_sent > 0) && 6182 (stcb->asoc.peers_rwnd > 0 || stcb->asoc.total_flight == 0))) { 6183 SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n"); 6184 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 6185 SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n"); 6186 } 6187 #ifdef SCTP_AUDITING_ENABLED 6188 sctp_audit_log(0xE0, 3); 6189 sctp_auditing(2, inp, stcb, net); 6190 #endif 6191 out: 6192 if (stcb != NULL) { 6193 SCTP_TCB_UNLOCK(stcb); 6194 } 6195 #if defined(__Userspace__) 6196 if (upcall_socket != NULL) { 6197 if (upcall_socket->so_upcall != NULL) { 6198 if (soreadable(upcall_socket) || 6199 sowriteable(upcall_socket) || 6200 upcall_socket->so_error) { 6201 (*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT); 6202 } 6203 } 6204 ACCEPT_LOCK(); 6205 SOCK_LOCK(upcall_socket); 6206 sorele(upcall_socket); 6207 } 6208 #endif 6209 if (inp_decr != NULL) { 6210 /* reduce ref-count */ 6211 SCTP_INP_WLOCK(inp_decr); 6212 SCTP_INP_DECR_REF(inp_decr); 6213 SCTP_INP_WUNLOCK(inp_decr); 6214 } 6215 return; 6216 } 6217 6218 #ifdef INET 6219 #if !defined(__Userspace__) 6220 void 6221 sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port) 6222 { 6223 struct mbuf *m; 6224 int iphlen; 6225 uint32_t vrf_id = 0; 6226 uint8_t ecn_bits; 6227 struct sockaddr_in src, dst; 6228 struct ip *ip; 6229 struct sctphdr *sh; 6230 struct sctp_chunkhdr *ch; 6231 int length, offset; 6232 uint8_t compute_crc; 6233 #if defined(__FreeBSD__) && !defined(__Userspace__) 6234 uint32_t mflowid; 6235 uint8_t mflowtype; 6236 uint16_t fibnum; 6237 #endif 6238 #if defined(__Userspace__) 6239 uint16_t port = 0; 6240 #endif 6241 6242 iphlen = off; 6243 if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) { 6244 SCTP_RELEASE_PKT(i_pak); 6245 return; 6246 } 6247 m = SCTP_HEADER_TO_CHAIN(i_pak); 6248 #ifdef SCTP_MBUF_LOGGING 6249 /* Log in any input mbufs */ 6250 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6251 sctp_log_mbc(m, SCTP_MBUF_INPUT); 6252 } 6253 #endif 6254 #ifdef SCTP_PACKET_LOGGING 6255 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) { 6256 sctp_packet_log(m); 6257 } 6258 #endif 6259 #if defined(__FreeBSD__) && !defined(__Userspace__) 6260 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 6261 "sctp_input(): Packet of length %d received on %s with csum_flags 0x%b.\n", 6262 m->m_pkthdr.len, 6263 if_name(m->m_pkthdr.rcvif), 6264 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 6265 #endif 6266 #if defined(__APPLE__) && !defined(__Userspace__) 6267 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 6268 "sctp_input(): Packet of length %d received on %s%d with csum_flags 0x%x.\n", 6269 m->m_pkthdr.len, 6270 m->m_pkthdr.rcvif->if_name, 6271 m->m_pkthdr.rcvif->if_unit, 6272 m->m_pkthdr.csum_flags); 6273 #endif 6274 #if defined(_WIN32) && !defined(__Userspace__) 6275 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 6276 "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n", 6277 m->m_pkthdr.len, 6278 m->m_pkthdr.rcvif->if_xname, 6279 m->m_pkthdr.csum_flags); 6280 #endif 6281 #if defined(__FreeBSD__) && !defined(__Userspace__) 6282 mflowid = m->m_pkthdr.flowid; 6283 mflowtype = M_HASHTYPE_GET(m); 6284 fibnum = M_GETFIB(m); 6285 #endif 6286 SCTP_STAT_INCR(sctps_recvpackets); 6287 SCTP_STAT_INCR_COUNTER64(sctps_inpackets); 6288 /* Get IP, SCTP, and first chunk header together in the first mbuf. */ 6289 offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 6290 if (SCTP_BUF_LEN(m) < offset) { 6291 if ((m = m_pullup(m, offset)) == NULL) { 6292 SCTP_STAT_INCR(sctps_hdrops); 6293 return; 6294 } 6295 } 6296 ip = mtod(m, struct ip *); 6297 sh = (struct sctphdr *)((caddr_t)ip + iphlen); 6298 ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr)); 6299 offset -= sizeof(struct sctp_chunkhdr); 6300 memset(&src, 0, sizeof(struct sockaddr_in)); 6301 src.sin_family = AF_INET; 6302 #ifdef HAVE_SIN_LEN 6303 src.sin_len = sizeof(struct sockaddr_in); 6304 #endif 6305 src.sin_port = sh->src_port; 6306 src.sin_addr = ip->ip_src; 6307 memset(&dst, 0, sizeof(struct sockaddr_in)); 6308 dst.sin_family = AF_INET; 6309 #ifdef HAVE_SIN_LEN 6310 dst.sin_len = sizeof(struct sockaddr_in); 6311 #endif 6312 dst.sin_port = sh->dest_port; 6313 dst.sin_addr = ip->ip_dst; 6314 #if defined(_WIN32) && !defined(__Userspace__) 6315 NTOHS(ip->ip_len); 6316 #endif 6317 #if defined(__linux__) || (defined(_WIN32) && defined(__Userspace__)) 6318 ip->ip_len = ntohs(ip->ip_len); 6319 #endif 6320 #if defined(__Userspace__) 6321 #if defined(__linux__) || defined(_WIN32) 6322 length = ip->ip_len; 6323 #else 6324 length = ip->ip_len + iphlen; 6325 #endif 6326 #elif defined(__FreeBSD__) 6327 length = ntohs(ip->ip_len); 6328 #elif defined(__APPLE__) 6329 length = ip->ip_len + iphlen; 6330 #else 6331 length = ip->ip_len; 6332 #endif 6333 /* Validate mbuf chain length with IP payload length. */ 6334 if (SCTP_HEADER_LEN(m) != length) { 6335 SCTPDBG(SCTP_DEBUG_INPUT1, 6336 "sctp_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m)); 6337 SCTP_STAT_INCR(sctps_hdrops); 6338 goto out; 6339 } 6340 /* SCTP does not allow broadcasts or multicasts */ 6341 if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) { 6342 goto out; 6343 } 6344 if (SCTP_IS_IT_BROADCAST(dst.sin_addr, m)) { 6345 goto out; 6346 } 6347 ecn_bits = ip->ip_tos; 6348 #if defined(__FreeBSD__) && !defined(__Userspace__) 6349 if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) { 6350 SCTP_STAT_INCR(sctps_recvhwcrc); 6351 compute_crc = 0; 6352 } else { 6353 #else 6354 if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) && 6355 ((src.sin_addr.s_addr == dst.sin_addr.s_addr) || 6356 (SCTP_IS_IT_LOOPBACK(m)))) { 6357 SCTP_STAT_INCR(sctps_recvhwcrc); 6358 compute_crc = 0; 6359 } else { 6360 #endif 6361 SCTP_STAT_INCR(sctps_recvswcrc); 6362 compute_crc = 1; 6363 } 6364 sctp_common_input_processing(&m, iphlen, offset, length, 6365 (struct sockaddr *)&src, 6366 (struct sockaddr *)&dst, 6367 sh, ch, 6368 compute_crc, 6369 ecn_bits, 6370 #if defined(__FreeBSD__) && !defined(__Userspace__) 6371 mflowtype, mflowid, fibnum, 6372 #endif 6373 vrf_id, port); 6374 out: 6375 if (m) { 6376 sctp_m_freem(m); 6377 } 6378 return; 6379 } 6380 6381 #if defined(__FreeBSD__) && !defined(__Userspace__) 6382 #if defined(SCTP_MCORE_INPUT) && defined(SMP) 6383 extern int *sctp_cpuarry; 6384 #endif 6385 #endif 6386 6387 #if defined(__FreeBSD__) && !defined(__Userspace__) 6388 int 6389 sctp_input(struct mbuf **mp, int *offp, int proto SCTP_UNUSED) 6390 { 6391 struct mbuf *m; 6392 int off; 6393 6394 m = *mp; 6395 off = *offp; 6396 #else 6397 void 6398 sctp_input(struct mbuf *m, int off) 6399 { 6400 #endif 6401 #if defined(__FreeBSD__) && !defined(__Userspace__) 6402 #if defined(SCTP_MCORE_INPUT) && defined(SMP) 6403 if (mp_ncpus > 1) { 6404 struct ip *ip; 6405 struct sctphdr *sh; 6406 int offset; 6407 int cpu_to_use; 6408 uint32_t flowid, tag; 6409 6410 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { 6411 flowid = m->m_pkthdr.flowid; 6412 } else { 6413 /* No flow id built by lower layers 6414 * fix it so we create one. 6415 */ 6416 offset = off + sizeof(struct sctphdr); 6417 if (SCTP_BUF_LEN(m) < offset) { 6418 if ((m = m_pullup(m, offset)) == NULL) { 6419 SCTP_STAT_INCR(sctps_hdrops); 6420 return (IPPROTO_DONE); 6421 } 6422 } 6423 ip = mtod(m, struct ip *); 6424 sh = (struct sctphdr *)((caddr_t)ip + off); 6425 tag = htonl(sh->v_tag); 6426 flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port); 6427 m->m_pkthdr.flowid = flowid; 6428 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE_HASH); 6429 } 6430 cpu_to_use = sctp_cpuarry[flowid % mp_ncpus]; 6431 sctp_queue_to_mcore(m, off, cpu_to_use); 6432 return (IPPROTO_DONE); 6433 } 6434 #endif 6435 #endif 6436 sctp_input_with_port(m, off, 0); 6437 #if defined(__FreeBSD__) && !defined(__Userspace__) 6438 return (IPPROTO_DONE); 6439 #endif 6440 } 6441 #endif 6442 #endif