sctp_timer.c (51008B)
1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * a) Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 14 * b) Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the distribution. 17 * 18 * c) Neither the name of Cisco Systems, Inc. nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
 */

#define _IP_VHL
#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#ifdef INET6
#if defined(__FreeBSD__) && defined(__Userspace__)
#include <netinet6/sctp6_var.h>
#endif
#endif
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>
#if defined(INET) || defined(INET6)
#if !(defined(_WIN32) && defined(__Userspace__))
#include <netinet/udp.h>
#endif
#endif

/*
 * Recompute the association's retransmission and sent-queue counters from
 * scratch by walking the sent, control-send and asconf-send queues.
 * Used by the auditing code to repair counters that have drifted.
 */
void
sctp_audit_retranmission_queue(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;

	SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n",
	    asoc->sent_queue_retran_cnt,
	    asoc->sent_queue_cnt);
	/* Reset both counters, then re-derive them from the queues below. */
	asoc->sent_queue_retran_cnt = 0;
	asoc->sent_queue_cnt = 0;
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
		asoc->sent_queue_cnt++;
	}
	/* Control and asconf chunks count toward retran_cnt but not sent_queue_cnt. */
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
	}
	TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
	}
	SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n",
	    asoc->sent_queue_retran_cnt,
	    asoc->sent_queue_cnt);
}

/*
 * Account one timeout/error event against 'net' (if non-NULL) and against
 * the association as a whole. May move 'net' into the PF
 * (potentially-failed) state or mark it unreachable, and aborts the whole
 * association once the overall error count exceeds 'threshold'.
 *
 * Returns 1 if the association was aborted/destroyed, 0 otherwise.
 * Caller must hold the TCB lock.
 */
static int
sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint16_t threshold)
{
	KASSERT(stcb != NULL, ("stcb is NULL"));
	SCTP_TCB_LOCK_ASSERT(stcb);

	if (net != NULL) {
		net->error_count++;
		SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n",
		    (void *)net, net->error_count,
		    net->failure_threshold);
		if (net->error_count > net->failure_threshold) {
			/* We had a threshold failure */
			if (net->dest_state & SCTP_ADDR_REACHABLE) {
				net->dest_state &= ~SCTP_ADDR_REACHABLE;
				net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
				net->dest_state &= ~SCTP_ADDR_PF;
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
				    stcb, 0,
				    (void *)net, SCTP_SO_NOT_LOCKED);
			}
		} else if ((net->pf_threshold < net->failure_threshold) &&
		    (net->error_count > net->pf_threshold)) {
			/* Entering the potentially-failed state: probe with a HB. */
			if ((net->dest_state & SCTP_ADDR_PF) == 0) {
				net->dest_state |= SCTP_ADDR_PF;
				net->last_active = sctp_get_tick_count();
				sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
				/* Restart the HB timer so it reflects the PF probing cadence. */
				sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
				    inp, stcb, net,
				    SCTP_FROM_SCTP_TIMER + SCTP_LOC_1);
				sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			}
		}
		/* Errors on unconfirmed addresses do not count against the assoc. */
		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_INCR,
				    stcb->asoc.overall_error_count,
				    (stcb->asoc.overall_error_count+1),
				    SCTP_FROM_SCTP_TIMER,
				    __LINE__);
			}
			stcb->asoc.overall_error_count++;
		}
	} else {
		/* No specific net: charge the error to the association only. */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_INCR,
			    stcb->asoc.overall_error_count,
			    (stcb->asoc.overall_error_count+1),
			    SCTP_FROM_SCTP_TIMER,
			    __LINE__);
		}
		stcb->asoc.overall_error_count++;
	}
	SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n",
	    (void *)&stcb->asoc, stcb->asoc.overall_error_count,
	    (uint32_t)threshold,
	    ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
	/*
	 * We specifically do not do >= to give the assoc one more chance
	 * before we fail it.
	 */
	if (stcb->asoc.overall_error_count > threshold) {
		/* Abort notification sends a ULP notify */
		struct mbuf *op_err;

		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Association error counter exceeded");
		inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_2;
		sctp_abort_an_association(inp, stcb, op_err, true, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	return (0);
}
/*
 * sctp_find_alternate_net() returns a non-NULL pointer as long as there
 * exists nets, which are not being deleted.
 *
 * mode == 2: CMT-PF policy — prefer the active (non-PF) destination with
 *            the largest cwnd; if none, the PF destination with the fewest
 *            errors (most recently active breaks ties).
 * mode == 1: CMT policy — largest-cwnd reachable, confirmed destination.
 * mode == 0 (or fallthrough): round-robin to the next reachable, confirmed
 *            destination after 'net', degrading to any confirmed one, then
 *            to 'net' itself / the first net.
 */
struct sctp_nets *
sctp_find_alternate_net(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int mode)
{
	/* Find and return an alternate network if possible */
	struct sctp_nets *alt, *mnet, *min_errors_net = NULL, *max_cwnd_net = NULL;
	bool looped;
	/* JRS 5/14/07 - Initialize min_errors to an impossible value. */
	int min_errors = -1;
	uint32_t max_cwnd = 0;

	if (stcb->asoc.numnets == 1) {
		/* No selection can be made. */
		return (TAILQ_FIRST(&stcb->asoc.nets));
	}
	/*
	 * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate net algorithm.
	 * This algorithm chooses the active destination (not in PF state) with the largest
	 * cwnd value. If all destinations are in PF state, unreachable, or unconfirmed, choose
	 * the destination that is in PF state with the lowest error count. In case of a tie,
	 * choose the destination that was most recently active.
	 */
	if (mode == 2) {
		TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
			/* JRS 5/14/07 - If the destination is unreachable or unconfirmed, skip it. */
			if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
			    (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				continue;
			}
			/*
			 * JRS 5/14/07 - If the destination is reachable but in PF state, compare
			 * the error count of the destination to the minimum error count seen thus far.
			 * Store the destination with the lower error count. If the error counts are
			 * equal, store the destination that was most recently active.
			 */
			if (mnet->dest_state & SCTP_ADDR_PF) {
				/*
				 * JRS 5/14/07 - If the destination under consideration is the current
				 * destination, work as if the error count is one higher. The
				 * actual error count will not be incremented until later in the
				 * t3 handler.
				 */
				if (mnet == net) {
					if (min_errors == -1) {
						min_errors = mnet->error_count + 1;
						min_errors_net = mnet;
					} else if (mnet->error_count + 1 < min_errors) {
						min_errors = mnet->error_count + 1;
						min_errors_net = mnet;
					} else if (mnet->error_count + 1 == min_errors
					    && mnet->last_active > min_errors_net->last_active) {
						min_errors_net = mnet;
						min_errors = mnet->error_count + 1;
					}
					continue;
				} else {
					if (min_errors == -1) {
						min_errors = mnet->error_count;
						min_errors_net = mnet;
					} else if (mnet->error_count < min_errors) {
						min_errors = mnet->error_count;
						min_errors_net = mnet;
					} else if (mnet->error_count == min_errors
					    && mnet->last_active > min_errors_net->last_active) {
						min_errors_net = mnet;
						min_errors = mnet->error_count;
					}
					continue;
				}
			}
			/*
			 * JRS 5/14/07 - If the destination is reachable and not in PF state, compare the
			 * cwnd of the destination to the highest cwnd seen thus far. Store the
			 * destination with the higher cwnd value. If the cwnd values are equal,
			 * randomly choose one of the two destinations.
			 */
			if (max_cwnd < mnet->cwnd) {
				max_cwnd_net = mnet;
				max_cwnd = mnet->cwnd;
			} else if (max_cwnd == mnet->cwnd) {
				uint32_t rndval;
				uint8_t this_random;

				if (stcb->asoc.hb_random_idx > 3) {
					/* Random-value pool exhausted; refill from a fresh TSN. */
					rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
					memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values));
					this_random = stcb->asoc.hb_random_values[0];
					/*
					 * NOTE(review): this branch increments hb_random_idx after a
					 * refill, while the mode==1 branch below resets it to 0 —
					 * here the index keeps growing, so every tie refills and
					 * uses index 0. Looks inconsistent; confirm against intent.
					 */
					stcb->asoc.hb_random_idx++;
					stcb->asoc.hb_ect_randombit = 0;
				} else {
					this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
					stcb->asoc.hb_random_idx++;
					stcb->asoc.hb_ect_randombit = 0;
				}
				if (this_random % 2 == 1) {
					max_cwnd_net = mnet;
					max_cwnd = mnet->cwnd; /* Useless? */
				}
			}
		}
		if (max_cwnd_net == NULL) {
			if (min_errors_net == NULL) {
				return (net);
			}
			return (min_errors_net);
		} else {
			return (max_cwnd_net);
		}
	} /* JRS 5/14/07 - If mode is set to 1, use the CMT policy for choosing an alternate net. */
	else if (mode == 1) {
		TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
			if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
			    (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				/*
				 * will skip ones that are not-reachable or
				 * unconfirmed
				 */
				continue;
			}
			if (max_cwnd < mnet->cwnd) {
				max_cwnd_net = mnet;
				max_cwnd = mnet->cwnd;
			} else if (max_cwnd == mnet->cwnd) {
				/* Tie on cwnd: flip a coin from the cached random pool. */
				uint32_t rndval;
				uint8_t this_random;

				if (stcb->asoc.hb_random_idx > 3) {
					rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
					memcpy(stcb->asoc.hb_random_values, &rndval,
					    sizeof(stcb->asoc.hb_random_values));
					this_random = stcb->asoc.hb_random_values[0];
					stcb->asoc.hb_random_idx = 0;
					stcb->asoc.hb_ect_randombit = 0;
				} else {
					this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
					stcb->asoc.hb_random_idx++;
					stcb->asoc.hb_ect_randombit = 0;
				}
				if (this_random % 2) {
					max_cwnd_net = mnet;
					max_cwnd = mnet->cwnd;
				}
			}
		}
		if (max_cwnd_net) {
			return (max_cwnd_net);
		}
		/* Fall through to the round-robin search if nothing qualified. */
	}
	/* Look for an alternate net, which is active. */
	if ((net != NULL) && ((net->dest_state & SCTP_ADDR_BEING_DELETED) == 0)) {
		alt = TAILQ_NEXT(net, sctp_next);
	} else {
		alt = TAILQ_FIRST(&stcb->asoc.nets);
	}
	looped = false;
	for (;;) {
		if (alt == NULL) {
			/* Wrap around the list exactly once. */
			if (!looped) {
				alt = TAILQ_FIRST(&stcb->asoc.nets);
				looped = true;
			}
			/* Definitely out of candidates. */
			if (alt == NULL) {
				break;
			}
		}
#if defined(__FreeBSD__) && !defined(__Userspace__)
		if (alt->ro.ro_nh == NULL) {
#else
		if (alt->ro.ro_rt == NULL) {
#endif
			/* No cached route: drop the stale source address selection. */
			if (alt->ro._s_addr) {
				sctp_free_ifa(alt->ro._s_addr);
				alt->ro._s_addr = NULL;
			}
			alt->src_addr_selected = 0;
		}
		if (((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
#if defined(__FreeBSD__) && !defined(__Userspace__)
		    (alt->ro.ro_nh != NULL) &&
#else
		    (alt->ro.ro_rt != NULL) &&
#endif
		    ((alt->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) &&
		    (alt != net)) {
			/* Found an alternate net, which is reachable. */
			break;
		}
		alt = TAILQ_NEXT(alt, sctp_next);
	}

	if (alt == NULL) {
		/*
		 * In case no active alternate net has been found, look for
		 * an alternate net, which is confirmed.
		 */
		if ((net != NULL) && ((net->dest_state & SCTP_ADDR_BEING_DELETED) == 0)) {
			alt = TAILQ_NEXT(net, sctp_next);
		} else {
			alt = TAILQ_FIRST(&stcb->asoc.nets);
		}
		looped = false;
		for (;;) {
			if (alt == NULL) {
				if (!looped) {
					alt = TAILQ_FIRST(&stcb->asoc.nets);
					looped = true;
				}
				/* Definitely out of candidates. */
				if (alt == NULL) {
					break;
				}
			}
			if (((alt->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) &&
			    (alt != net)) {
				/* Found an alternate net, which is confirmed. */
				break;
			}
			alt = TAILQ_NEXT(alt, sctp_next);
		}
	}
	if (alt == NULL) {
		/*
		 * In case no confirmed alternate net has been found, just
		 * return net, if it is not being deleted. In the other case
		 * just return the first net.
		 */
		if ((net != NULL) && ((net->dest_state & SCTP_ADDR_BEING_DELETED) == 0)) {
			alt = net;
		}
		if (alt == NULL) {
			alt = TAILQ_FIRST(&stcb->asoc.nets);
		}
	}
	return (alt);
}
/*
 * Exponentially back off the RTO of 'net' (clamped to maxrto) after a
 * timeout, and — unless this was a window probe or nothing was marked for
 * retransmission/abandonment — let the CC module apply its timeout penalty.
 */
static void
sctp_backoff_on_timeout(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int win_probe,
    int num_marked, int num_abandoned)
{
	if (net->RTO == 0) {
		/* No RTO yet: seed from the measured minimum or the initial RTO. */
		if (net->RTO_measured) {
			net->RTO = stcb->asoc.minrto;
		} else {
			net->RTO = stcb->asoc.initial_rto;
		}
	}
	net->RTO <<= 1;
	if (net->RTO > stcb->asoc.maxrto) {
		net->RTO = stcb->asoc.maxrto;
	}
	if ((win_probe == 0) && (num_marked || num_abandoned)) {
		/* We don't apply penalty to window probe scenarios */
		/* JRS - Use the congestion control given in the CC module */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net);
	}
}

#ifndef INVARIANTS
/*
 * Emergency repair of the sent queue: remove every chunk whose TSN is at or
 * below last_acked_seq (which should never still be queued), fixing up the
 * per-stream counts, PR-SCTP counters and buffer space as we go. Only built
 * when INVARIANTS is off — with INVARIANTS on, the caller panics instead.
 */
static void
sctp_recover_sent_list(struct sctp_tcb *stcb)
{
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_association *asoc;

	asoc = &stcb->asoc;
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		if (SCTP_TSN_GE(asoc->last_acked_seq, chk->rec.data.tsn)) {
			SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n",
			    (void *)chk, chk->rec.data.tsn, asoc->last_acked_seq);
			if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
				if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
					asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
				}
			}
			/* A drained stream with a pending reset can now trigger it. */
			if ((asoc->strmout[chk->rec.data.sid].chunks_on_queues == 0) &&
			    (asoc->strmout[chk->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
			    TAILQ_EMPTY(&asoc->strmout[chk->rec.data.sid].outqueue)) {
				asoc->trigger_reset = 1;
			}
			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
			if (PR_SCTP_ENABLED(chk->flags)) {
				if (asoc->pr_sctp_cnt != 0)
					asoc->pr_sctp_cnt--;
			}
			if (chk->data) {
				/*sa_ignore NO_NULL_CHK*/
				sctp_free_bufspace(stcb, asoc, chk, 1);
				sctp_m_freem(chk->data);
				chk->data = NULL;
				if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(chk->flags)) {
					asoc->sent_queue_cnt_removeable--;
				}
			}
			asoc->sent_queue_cnt--;
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		}
	}
	SCTP_PRINTF("after recover order is as follows\n");
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		SCTP_PRINTF("chk:%p TSN:%x\n", (void *)chk, chk->rec.data.tsn);
	}
}
#endif
/*
 * T3-rtx marking pass: mark chunks outstanding on 'net' for retransmission
 * and move them to 'alt' (taking a reference on 'alt' for each move). Only
 * chunks that have been outstanding for at least one RTO are marked, unless
 * this is a window probe. PR-SCTP chunks past their TTL or retransmit limit
 * are abandoned instead. Also re-homes any stranded ECN-ECHO chunks and
 * audits/repairs the retransmission count and flight-size accounting.
 *
 * Outputs the number of chunks marked and abandoned via num_marked /
 * num_abandoned. Always returns 0.
 */
static int
sctp_mark_all_for_resend(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct sctp_nets *alt,
    int window_probe,
    int *num_marked,
    int *num_abandoned)
{

	/*
	 * Mark all chunks (well not all) that were sent to *net for
	 * retransmission. Move them to alt for their destination as well...
	 * We only mark chunks that have been outstanding long enough to
	 * have received feed-back.
	 */
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_nets *lnets;
	struct timeval now, min_wait, tv;
	int cur_rto;
	int cnt_abandoned;
	int audit_tf, num_mk, fir;
	unsigned int cnt_mk;
	uint32_t orig_flight, orig_tf;
	uint32_t tsnlast, tsnfirst;
#ifndef INVARIANTS
	int recovery_cnt = 0;
#endif

	/* none in flight now */
	audit_tf = 0;
	fir = 0;
	/*
	 * figure out how long a data chunk must be pending before we can
	 * mark it ..
	 */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* get cur rto in micro-seconds */
	cur_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	cur_rto *= 1000;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(cur_rto,
		    stcb->asoc.peers_rwnd,
		    window_probe,
		    SCTP_FR_T3_MARK_TIME);
		sctp_log_fr(net->flight_size, 0, 0, SCTP_FR_CWND_REPORT);
		sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
	}
	tv.tv_sec = cur_rto / 1000000;
	tv.tv_usec = cur_rto % 1000000;
	/* min_wait = now - RTO: chunks sent after this are too fresh to mark. */
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
	timersub(&now, &tv, &min_wait);
#else
	min_wait = now;
	timevalsub(&min_wait, &tv);
#endif
	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
		/*
		 * if we hit here, we don't have enough seconds on the clock
		 * to account for the RTO. We just let the lower seconds be
		 * the bounds and don't worry about it. This may mean we
		 * will mark a lot more than we should.
		 */
		min_wait.tv_sec = min_wait.tv_usec = 0;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(cur_rto, (uint32_t)now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
		sctp_log_fr(0, (uint32_t)min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
	}
	/*
	 * Our rwnd will be incorrect here since we are not adding back the
	 * cnt * mbuf but we will fix that down below.
	 */
	orig_flight = net->flight_size;
	orig_tf = stcb->asoc.total_flight;

	net->fast_retran_ip = 0;
	/* Now on to each chunk */
	cnt_abandoned = 0;
	num_mk = cnt_mk = 0;
	tsnfirst = tsnlast = 0;
#ifndef INVARIANTS
start_again:
#endif
	TAILQ_FOREACH_SAFE(chk, &stcb->asoc.sent_queue, sctp_next, nchk) {
		if (SCTP_TSN_GE(stcb->asoc.last_acked_seq, chk->rec.data.tsn)) {
			/* Strange case our list got out of order? */
			SCTP_PRINTF("Our list is out of order? last_acked:%x chk:%x\n",
			    (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.tsn);
#ifdef INVARIANTS
			panic("last acked >= chk on sent-Q");
#else
			/* Try to repair the queue and restart the scan (bounded retries). */
			recovery_cnt++;
			SCTP_PRINTF("Recover attempts a restart cnt:%d\n", recovery_cnt);
			sctp_recover_sent_list(stcb);
			if (recovery_cnt < 10) {
				goto start_again;
			} else {
				SCTP_PRINTF("Recovery fails %d times??\n", recovery_cnt);
			}
#endif
		}
		if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
			/*
			 * found one to mark: If it is less than
			 * DATAGRAM_ACKED it MUST not be a skipped or marked
			 * TSN but instead one that is either already set
			 * for retransmission OR one that needs
			 * retransmission.
			 */

			/* validate its been outstanding long enough */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(chk->rec.data.tsn,
				    (uint32_t)chk->sent_rcv_time.tv_sec,
				    chk->sent_rcv_time.tv_usec,
				    SCTP_FR_T3_MARK_TIME);
			}
			if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
				/*
				 * we have reached a chunk that was sent
				 * some seconds past our min.. forget it we
				 * will find no more to send.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
					sctp_log_fr(0,
					    (uint32_t)chk->sent_rcv_time.tv_sec,
					    chk->sent_rcv_time.tv_usec,
					    SCTP_FR_T3_STOPPED);
				}
				continue;
			} else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
			    (window_probe == 0)) {
				/*
				 * we must look at the micro seconds to
				 * know.
				 */
				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
					/*
					 * ok it was sent after our boundary
					 * time.
					 */
					continue;
				}
			}
			if (stcb->asoc.prsctp_supported && PR_SCTP_TTL_ENABLED(chk->flags)) {
				/* Is it expired? */
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
				if (timercmp(&now, &chk->rec.data.timetodrop, >)) {
#else
				if (timevalcmp(&now, &chk->rec.data.timetodrop, >)) {
#endif
					/* Yes so drop it */
					if (chk->data) {
						(void)sctp_release_pr_sctp_chunk(stcb,
						    chk,
						    1,
						    SCTP_SO_NOT_LOCKED);
						cnt_abandoned++;
					}
					continue;
				}
			}
			if (stcb->asoc.prsctp_supported && PR_SCTP_RTX_ENABLED(chk->flags)) {
				/* Has it been retransmitted tv_sec times? */
				if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
					if (chk->data) {
						(void)sctp_release_pr_sctp_chunk(stcb,
						    chk,
						    1,
						    SCTP_SO_NOT_LOCKED);
						cnt_abandoned++;
					}
					continue;
				}
			}
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				/* First time this chunk is marked: update all accounting. */
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				num_mk++;
				if (fir == 0) {
					fir = 1;
					tsnfirst = chk->rec.data.tsn;
				}
				tsnlast = chk->rec.data.tsn;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
					sctp_log_fr(chk->rec.data.tsn, chk->snd_count,
					    0, SCTP_FR_T3_MARKED);
				}

				if (chk->rec.data.chunk_was_revoked) {
					/* deflate the cwnd */
					chk->whoTo->cwnd -= chk->book_size;
					chk->rec.data.chunk_was_revoked = 0;
				}
				net->marked_retrans++;
				stcb->asoc.marked_retrans++;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO,
					    chk->whoTo->flight_size,
					    chk->book_size,
					    (uint32_t)(uintptr_t)chk->whoTo,
					    chk->rec.data.tsn);
				}
				sctp_flight_size_decrease(chk);
				sctp_total_flight_decrease(stcb, chk);
				stcb->asoc.peers_rwnd += chk->send_size;
				stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			}
			chk->sent = SCTP_DATAGRAM_RESEND;
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
			SCTP_STAT_INCR(sctps_markedretrans);

			/* reset the TSN for striking and other FR stuff */
			chk->rec.data.doing_fast_retransmit = 0;
			/* Clear any time so NO RTT is being done */

			if (chk->do_rtt) {
				if (chk->whoTo->rto_needed == 0) {
					chk->whoTo->rto_needed = 1;
				}
			}
			chk->do_rtt = 0;
			if (alt != net) {
				/* Re-home the chunk to the alternate destination. */
				sctp_free_remote_addr(chk->whoTo);
				chk->no_fr_allowed = 1;
				chk->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			} else {
				chk->no_fr_allowed = 0;
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn;
				}
			}
			/*
			 * CMT: Do not allow FRs on retransmitted TSNs.
			 */
			if (stcb->asoc.sctp_cmt_on_off > 0) {
				chk->no_fr_allowed = 1;
			}
#ifdef THIS_SHOULD_NOT_BE_DONE
		} else if (chk->sent == SCTP_DATAGRAM_ACKED) {
			/* remember highest acked one */
			could_be_sent = chk;
#endif
		}
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			cnt_mk++;
		}
	}
	if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) {
		/* we did not subtract the same things? */
		audit_tf = 1;
	}

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
	}
#ifdef SCTP_DEBUG
	if (num_mk) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
		    tsnlast);
		SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%u\n",
		    num_mk,
		    stcb->asoc.peers_rwnd);
	}
#endif
	*num_marked = num_mk;
	*num_abandoned = cnt_abandoned;
	/*
	 * Now check for a ECN Echo that may be stranded And
	 * include the cnt_mk'd to have all resends in the
	 * control queue.
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			cnt_mk++;
		}
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = alt;
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cnt_mk++;
			}
			atomic_add_int(&alt->ref_count, 1);
		}
	}
#ifdef THIS_SHOULD_NOT_BE_DONE
	if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
		/* fix it so we retransmit the highest acked anyway */
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
		cnt_mk++;
		could_be_sent->sent = SCTP_DATAGRAM_RESEND;
	}
#endif
	if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
#ifdef INVARIANTS
		SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n",
		    cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk);
#endif
#ifndef SCTP_AUDITING_ENABLED
		/* Trust the local count over the drifted association counter. */
		stcb->asoc.sent_queue_retran_cnt = cnt_mk;
#endif
	}
	if (audit_tf) {
		SCTPDBG(SCTP_DEBUG_TIMER4,
		    "Audit total flight due to negative value net:%p\n",
		    (void *)net);
		stcb->asoc.total_flight = 0;
		stcb->asoc.total_flight_count = 0;
		/* Clear all networks flight size */
		TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
			lnets->flight_size = 0;
			SCTPDBG(SCTP_DEBUG_TIMER4,
			    "Net:%p c-f cwnd:%d ssthresh:%d\n",
			    (void *)lnets, lnets->cwnd, lnets->ssthresh);
		}
		/* Rebuild flight sizes from the chunks still considered in flight. */
		TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
					    chk->whoTo->flight_size,
					    chk->book_size,
					    (uint32_t)(uintptr_t)chk->whoTo,
					    chk->rec.data.tsn);
				}

				sctp_flight_size_increase(chk);
				sctp_total_flight_increase(stcb, chk);
			}
		}
	}
	/* We return 1 if we only have a window probe outstanding */
	/* NOTE(review): comment above disagrees with the code — this function
	 * always returns 0 and its only caller ignores the value. */
	return (0);
}
/*
 * T3-rtx timer expiration handler (RFC 4960, section 6.3.3). Performs
 * threshold management (possibly aborting the association), selects an
 * alternate destination, marks outstanding chunks on 'net' for
 * retransmission, backs off the RTO/cwnd, and advances the PR-SCTP
 * peer-ack point if needed.
 *
 * Returns 1 if the association was destroyed, 0 otherwise.
 */
int
sctp_t3rxt_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	int win_probe, num_mk, num_abandoned;

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
		struct sctp_nets *lnet;

		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			if (net == lnet) {
				sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
			} else {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
			}
		}
	}
	/* Find an alternate and mark those for retransmission */
	if ((stcb->asoc.peers_rwnd == 0) &&
	    (stcb->asoc.total_flight < net->mtu)) {
		/* Peer closed its window: this timeout is a zero-window probe. */
		SCTP_STAT_INCR(sctps_timowindowprobe);
		win_probe = 1;
	} else {
		win_probe = 0;
	}

	if (win_probe == 0) {
		/* We don't do normal threshold management on window probes */
		if (sctp_threshold_management(inp, stcb, net,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		} else {
			if (net != stcb->asoc.primary_destination) {
				/* send a immediate HB if our RTO is stale */
				struct timeval now;
				uint32_t ms_goneby;

				(void)SCTP_GETTIME_TIMEVAL(&now);
				if (net->last_sent_time.tv_sec) {
					ms_goneby = (uint32_t)(now.tv_sec - net->last_sent_time.tv_sec) * 1000;
				} else {
					ms_goneby = 0;
				}
				if ((net->dest_state & SCTP_ADDR_PF) == 0) {
					if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
						/*
						 * no recent feed back in an RTO or
						 * more, request a RTT update
						 */
						sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
					}
				}
			}
		}
	} else {
		/*
		 * For a window probe we don't penalize the net's but only
		 * the association. This may fail it if SACKs are not coming
		 * back. If sack's are coming with rwnd locked at 0, we will
		 * continue to hold things waiting for rwnd to raise
		 */
		if (sctp_threshold_management(inp, stcb, NULL,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		}
	}
	if (stcb->asoc.sctp_cmt_on_off > 0) {
		if (net->pf_threshold < net->failure_threshold) {
			/* CMT-PF: use the PF-aware alternate-net selection. */
			alt = sctp_find_alternate_net(stcb, net, 2);
		} else {
			/*
			 * CMT: Using RTX_SSTHRESH policy for CMT.
			 * If CMT is being used, then pick dest with
			 * largest ssthresh for any retransmission.
			 */
			alt = sctp_find_alternate_net(stcb, net, 1);
			/*
			 * CUCv2: If a different dest is picked for
			 * the retransmission, then new
			 * (rtx-)pseudo_cumack needs to be tracked
			 * for orig dest. Let CUCv2 track new (rtx-)
			 * pseudo-cumack always.
			 */
			net->find_pseudo_cumack = 1;
			net->find_rtx_pseudo_cumack = 1;
		}
	} else {
		alt = sctp_find_alternate_net(stcb, net, 0);
	}

	num_mk = 0;
	num_abandoned = 0;
	(void)sctp_mark_all_for_resend(stcb, net, alt, win_probe,
	    &num_mk, &num_abandoned);
	/* FR Loss recovery just ended with the T3. */
	stcb->asoc.fast_retran_loss_recovery = 0;

	/* CMT FR loss recovery ended with the T3 */
	net->fast_retran_loss_recovery = 0;
	if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
	    (net->flight_size == 0)) {
		(*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
	}

	/*
	 * setup the sat loss recovery that prevents satellite cwnd advance.
	 */
	stcb->asoc.sat_t3_loss_recovery = 1;
	stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;

	/* Backoff the timer and cwnd */
	sctp_backoff_on_timeout(stcb, net, win_probe, num_mk, num_abandoned);
	if (((net->dest_state & SCTP_ADDR_REACHABLE) == 0) ||
	    (net->dest_state & SCTP_ADDR_PF)) {
		/* Move all pending over too */
		sctp_move_chunks_from_net(stcb, net);

		/*
		 * Get the address that failed, to
		 * force a new src address selection and
		 * a route allocation.
		 */
		if (net->ro._s_addr != NULL) {
			sctp_free_ifa(net->ro._s_addr);
			net->ro._s_addr = NULL;
		}
		net->src_addr_selected = 0;

		/* Force a route allocation too */
#if defined(__FreeBSD__) && !defined(__Userspace__)
		RO_NHFREE(&net->ro);
#else
		if (net->ro.ro_rt != NULL) {
			RTFREE(net->ro.ro_rt);
			net->ro.ro_rt = NULL;
		}
#endif

		/* Was it our primary? */
		if ((stcb->asoc.primary_destination == net) && (alt != net)) {
			/*
			 * Yes, note it as such and find an alternate note:
			 * this means HB code must use this to resent the
			 * primary if it goes active AND if someone does a
			 * change-primary then this flag must be cleared
			 * from any net structures.
			 */
			if (stcb->asoc.alternate != NULL) {
				sctp_free_remote_addr(stcb->asoc.alternate);
			}
			stcb->asoc.alternate = alt;
			atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
		}
	}
	/*
	 * Special case for cookie-echo'ed case, we don't do output but must
	 * await the COOKIE-ACK before retransmission
	 */
	if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) {
		/*
		 * Here we just reset the timer and start again since we
		 * have not established the asoc
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
		return (0);
	}
	if (stcb->asoc.prsctp_supported) {
		struct sctp_tmit_chunk *lchk;

		lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (SCTP_TSN_GT(stcb->asoc.advanced_peer_ack_point, stcb->asoc.last_acked_seq)) {
			send_forward_tsn(stcb, &stcb->asoc);
			/* Skip past any chunks without a destination. */
			for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
				if (lchk->whoTo != NULL) {
					break;
				}
			}
			if (lchk != NULL) {
				/* Assure a timer is up */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
			}
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
	return (0);
}
See if we need to send a Fwd-TSN */ 994 if (SCTP_TSN_GT(stcb->asoc.advanced_peer_ack_point, stcb->asoc.last_acked_seq)) { 995 send_forward_tsn(stcb, &stcb->asoc); 996 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) { 997 if (lchk->whoTo != NULL) { 998 break; 999 } 1000 } 1001 if (lchk != NULL) { 1002 /* Assure a timer is up */ 1003 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo); 1004 } 1005 } 1006 } 1007 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1008 sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX); 1009 } 1010 return (0); 1011 } 1012 1013 int 1014 sctp_t1init_timer(struct sctp_inpcb *inp, 1015 struct sctp_tcb *stcb, 1016 struct sctp_nets *net) 1017 { 1018 /* bump the thresholds */ 1019 if (stcb->asoc.delayed_connection) { 1020 /* 1021 * special hook for delayed connection. The library did NOT 1022 * complete the rest of its sends. 1023 */ 1024 stcb->asoc.delayed_connection = 0; 1025 sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED); 1026 return (0); 1027 } 1028 if (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT) { 1029 return (0); 1030 } 1031 if (sctp_threshold_management(inp, stcb, net, 1032 stcb->asoc.max_init_times)) { 1033 /* Association was destroyed */ 1034 return (1); 1035 } 1036 stcb->asoc.dropped_special_cnt = 0; 1037 sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0, 0); 1038 if (stcb->asoc.initial_init_rto_max < net->RTO) { 1039 net->RTO = stcb->asoc.initial_init_rto_max; 1040 } 1041 if (stcb->asoc.numnets > 1) { 1042 /* If we have more than one addr use it */ 1043 struct sctp_nets *alt; 1044 1045 alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0); 1046 if (alt != stcb->asoc.primary_destination) { 1047 sctp_move_chunks_from_net(stcb, stcb->asoc.primary_destination); 1048 stcb->asoc.primary_destination = alt; 1049 } 1050 } 1051 /* Send out a new init */ 1052 sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED); 1053 return (0); 1054 } 1055 1056 /* 
 * For cookie and asconf we actually need to find and mark for resend, then
 * increment the resend counter (after all the threshold management stuff of
 * course).
 */

/*
 * COOKIE-ECHO (T1-cookie) timer expiration handler.
 *
 * Locates the outstanding COOKIE-ECHO on the control send queue, applies
 * threshold management, backs off the path, possibly moves the chunk to an
 * alternate destination, and marks it for retransmission.
 *
 * Returns 1 if the association was destroyed by threshold management,
 * 0 otherwise.  Called with the TCB lock held (see the asserts in
 * sctp_threshold_management()).
 */
int
sctp_cookie_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net SCTP_UNUSED)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *cookie;

	/* first before all else we must find the cookie */
	TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
		if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			break;
		}
	}
	if (cookie == NULL) {
		/*
		 * No COOKIE-ECHO queued.  In COOKIE_ECHOED state that is an
		 * internal inconsistency: abort the association.  In any
		 * other state it indicates the timer fired spuriously.
		 */
		if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) {
			/* FOOBAR! */
			struct mbuf *op_err;

			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			    "Cookie timer expired, but no cookie");
			inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_3;
			sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
		} else {
#ifdef INVARIANTS
			panic("Cookie timer expires in wrong state?");
#else
			SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(stcb));
			return (0);
#endif
		}
		return (0);
	}
	/* Ok we found the cookie, threshold management next */
	if (sctp_threshold_management(inp, stcb, cookie->whoTo,
	    stcb->asoc.max_init_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * Cleared threshold management, now lets backoff the address
	 * and select an alternate
	 */
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0, 0);
	alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
	if (alt != cookie->whoTo) {
		/* Transfer the chunk's net reference to the alternate. */
		sctp_free_remote_addr(cookie->whoTo);
		cookie->whoTo = alt;
		atomic_add_int(&alt->ref_count, 1);
	}
	/* Now mark the retran info */
	if (cookie->sent != SCTP_DATAGRAM_RESEND) {
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	}
	cookie->sent = SCTP_DATAGRAM_RESEND;
	cookie->flags |= CHUNK_FLAGS_FRAGMENT_OK;
	/*
	 * Now call the output routine to kick out the cookie again, Note we
	 * don't mark any chunks for retran so that FR will need to kick in
	 * to move these (or a send timer).
	 */
	return (0);
}

/*
 * Stream-reset (STRRESET) timer expiration handler.
 *
 * Finds the outstanding stream-reset request by the sequence number we sent
 * it on, applies threshold management, moves the request (and any stranded
 * ECN-Echo on the same path) to an alternate net, marks it for resend and
 * restarts the timer.
 *
 * Returns 1 if the association was destroyed, 0 otherwise.
 */
int
sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
{
	struct sctp_nets *alt, *net;
	struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;

	if (stcb->asoc.stream_reset_outstanding == 0) {
		return (0);
	}
	/* find the existing STRRESET, we use the seq number we sent out on */
	(void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
	if (strrst == NULL) {
		return (0);
	}
	net = strrst->whoTo;
	/* do threshold management */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * Cleared threshold management, now lets backoff the address
	 * and select an alternate
	 */
	sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
	alt = sctp_find_alternate_net(stcb, net, 0);
	/*
	 * The reference 'strrst' held on the old net (saved in 'net') is
	 * released by the sctp_free_remote_addr(net) below.
	 */
	strrst->whoTo = alt;
	atomic_add_int(&alt->ref_count, 1);

	/* See if a ECN Echo is also stranded */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			sctp_free_remote_addr(chk->whoTo);
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			chk->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}
	}
	if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
		/*
		 * If the address went un-reachable, we need to move to
		 * alternates for ALL chk's in queue
		 */
		sctp_move_chunks_from_net(stcb, net);
	}
	/* Drop the reference that strrst->whoTo used to hold on 'net'. */
	sctp_free_remote_addr(net);

	/* mark the retran info */
	if (strrst->sent != SCTP_DATAGRAM_RESEND)
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	strrst->sent = SCTP_DATAGRAM_RESEND;
	strrst->flags |= CHUNK_FLAGS_FRAGMENT_OK;

	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, alt);
	return (0);
}

/*
 * ASCONF timer expiration handler.
 *
 * If no ASCONF is queued this is a first send and a new ASCONF chunk is
 * composed.  Otherwise the queued ASCONF is retransmitted: threshold
 * management is applied, the chunk (and all queued ASCONFs plus any
 * stranded ECN-Echo) is moved to an alternate net and marked for resend.
 * If the peer has ignored max_send_times ASCONFs it is declared ASCONF
 * incapable and the queue is cleaned up.
 *
 * Returns 1 if the association was destroyed, 0 otherwise.
 */
int
sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *asconf, *chk;

	/* is this a first send, or a retransmission? */
	if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) {
		/* compose a new ASCONF chunk and send it */
		sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
	} else {
		/*
		 * Retransmission of the existing ASCONF is needed
		 */

		/* find the existing ASCONF */
		asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue);
		if (asconf == NULL) {
			return (0);
		}
		net = asconf->whoTo;
		/* do threshold management */
		if (sctp_threshold_management(inp, stcb, net,
		    stcb->asoc.max_send_times)) {
			/* Assoc is over */
			return (1);
		}
		if (asconf->snd_count > stcb->asoc.max_send_times) {
			/*
			 * Something is rotten: our peer is not responding to
			 * ASCONFs but apparently is to other chunks. i.e. it
			 * is not properly handling the chunk type upper bits.
			 * Mark this peer as ASCONF incapable and cleanup.
			 */
			SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n");
			sctp_asconf_cleanup(stcb);
			return (0);
		}
		/*
		 * cleared threshold management, so now backoff the net and
		 * select an alternate
		 */
		sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
		alt = sctp_find_alternate_net(stcb, net, 0);
		if (asconf->whoTo != alt) {
			/*
			 * The old reference (held via 'net') is released by
			 * the sctp_free_remote_addr(net) further below.
			 */
			asconf->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}

		/* See if an ECN Echo is also stranded */
		TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
			if ((chk->whoTo == net) &&
			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				if (chk->sent != SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				}
				atomic_add_int(&alt->ref_count, 1);
			}
		}
		TAILQ_FOREACH(chk, &stcb->asoc.asconf_send_queue, sctp_next) {
			if (chk->whoTo != alt) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
			/*
			 * NOTE(review): this tests asconf->sent (the head
			 * chunk), not chk->sent, before bumping the retran
			 * count for chk — looks asymmetric with the other
			 * queues; confirm against upstream intent before
			 * changing.
			 */
			if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT)
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			chk->sent = SCTP_DATAGRAM_RESEND;
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
		if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
			/*
			 * If the address went un-reachable, we need to move
			 * to the alternate for ALL chunks in queue
			 */
			sctp_move_chunks_from_net(stcb, net);
		}
		/* Drop the reference asconf->whoTo used to hold on 'net'. */
		sctp_free_remote_addr(net);

		/* mark the retran info */
		if (asconf->sent != SCTP_DATAGRAM_RESEND)
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
		asconf->sent = SCTP_DATAGRAM_RESEND;
		asconf->flags |= CHUNK_FLAGS_FRAGMENT_OK;

		/* send another ASCONF if any and we can do */
		sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED);
	}
	return (0);
}

/*
 * Mobility adaptation: timer that releases the saved deleted-primary
 * destination once it is no longer needed, then clears the
 * SCTP_MOBILITY_PRIM_DELETED feature flag.
 */
void
sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
{
	if (stcb->asoc.deleted_primary == NULL) {
		SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n");
		sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
		return;
	}
	SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary ");
	SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa);
	sctp_free_remote_addr(stcb->asoc.deleted_primary);
	stcb->asoc.deleted_primary = NULL;
	sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
	return;
}

/*
 * For the shutdown and shutdown-ack, we do not keep one around on the
 * control queue. This means we must generate a new one and call the general
 * chunk output routine, AFTER having done threshold management.
 * It is assumed that net is non-NULL.
 */

/*
 * SHUTDOWN (T2-shutdown) timer expiration handler.
 *
 * Applies threshold management, backs off the path, picks an alternate
 * destination, queues a fresh SHUTDOWN to it and restarts the timer.
 * Returns 1 if the association was destroyed, 0 otherwise.
 */
int
sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;

	/* first threshold management */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
	/* second select an alternative */
	alt = sctp_find_alternate_net(stcb, net, 0);

	/* third generate a shutdown into the queue for out net */
	sctp_send_shutdown(stcb, alt);

	/* fourth restart timer */
	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
	return (0);
}

/*
 * SHUTDOWN-ACK timer expiration handler.
 *
 * Same structure as sctp_shutdown_timer() but regenerates a SHUTDOWN-ACK
 * and restarts the SHUTDOWNACK timer.
 * Returns 1 if the association was destroyed, 0 otherwise.
 */
int
sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;

	/* first threshold management */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
	/* second select an alternative */
	alt = sctp_find_alternate_net(stcb, net, 0);

	/* third generate a shutdown into the queue for out net */
	sctp_send_shutdown_ack(stcb, alt);

	/* fourth restart timer */
	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
	return (0);
}

/*
 * Consistency audit run from the heartbeat timer when data is accounted
 * for (total_output_queue_size > 0) but send/sent queues are empty: it
 * re-initializes the stream scheduler if it lost track of streams,
 * reconciles the queued-chunk count against the stream out-queues, and
 * either kicks the output path (if chunks are actually queued) or zeroes
 * the stale byte count.
 */
static void
sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
{
	struct sctp_stream_queue_pending *sp;
	unsigned int i, chks_in_queue = 0;
	int being_filled = 0;

	KASSERT(inp != NULL, ("inp is NULL"));
	KASSERT(stcb != NULL, ("stcb is NULL"));
	SCTP_TCB_LOCK_ASSERT(stcb);
	KASSERT(TAILQ_EMPTY(&stcb->asoc.send_queue), ("send_queue not empty"));
	KASSERT(TAILQ_EMPTY(&stcb->asoc.sent_queue), ("sent_queue not empty"));

	if (stcb->asoc.sent_queue_retran_cnt) {
		/* Nothing is on sent_queue, so the retran count must be stale. */
		SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n",
		    stcb->asoc.sent_queue_retran_cnt);
		stcb->asoc.sent_queue_retran_cnt = 0;
	}
	if (stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) {
		/* No stream scheduler information, initialize scheduler */
		stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc);
		if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) {
			/* yep, we lost a stream or two */
			SCTP_PRINTF("Found additional streams NOT managed by scheduler, corrected\n");
		} else {
			/* no streams lost */
			stcb->asoc.total_output_queue_size = 0;
		}
	}
	/* Check to see if some data queued, if so report it */
	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
		if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
			TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
				if (sp->msg_is_complete)
					being_filled++;
				chks_in_queue++;
			}
		}
	}
	if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
		SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
		    stcb->asoc.stream_queue_cnt, chks_in_queue);
	}
	if (chks_in_queue) {
		/* call the output queue function */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
		    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
			/*
			 * Probably should go in and make it go back through
			 * and add fragments allowed
			 */
			if (being_filled == 0) {
				SCTP_PRINTF("Still nothing moved %d chunks are stuck\n",
				    chks_in_queue);
			}
		}
	} else {
		SCTP_PRINTF("Found no chunks on any queue tot:%lu\n",
		    (u_long)stcb->asoc.total_output_queue_size);
		stcb->asoc.total_output_queue_size = 0;
	}
}

/*
 * Heartbeat timer expiration handler.
 *
 * If the previous HB got no response, invalidates the cached source
 * address, backs off the path and applies threshold management.  Then
 * clears any partial-bytes-acked state, audits the stream queues if byte
 * accounting looks inconsistent, and — unless HBs are disabled on the path
 * or one was just queued by the move to PF — sends a heartbeat when the
 * HB delay has elapsed or the path is unconfirmed/PF.
 *
 * Returns 1 if the association was destroyed, 0 otherwise.
 */
int
sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	bool net_was_pf;

	/* Remember PF status before threshold management may change it. */
	net_was_pf = (net->dest_state & SCTP_ADDR_PF) != 0;
	if (net->hb_responded == 0) {
		if (net->ro._s_addr != NULL) {
			/* Invalidate the src address if we did not get
			 * a response last time.
			 */
			sctp_free_ifa(net->ro._s_addr);
			net->ro._s_addr = NULL;
			net->src_addr_selected = 0;
		}
		sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
		if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
			/* Assoc is over */
			return (1);
		}
	}
	/* Zero PBA, if it needs it */
	if (net->partial_bytes_acked > 0) {
		net->partial_bytes_acked = 0;
	}
	if ((stcb->asoc.total_output_queue_size > 0) &&
	    (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
	    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
		sctp_audit_stream_queues_for_size(inp, stcb);
	}
	if ((((net->dest_state & SCTP_ADDR_NOHB) == 0) ||
	     (net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
	    (net_was_pf || ((net->dest_state & SCTP_ADDR_PF) == 0))) {
		/* When moving to PF during threshold management, a HB has been
		   queued in that routine. */
		uint32_t ms_gone_by;

		if ((net->last_sent_time.tv_sec > 0) ||
		    (net->last_sent_time.tv_usec > 0)) {
#if defined(__FreeBSD__) && !defined(__Userspace__)
			struct timeval diff;

			SCTP_GETTIME_TIMEVAL(&diff);
			timevalsub(&diff, &net->last_sent_time);
#else
			struct timeval diff, now;

			SCTP_GETTIME_TIMEVAL(&now);
			timersub(&now, &net->last_sent_time, &diff);
#endif
			/* Milliseconds since the last send on this path. */
			ms_gone_by = (uint32_t)(diff.tv_sec * 1000) +
			    (uint32_t)(diff.tv_usec / 1000);
		} else {
			/* Never sent: force a heartbeat. */
			ms_gone_by = 0xffffffff;
		}
		if ((ms_gone_by >= net->heart_beat_delay) ||
		    (net->dest_state & SCTP_ADDR_UNCONFIRMED) ||
		    (net->dest_state & SCTP_ADDR_PF)) {
			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
		}
	}
	return (0);
}

/*
 * Path-MTU raise timer expiration handler.
 *
 * Probes whether the path MTU can be raised toward the next table value
 * (only when not tunneling over UDP, i.e. net->port == 0).  Re-selects the
 * source address if it is missing or being deleted, gathers the MTU from
 * the route and adopts min(route MTU, next MTU).  Always restarts itself.
 */
void
sctp_pathmtu_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t next_mtu, mtu;

	next_mtu = sctp_get_next_mtu(net->mtu);

	if ((next_mtu > net->mtu) && (net->port == 0)) {
		if ((net->src_addr_selected == 0) ||
		    (net->ro._s_addr == NULL) ||
		    (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
			if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
				/* Stale source ifa: drop it and re-select next pass. */
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
			} else if (net->ro._s_addr == NULL) {
#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
				if (net->ro._l_addr.sa.sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;

					/* KAME hack: embed scopeid */
#if defined(__APPLE__) && !defined(__Userspace__)
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
					(void)in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL);
#else
					(void)in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL);
#endif
#elif defined(SCTP_KAME)
					(void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
#else
					(void)in6_embedscope(&sin6->sin6_addr, sin6);
#endif
				}
#endif

				net->ro._s_addr = sctp_source_address_selection(inp,
				    stcb,
				    (sctp_route_t *)&net->ro,
				    net, 0, stcb->asoc.vrf_id);
#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
				if (net->ro._l_addr.sa.sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;

#ifdef SCTP_KAME
					(void)sa6_recoverscope(sin6);
#else
					(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
#endif /* SCTP_KAME */
				}
#endif /* INET6 */
			}
			if (net->ro._s_addr)
				net->src_addr_selected = 1;
		}
		if (net->ro._s_addr) {
			/*
			 * NOTE(review): the second macro argument is
			 * "&net->ro._s_addr.sa" although _s_addr is a
			 * pointer; this only compiles because the macro
			 * discards that argument — confirm against the
			 * SCTP_GATHER_MTU_FROM_ROUTE definition before
			 * touching.
			 */
#if defined(__FreeBSD__) && !defined(__Userspace__)
			mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_nh);
#else
			mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_rt);
#endif
#if defined(INET) || defined(INET6)
			/*
			 * NOTE(review): this branch is unreachable here —
			 * the enclosing block requires net->port == 0.
			 */
			if (net->port) {
				mtu -= sizeof(struct udphdr);
			}
#endif
			if (mtu > next_mtu) {
				net->mtu = next_mtu;
			} else {
				net->mtu = mtu;
			}
		}
	}
	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
}

/*
 * Autoclose timer expiration handler.
 *
 * If autoclose is enabled and the association has been idle (no send or
 * receive) for at least sctp_autoclose_ticks, flushes output and — when
 * both send and sent queues are empty — initiates the SHUTDOWN sequence.
 * Otherwise re-arms the timer for the remaining idle time.
 */
void
sctp_autoclose_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
{
	struct timeval tn, *tim_touse;
	struct sctp_association *asoc;
	uint32_t ticks_gone_by;

	(void)SCTP_GETTIME_TIMEVAL(&tn);
	if (stcb->asoc.sctp_autoclose_ticks > 0 &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		/* Auto close is on */
		asoc = &stcb->asoc;
		/* pick the time to use: the later of last rcvd / last sent */
		if (asoc->time_last_rcvd.tv_sec >
		    asoc->time_last_sent.tv_sec) {
			tim_touse = &asoc->time_last_rcvd;
		} else {
			tim_touse = &asoc->time_last_sent;
		}
		/* Now has long enough transpired to autoclose? */
		ticks_gone_by = sctp_secs_to_ticks((uint32_t)(tn.tv_sec - tim_touse->tv_sec));
		if (ticks_gone_by >= asoc->sctp_autoclose_ticks) {
			/*
			 * autoclose time has hit, call the output routine,
			 * which should do nothing just to be SURE we don't
			 * have hanging data. We can then safely check the
			 * queues and know that we are clear to send
			 * shutdown
			 */
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
			/* Are we clean? */
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue)) {
				/*
				 * there is nothing queued to send, so I'm
				 * done...
				 */
				if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
					/* only send SHUTDOWN 1st time thru */
					struct sctp_nets *net;

					if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
					    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
					sctp_stop_timers_for_shutdown(stcb);
					if (stcb->asoc.alternate) {
						net = stcb->asoc.alternate;
					} else {
						net = stcb->asoc.primary_destination;
					}
					sctp_send_shutdown(stcb, net);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
					    stcb->sctp_ep, stcb, net);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					    stcb->sctp_ep, stcb, NULL);
				}
			}
		} else {
			/*
			 * No auto close at this time, reset t-o to check
			 * later
			 */
			uint32_t tmp;

			/* fool the timer startup to use the time left */
			tmp = asoc->sctp_autoclose_ticks;
			asoc->sctp_autoclose_ticks -= ticks_gone_by;
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
			/* restore the real tick value */
			asoc->sctp_autoclose_ticks = tmp;
		}
	}
}