tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

sctp_cc_functions.c (77563B)


      1 /*-
      2 * SPDX-License-Identifier: BSD-3-Clause
      3 *
      4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
      5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
      6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
      7 *
      8 * Redistribution and use in source and binary forms, with or without
      9 * modification, are permitted provided that the following conditions are met:
     10 *
     11 * a) Redistributions of source code must retain the above copyright notice,
     12 *    this list of conditions and the following disclaimer.
     13 *
     14 * b) Redistributions in binary form must reproduce the above copyright
     15 *    notice, this list of conditions and the following disclaimer in
     16 *    the documentation and/or other materials provided with the distribution.
     17 *
     18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
     19 *    contributors may be used to endorse or promote products derived
     20 *    from this software without specific prior written permission.
     21 *
     22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
     24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     32 * THE POSSIBILITY OF SUCH DAMAGE.
     33 */
     34 
     35 #include <netinet/sctp_os.h>
     36 #include <netinet/sctp_var.h>
     37 #include <netinet/sctp_sysctl.h>
     38 #include <netinet/sctp_pcb.h>
     39 #include <netinet/sctp_header.h>
     40 #include <netinet/sctputil.h>
     41 #include <netinet/sctp_output.h>
     42 #include <netinet/sctp_input.h>
     43 #include <netinet/sctp_indata.h>
     44 #include <netinet/sctp_uio.h>
     45 #include <netinet/sctp_timer.h>
     46 #include <netinet/sctp_auth.h>
     47 #include <netinet/sctp_asconf.h>
     48 #if defined(__FreeBSD__) && !defined(__Userspace__)
     49 #include <netinet/sctp_kdtrace.h>
     50 #endif
     51 
     52 #if defined(_WIN32) && defined(__MINGW32__)
     53 #include <minmax.h>
     54 #endif
     55 
     56 #define SHIFT_MPTCP_MULTI_N 40
     57 #define SHIFT_MPTCP_MULTI_Z 16
     58 #define SHIFT_MPTCP_MULTI 8
     59 
     60 #ifdef KDTRACE_HOOKS
     61 #define __dtrace
     62 #else
     63 #define __dtrace __unused
     64 #endif
     65 
     66 static void
     67 sctp_enforce_cwnd_limit(struct sctp_association *assoc, struct sctp_nets *net)
     68 {
     69 if ((assoc->max_cwnd > 0) &&
     70     (net->cwnd > assoc->max_cwnd) &&
     71     (net->cwnd > (net->mtu - sizeof(struct sctphdr)))) {
     72 	net->cwnd = assoc->max_cwnd;
     73 	if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
     74 		net->cwnd = net->mtu - sizeof(struct sctphdr);
     75 	}
     76 }
     77 }
     78 
     79 static void
     80 sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
     81 {
     82 struct sctp_association *assoc;
     83 uint32_t cwnd_in_mtu;
     84 
     85 assoc = &stcb->asoc;
     86 cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
     87 if (cwnd_in_mtu == 0) {
     88 	/* Using 0 means that the value of RFC 4960 is used. */
     89 	net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
     90 } else {
     91 	/*
     92 	 * We take the minimum of the burst limit and the
     93 	 * initial congestion window.
     94 	 */
     95 	if ((assoc->max_burst > 0) && (cwnd_in_mtu > assoc->max_burst))
     96 		cwnd_in_mtu = assoc->max_burst;
     97 	net->cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
     98 }
     99 if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
    100     (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
    101 	/* In case of resource pooling initialize appropriately */
    102 	net->cwnd /= assoc->numnets;
    103 	if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
    104 		net->cwnd = net->mtu - sizeof(struct sctphdr);
    105 	}
    106 }
    107 sctp_enforce_cwnd_limit(assoc, net);
    108 net->ssthresh = assoc->peers_rwnd;
    109 #if defined(__FreeBSD__) && !defined(__Userspace__)
    110 SDT_PROBE5(sctp, cwnd, net, init,
    111            stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
    112            0, net->cwnd);
    113 #endif
    114 if (SCTP_BASE_SYSCTL(sctp_logging_level) &
    115     (SCTP_CWND_MONITOR_ENABLE|SCTP_CWND_LOGGING_ENABLE)) {
    116 	sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
    117 }
    118 }
    119 
    120 static void
    121 sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    122                          struct sctp_association *asoc)
    123 {
    124 struct sctp_nets *net;
    125 uint32_t t_ssthresh, t_cwnd;
    126 uint64_t t_ucwnd_sbw;
    127 
    128 /* MT FIXME: Don't compute this over and over again */
    129 t_ssthresh = 0;
    130 t_cwnd = 0;
    131 t_ucwnd_sbw = 0;
    132 if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
    133     (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
    134 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
    135 		t_ssthresh += net->ssthresh;
    136 		t_cwnd += net->cwnd;
    137 		if (net->lastsa > 0) {
    138 			t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)net->lastsa;
    139 		}
    140 	}
    141 	if (t_ucwnd_sbw == 0) {
    142 		t_ucwnd_sbw = 1;
    143 	}
    144 }
    145 
    146 /*-
    147  * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
    148  * (net->fast_retran_loss_recovery == 0)))
    149  */
    150 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
    151 	if ((asoc->fast_retran_loss_recovery == 0) ||
    152 	    (asoc->sctp_cmt_on_off > 0)) {
    153 		/* out of a RFC2582 Fast recovery window? */
    154 		if (net->net_ack > 0) {
    155 			/*
    156 			 * per section 7.2.3, are there any
    157 			 * destinations that had a fast retransmit
    158 			 * to them. If so what we need to do is
    159 			 * adjust ssthresh and cwnd.
    160 			 */
    161 			struct sctp_tmit_chunk *lchk;
    162 			int old_cwnd = net->cwnd;
    163 
    164 			if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
    165 			    (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
    166 				if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) {
    167 					net->ssthresh = (uint32_t)(((uint64_t)4 *
    168 				                                    (uint64_t)net->mtu *
    169 				                                    (uint64_t)net->ssthresh) /
    170 					                           (uint64_t)t_ssthresh);
    171 				}
    172 				if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2) {
    173 					uint32_t srtt;
    174 
    175 					srtt = net->lastsa;
    176 					/* lastsa>>3;  we don't need to devide ...*/
    177 					if (srtt == 0) {
    178 						srtt = 1;
    179 					}
    180 					/* Short Version => Equal to Contel Version MBe */
    181 					net->ssthresh = (uint32_t) (((uint64_t)4 *
    182 					                             (uint64_t)net->mtu *
    183 					                             (uint64_t)net->cwnd) /
    184 					                            ((uint64_t)srtt *
    185 					                             t_ucwnd_sbw));
    186 								     /* INCREASE FACTOR */;
    187 				}
    188 				if ((net->cwnd > t_cwnd / 2) &&
    189 				    (net->ssthresh < net->cwnd - t_cwnd / 2)) {
    190 					net->ssthresh = net->cwnd - t_cwnd / 2;
    191 				}
    192 				if (net->ssthresh < net->mtu) {
    193 					net->ssthresh = net->mtu;
    194 				}
    195 			} else {
    196 				net->ssthresh = net->cwnd / 2;
    197 				if (net->ssthresh < (net->mtu * 2)) {
    198 					net->ssthresh = 2 * net->mtu;
    199 				}
    200 			}
    201 			net->cwnd = net->ssthresh;
    202 			sctp_enforce_cwnd_limit(asoc, net);
    203 #if defined(__FreeBSD__) && !defined(__Userspace__)
    204 			SDT_PROBE5(sctp, cwnd, net, fr,
    205 			           stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
    206 			           old_cwnd, net->cwnd);
    207 #endif
    208 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
    209 				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
    210 					SCTP_CWND_LOG_FROM_FR);
    211 			}
    212 			lchk = TAILQ_FIRST(&asoc->send_queue);
    213 
    214 			net->partial_bytes_acked = 0;
    215 			/* Turn on fast recovery window */
    216 			asoc->fast_retran_loss_recovery = 1;
    217 			if (lchk == NULL) {
    218 				/* Mark end of the window */
    219 				asoc->fast_recovery_tsn = asoc->sending_seq - 1;
    220 			} else {
    221 				asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
    222 			}
    223 
    224 			/*
    225 			 * CMT fast recovery -- per destination
    226 			 * recovery variable.
    227 			 */
    228 			net->fast_retran_loss_recovery = 1;
    229 
    230 			if (lchk == NULL) {
    231 				/* Mark end of the window */
    232 				net->fast_recovery_tsn = asoc->sending_seq - 1;
    233 			} else {
    234 				net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
    235 			}
    236 
    237 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
    238 					stcb->sctp_ep, stcb, net,
    239 			                SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_1);
    240 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
    241 					 stcb->sctp_ep, stcb, net);
    242 		}
    243 	} else if (net->net_ack > 0) {
    244 		/*
    245 		 * Mark a peg that we WOULD have done a cwnd
    246 		 * reduction but RFC2582 prevented this action.
    247 		 */
    248 		SCTP_STAT_INCR(sctps_fastretransinrtt);
    249 	}
    250 }
    251 }
    252 
    253 /* Defines for instantaneous bw decisions */
    254 #define SCTP_INST_LOOSING 1 /* Losing to other flows */
    255 #define SCTP_INST_NEUTRAL 2 /* Neutral, no indication */
    256 #define SCTP_INST_GAINING 3 /* Gaining, step down possible */
    257 
    258 #if defined(__FreeBSD__) && !defined(__Userspace__)
    259 static int
    260 cc_bw_same(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw,
    261           uint64_t rtt_offset, uint64_t vtag, uint8_t inst_ind)
    262 #else
    263 static int
    264 cc_bw_same(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nbw,
    265           uint64_t rtt_offset, uint8_t inst_ind)
    266 #endif
    267 {
    268 #if defined(__FreeBSD__) && !defined(__Userspace__)
    269 uint64_t oth __dtrace, probepoint __dtrace;
    270 #endif
    271 
    272 #if defined(__FreeBSD__) && !defined(__Userspace__)
    273 probepoint = (((uint64_t)net->cwnd) << 32);
    274 #endif
    275 if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
    276 	/*
    277 	 * rtt increased
    278 	 * we don't update bw.. so we don't
    279 	 * update the rtt either.
    280 	 */
    281 #if defined(__FreeBSD__) && !defined(__Userspace__)
    282 	/* Probe point 5 */
    283 	probepoint |=  ((5 << 16) | 1);
    284 	SDT_PROBE5(sctp, cwnd, net, rttvar,
    285 	           vtag,
    286 	           ((net->cc_mod.rtcc.lbw << 32) | nbw),
    287 	           ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
    288 	           net->flight_size,
    289 	           probepoint);
    290 #endif
    291 	if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
    292 		if (net->cc_mod.rtcc.last_step_state == 5)
    293 			net->cc_mod.rtcc.step_cnt++;
    294 		else
    295 			net->cc_mod.rtcc.step_cnt = 1;
    296 		net->cc_mod.rtcc.last_step_state = 5;
    297 		if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
    298 		    ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
    299 		     ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
    300 			/* Try a step down */
    301 #if defined(__FreeBSD__) && !defined(__Userspace__)
    302 			oth = net->cc_mod.rtcc.vol_reduce;
    303 			oth <<= 16;
    304 			oth |= net->cc_mod.rtcc.step_cnt;
    305 			oth <<= 16;
    306 			oth |= net->cc_mod.rtcc.last_step_state;
    307 			SDT_PROBE5(sctp, cwnd, net, rttstep,
    308 			           vtag,
    309 			           ((net->cc_mod.rtcc.lbw << 32) | nbw),
    310 			           ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
    311 			           oth,
    312 			           probepoint);
    313 #endif
    314 			if (net->cwnd > (4 * net->mtu)) {
    315 				net->cwnd -= net->mtu;
    316 				net->cc_mod.rtcc.vol_reduce++;
    317 			} else {
    318 				net->cc_mod.rtcc.step_cnt = 0;
    319 			}
    320 		}
    321 	}
    322 	return (1);
    323 }
    324 if (net->rtt < net->cc_mod.rtcc.lbw_rtt-rtt_offset) {
    325 	/*
    326 	 * rtt decreased, there could be more room.
    327 	 * we update both the bw and the rtt here to
    328 	 * lock this in as a good step down.
    329 	 */
    330 #if defined(__FreeBSD__) && !defined(__Userspace__)
    331 	/* Probe point 6 */
    332 	probepoint |=  ((6 << 16) | 0);
    333 	SDT_PROBE5(sctp, cwnd, net, rttvar,
    334 	           vtag,
    335 	           ((net->cc_mod.rtcc.lbw << 32) | nbw),
    336 	           ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
    337 	           net->flight_size,
    338 	           probepoint);
    339 #endif
    340 	if (net->cc_mod.rtcc.steady_step) {
    341 #if defined(__FreeBSD__) && !defined(__Userspace__)
    342 		oth = net->cc_mod.rtcc.vol_reduce;
    343 		oth <<= 16;
    344 		oth |= net->cc_mod.rtcc.step_cnt;
    345 		oth <<= 16;
    346 		oth |= net->cc_mod.rtcc.last_step_state;
    347 		SDT_PROBE5(sctp, cwnd, net, rttstep,
    348 		           vtag,
    349 		           ((net->cc_mod.rtcc.lbw << 32) | nbw),
    350 		           ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
    351 		           oth,
    352 		           probepoint);
    353 #endif
    354 		if ((net->cc_mod.rtcc.last_step_state == 5) &&
    355 		    (net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step)) {
    356 			/* Step down worked */
    357 			net->cc_mod.rtcc.step_cnt = 0;
    358 			return (1);
    359 		} else {
    360 			net->cc_mod.rtcc.last_step_state = 6;
    361 			net->cc_mod.rtcc.step_cnt = 0;
    362 		}
    363 	}
    364 	net->cc_mod.rtcc.lbw = nbw;
    365 	net->cc_mod.rtcc.lbw_rtt = net->rtt;
    366 	net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
    367 	if (inst_ind == SCTP_INST_GAINING)
    368 		return (1);
    369 	else if (inst_ind == SCTP_INST_NEUTRAL)
    370 		return (1);
    371 	else
    372 		return (0);
    373 }
    374 /* Ok bw and rtt remained the same .. no update to any
    375  */
    376 #if defined(__FreeBSD__) && !defined(__Userspace__)
    377 /* Probe point 7 */
    378 probepoint |=  ((7 << 16) | net->cc_mod.rtcc.ret_from_eq);
    379 SDT_PROBE5(sctp, cwnd, net, rttvar,
    380            vtag,
    381            ((net->cc_mod.rtcc.lbw << 32) | nbw),
    382            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
    383            net->flight_size,
    384            probepoint);
    385 #endif
    386 if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
    387 	if (net->cc_mod.rtcc.last_step_state == 5)
    388 		net->cc_mod.rtcc.step_cnt++;
    389 	else
    390 		net->cc_mod.rtcc.step_cnt = 1;
    391 	net->cc_mod.rtcc.last_step_state = 5;
    392 	if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
    393 	    ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
    394 	     ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
    395 		/* Try a step down */
    396 		if (net->cwnd > (4 * net->mtu)) {
    397 			net->cwnd -= net->mtu;
    398 			net->cc_mod.rtcc.vol_reduce++;
    399 			return (1);
    400 		} else {
    401 			net->cc_mod.rtcc.step_cnt = 0;
    402 		}
    403 	}
    404 }
    405 if (inst_ind == SCTP_INST_GAINING)
    406 	return (1);
    407 else if (inst_ind == SCTP_INST_NEUTRAL)
    408 	return (1);
    409 else
    410 	return ((int)net->cc_mod.rtcc.ret_from_eq);
    411 }
    412 
    413 #if defined(__FreeBSD__) && !defined(__Userspace__)
    414 static int
    415 cc_bw_decrease(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset,
    416               uint64_t vtag, uint8_t inst_ind)
    417 #else
    418 static int
    419 cc_bw_decrease(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset,
    420               uint8_t inst_ind)
    421 #endif
    422 {
    423 #if defined(__FreeBSD__) && !defined(__Userspace__)
    424 uint64_t oth __dtrace, probepoint __dtrace;
    425 #endif
    426 
    427 /* Bandwidth decreased.*/
    428 #if defined(__FreeBSD__) && !defined(__Userspace__)
    429 probepoint = (((uint64_t)net->cwnd) << 32);
    430 #endif
    431 if (net->rtt  > net->cc_mod.rtcc.lbw_rtt+rtt_offset) {
    432 	/* rtt increased */
    433 	/* Did we add more */
    434 	if ((net->cwnd > net->cc_mod.rtcc.cwnd_at_bw_set) &&
    435 	    (inst_ind != SCTP_INST_LOOSING)) {
    436 		/* We caused it maybe.. back off? */
    437 #if defined(__FreeBSD__) && !defined(__Userspace__)
    438 		/* PROBE POINT 1 */
    439 		probepoint |=  ((1 << 16) | 1);
    440 		SDT_PROBE5(sctp, cwnd, net, rttvar,
    441 		           vtag,
    442 		           ((net->cc_mod.rtcc.lbw << 32) | nbw),
    443 		           ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
    444 		           net->flight_size,
    445 		           probepoint);
    446 #endif
    447 		if (net->cc_mod.rtcc.ret_from_eq) {
    448 			/* Switch over to CA if we are less aggressive */
    449 			net->ssthresh = net->cwnd-1;
    450 			net->partial_bytes_acked = 0;
    451 		}
    452 		return (1);
    453 	}
    454 #if defined(__FreeBSD__) && !defined(__Userspace__)
    455 	/* Probe point 2 */
    456 	probepoint |=  ((2 << 16) | 0);
    457 	SDT_PROBE5(sctp, cwnd, net, rttvar,
    458 	           vtag,
    459 	           ((net->cc_mod.rtcc.lbw << 32) | nbw),
    460 	           ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
    461 	           net->flight_size,
    462 	           probepoint);
    463 #endif
    464 	/* Someone else - fight for more? */
    465 	if (net->cc_mod.rtcc.steady_step) {
    466 #if defined(__FreeBSD__) && !defined(__Userspace__)
    467 		oth = net->cc_mod.rtcc.vol_reduce;
    468 		oth <<= 16;
    469 		oth |= net->cc_mod.rtcc.step_cnt;
    470 		oth <<= 16;
    471 		oth |= net->cc_mod.rtcc.last_step_state;
    472 		SDT_PROBE5(sctp, cwnd, net, rttstep,
    473 		           vtag,
    474 		           ((net->cc_mod.rtcc.lbw << 32) | nbw),
    475 		           ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
    476 		           oth,
    477 			  probepoint);
    478 #endif
    479 		/* Did we voluntarily give up some? if so take
    480 		 * one back please
    481 		 */
    482 		if ((net->cc_mod.rtcc.vol_reduce) &&
    483 		    (inst_ind != SCTP_INST_GAINING)) {
    484 			net->cwnd += net->mtu;
    485 			sctp_enforce_cwnd_limit(&stcb->asoc, net);
    486 			net->cc_mod.rtcc.vol_reduce--;
    487 		}
    488 		net->cc_mod.rtcc.last_step_state = 2;
    489 		net->cc_mod.rtcc.step_cnt = 0;
    490 	}
    491 	goto out_decision;
    492 } else  if (net->rtt  < net->cc_mod.rtcc.lbw_rtt-rtt_offset) {
    493 	/* bw & rtt decreased */
    494 #if defined(__FreeBSD__) && !defined(__Userspace__)
    495 	/* Probe point 3 */
    496 	probepoint |=  ((3 << 16) | 0);
    497 	SDT_PROBE5(sctp, cwnd, net, rttvar,
    498 	           vtag,
    499 	           ((net->cc_mod.rtcc.lbw << 32) | nbw),
    500 	           ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
    501 	           net->flight_size,
    502 	           probepoint);
    503 #endif
    504 	if (net->cc_mod.rtcc.steady_step) {
    505 #if defined(__FreeBSD__) && !defined(__Userspace__)
    506 		oth = net->cc_mod.rtcc.vol_reduce;
    507 		oth <<= 16;
    508 		oth |= net->cc_mod.rtcc.step_cnt;
    509 		oth <<= 16;
    510 		oth |= net->cc_mod.rtcc.last_step_state;
    511 		SDT_PROBE5(sctp, cwnd, net, rttstep,
    512 		           vtag,
    513 		           ((net->cc_mod.rtcc.lbw << 32) | nbw),
    514 		           ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
    515 		           oth,
    516 		           probepoint);
    517 #endif
    518 		if ((net->cc_mod.rtcc.vol_reduce) &&
    519 		    (inst_ind != SCTP_INST_GAINING)) {
    520 			net->cwnd += net->mtu;
    521 			sctp_enforce_cwnd_limit(&stcb->asoc, net);
    522 			net->cc_mod.rtcc.vol_reduce--;
    523 		}
    524 		net->cc_mod.rtcc.last_step_state = 3;
    525 		net->cc_mod.rtcc.step_cnt = 0;
    526 	}
    527 	goto out_decision;
    528 }
    529 /* The bw decreased but rtt stayed the same */
    530 #if defined(__FreeBSD__) && !defined(__Userspace__)
    531 /* Probe point 4 */
    532 probepoint |=  ((4 << 16) | 0);
    533 SDT_PROBE5(sctp, cwnd, net, rttvar,
    534            vtag,
    535            ((net->cc_mod.rtcc.lbw << 32) | nbw),
    536            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
    537            net->flight_size,
    538            probepoint);
    539 #endif
    540 if (net->cc_mod.rtcc.steady_step) {
    541 #if defined(__FreeBSD__) && !defined(__Userspace__)
    542 	oth = net->cc_mod.rtcc.vol_reduce;
    543 	oth <<= 16;
    544 	oth |= net->cc_mod.rtcc.step_cnt;
    545 	oth <<= 16;
    546 	oth |= net->cc_mod.rtcc.last_step_state;
    547 	SDT_PROBE5(sctp, cwnd, net, rttstep,
    548 	           vtag,
    549 	           ((net->cc_mod.rtcc.lbw << 32) | nbw),
    550 	           ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
    551 	           oth,
    552 	           probepoint);
    553 #endif
    554 	if ((net->cc_mod.rtcc.vol_reduce) &&
    555 	    (inst_ind != SCTP_INST_GAINING)) {
    556 		net->cwnd += net->mtu;
    557 		sctp_enforce_cwnd_limit(&stcb->asoc, net);
    558 		net->cc_mod.rtcc.vol_reduce--;
    559 	}
    560 	net->cc_mod.rtcc.last_step_state = 4;
    561 	net->cc_mod.rtcc.step_cnt = 0;
    562 }
    563 out_decision:
    564 net->cc_mod.rtcc.lbw = nbw;
    565 net->cc_mod.rtcc.lbw_rtt = net->rtt;
    566 net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
    567 if (inst_ind == SCTP_INST_GAINING) {
    568 	return (1);
    569 } else {
    570 	return (0);
    571 }
    572 }
    573 
    574 #if defined(__FreeBSD__) && !defined(__Userspace__)
    575 static int
    576 cc_bw_increase(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t vtag)
    577 #else
    578 static int
    579 cc_bw_increase(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nbw)
    580 #endif
    581 {
    582 #if defined(__FreeBSD__) && !defined(__Userspace__)
    583 uint64_t oth __dtrace, probepoint __dtrace;
    584 
    585 #endif
    586 /* BW increased, so update and
    587  * return 0, since all actions in
    588  * our table say to do the normal CC
    589  * update. Note that we pay no attention to
    590  * the inst_ind since our overall sum is increasing.
    591  */
    592 #if defined(__FreeBSD__) && !defined(__Userspace__)
    593 /* PROBE POINT 0 */
    594 probepoint = (((uint64_t)net->cwnd) << 32);
    595 SDT_PROBE5(sctp, cwnd, net, rttvar,
    596            vtag,
    597            ((net->cc_mod.rtcc.lbw << 32) | nbw),
    598            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
    599            net->flight_size,
    600            probepoint);
    601 #endif
    602 if (net->cc_mod.rtcc.steady_step) {
    603 #if defined(__FreeBSD__) && !defined(__Userspace__)
    604 	oth = net->cc_mod.rtcc.vol_reduce;
    605 	oth <<= 16;
    606 	oth |= net->cc_mod.rtcc.step_cnt;
    607 	oth <<= 16;
    608 	oth |= net->cc_mod.rtcc.last_step_state;
    609 	SDT_PROBE5(sctp, cwnd, net, rttstep,
    610 	           vtag,
    611 	           ((net->cc_mod.rtcc.lbw << 32) | nbw),
    612 	           ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
    613 	           oth,
    614 	           probepoint);
    615 #endif
    616 	net->cc_mod.rtcc.last_step_state = 0;
    617 	net->cc_mod.rtcc.step_cnt = 0;
    618 	net->cc_mod.rtcc.vol_reduce = 0;
    619 }
    620 net->cc_mod.rtcc.lbw = nbw;
    621 net->cc_mod.rtcc.lbw_rtt = net->rtt;
    622 net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
    623 return (0);
    624 }
    625 
    626 /* RTCC Algorithm to limit growth of cwnd, return
    627 * true if you want to NOT allow cwnd growth
    628 */
    629 static int
    630 cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw)
    631 {
    632 uint64_t bw_offset, rtt_offset;
    633 #if defined(__FreeBSD__) && !defined(__Userspace__)
    634 uint64_t probepoint __dtrace, rtt, vtag;
    635 #endif
    636 uint64_t bytes_for_this_rtt, inst_bw;
    637 uint64_t div, inst_off;
    638 int bw_shift;
    639 uint8_t inst_ind;
    640 int ret;
    641 /*-
    642  * Here we need to see if we want
    643  * to limit cwnd growth due to increase
    644  * in overall rtt but no increase in bw.
    645  * We use the following table to figure
    646  * out what we should do. When we return
    647  * 0, cc update goes on as planned. If we
    648  * return 1, then no cc update happens and cwnd
    649  * stays where it is at.
    650  * ----------------------------------
    651  *   BW    |    RTT   | Action
    652  * *********************************
    653  *   INC   |    INC   | return 0
    654  * ----------------------------------
    655  *   INC   |    SAME  | return 0
    656  * ----------------------------------
    657  *   INC   |    DECR  | return 0
    658  * ----------------------------------
    659  *   SAME  |    INC   | return 1
    660  * ----------------------------------
    661  *   SAME  |    SAME  | return 1
    662  * ----------------------------------
    663  *   SAME  |    DECR  | return 0
    664  * ----------------------------------
    665  *   DECR  |    INC   | return 0 or 1 based on if we caused.
    666  * ----------------------------------
    667  *   DECR  |    SAME  | return 0
    668  * ----------------------------------
    669  *   DECR  |    DECR  | return 0
    670  * ----------------------------------
    671  *
    672  * We are a bit fuzz on what an increase or
    673  * decrease is. For BW it is the same if
    674  * it did not change within 1/64th. For
    675  * RTT it stayed the same if it did not
    676  * change within 1/32nd
    677  */
    678 bw_shift = SCTP_BASE_SYSCTL(sctp_rttvar_bw);
    679 #if defined(__FreeBSD__) && !defined(__Userspace__)
    680 rtt = stcb->asoc.my_vtag;
    681 vtag = (rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) | (stcb->rport);
    682 probepoint = (((uint64_t)net->cwnd) << 32);
    683 rtt = net->rtt;
    684 #endif
    685 if (net->cc_mod.rtcc.rtt_set_this_sack) {
    686 	net->cc_mod.rtcc.rtt_set_this_sack = 0;
    687 	bytes_for_this_rtt = net->cc_mod.rtcc.bw_bytes - net->cc_mod.rtcc.bw_bytes_at_last_rttc;
    688 	net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
    689 	if (net->rtt) {
    690 		div = net->rtt / 1000;
    691 		if (div) {
    692 			inst_bw = bytes_for_this_rtt / div;
    693 			inst_off = inst_bw >> bw_shift;
    694 			if (inst_bw > nbw)
    695 				inst_ind = SCTP_INST_GAINING;
    696 			else if ((inst_bw+inst_off) < nbw)
    697 				inst_ind = SCTP_INST_LOOSING;
    698 			else
    699 				inst_ind = SCTP_INST_NEUTRAL;
    700 #if defined(__FreeBSD__) && !defined(__Userspace__)
    701 			probepoint |=  ((0xb << 16) | inst_ind);
    702 #endif
    703 		} else {
    704 			inst_ind = net->cc_mod.rtcc.last_inst_ind;
    705 #if defined(__FreeBSD__) && !defined(__Userspace__)
    706 			inst_bw = bytes_for_this_rtt / (uint64_t)(net->rtt);
    707 			/* Can't determine do not change */
    708 			probepoint |=  ((0xc << 16) | inst_ind);
    709 #endif
    710 		}
    711 	} else {
    712 		inst_ind = net->cc_mod.rtcc.last_inst_ind;
    713 #if defined(__FreeBSD__) && !defined(__Userspace__)
    714 		inst_bw = bytes_for_this_rtt;
    715 		/* Can't determine do not change */
    716 		probepoint |=  ((0xd << 16) | inst_ind);
    717 #endif
    718 	}
    719 #if defined(__FreeBSD__) && !defined(__Userspace__)
    720 	SDT_PROBE5(sctp, cwnd, net, rttvar,
    721 	           vtag,
    722 	           ((nbw << 32) | inst_bw),
    723 	           ((net->cc_mod.rtcc.lbw_rtt << 32) | rtt),
    724 	           net->flight_size,
    725 	           probepoint);
    726 #endif
    727 } else {
    728 	/* No rtt measurement, use last one */
    729 	inst_ind = net->cc_mod.rtcc.last_inst_ind;
    730 }
    731 bw_offset = net->cc_mod.rtcc.lbw >> bw_shift;
    732 if (nbw > net->cc_mod.rtcc.lbw + bw_offset) {
    733 #if defined(__FreeBSD__) && !defined(__Userspace__)
    734 	ret = cc_bw_increase(stcb, net, nbw, vtag);
    735 #else
    736 	ret = cc_bw_increase(stcb, net, nbw);
    737 #endif
    738 	goto out;
    739 }
    740 rtt_offset = net->cc_mod.rtcc.lbw_rtt >> SCTP_BASE_SYSCTL(sctp_rttvar_rtt);
    741 if (nbw < net->cc_mod.rtcc.lbw - bw_offset) {
    742 #if defined(__FreeBSD__) && !defined(__Userspace__)
    743 	ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, vtag, inst_ind);
    744 #else
    745 	ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, inst_ind);
    746 #endif
    747 	goto out;
    748 }
    749 /* If we reach here then
    750  * we are in a situation where
    751  * the bw stayed the same.
    752  */
    753 #if defined(__FreeBSD__) && !defined(__Userspace__)
    754 ret = cc_bw_same(stcb, net, nbw, rtt_offset, vtag, inst_ind);
    755 #else
    756 ret = cc_bw_same(stcb, net, nbw, rtt_offset, inst_ind);
    757 #endif
    758 out:
    759 net->cc_mod.rtcc.last_inst_ind = inst_ind;
    760 return (ret);
    761 }
    762 
/*
 * Common congestion-window update run when a SACK has been processed.
 * Shared by the plain RFC 2581 module (use_rtcc == 0) and the RTCC
 * variant (use_rtcc == 1, which adds bandwidth-probe bookkeeping).
 *
 * For every destination address that had bytes newly acked
 * (net->net_ack != 0) this applies either slow start or congestion
 * avoidance.  When one of the CMT resource-pooling modes is enabled
 * (RPV1, RPV2, or the MPTCP-like coupling) the per-path increment is
 * scaled by association-wide totals computed in the first loop below.
 *
 * stcb/asoc     - association being updated (asoc == &stcb->asoc here).
 * accum_moved   - nonzero if the cumulative ack point advanced.
 * reneged_all   - unused in this implementation.
 * will_exit     - nonzero if fast recovery is about to be exited.
 * use_rtcc      - selects the RTCC bandwidth-tracking extension.
 */
static void
sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb,
			   struct sctp_association *asoc,
			   int accum_moved, int reneged_all SCTP_UNUSED, int will_exit, int use_rtcc)
{
	struct sctp_nets *net;
#if defined(__FreeBSD__) && !defined(__Userspace__)
	int old_cwnd __dtrace;
#endif
	uint32_t t_ssthresh, incr;
	uint64_t t_ucwnd_sbw;
	uint64_t t_path_mptcp;
	uint64_t mptcp_like_alpha;
	uint32_t srtt;
	uint64_t max_path;

	/* MT FIXME: Don't compute this over and over again */
	/*
	 * Association-wide totals used by the CMT scaling modes:
	 *   t_ssthresh   - sum of ssthresh over all paths (RPV1),
	 *   t_ucwnd_sbw  - sum of cwnd/srtt over all paths (RPV2),
	 *   t_path_mptcp / max_path - terms of the MPTCP-like alpha.
	 */
	t_ssthresh = 0;
	t_ucwnd_sbw = 0;
	t_path_mptcp = 0;
	mptcp_like_alpha = 1;
	if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
	    (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2) ||
	    (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_MPTCP)) {
		max_path = 0;
		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			t_ssthresh += net->ssthresh;
			/* lastsa>>3;  we don't need to divide ... */
			srtt = net->lastsa;
			if (srtt > 0) {
				uint64_t tmp;

				t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)srtt;
				t_path_mptcp += (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_Z) /
				                (((uint64_t)net->mtu) * (uint64_t)srtt);
				tmp = (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_N) /
				      ((uint64_t)net->mtu * (uint64_t)(srtt * srtt));
				if (tmp > max_path) {
					max_path = tmp;
				}
			}
		}
		if (t_path_mptcp > 0) {
			mptcp_like_alpha = max_path / (t_path_mptcp * t_path_mptcp);
		} else {
			mptcp_like_alpha = 1;
		}
	}
	/* Avoid division by zero in the scaling formulas below. */
	if (t_ssthresh == 0) {
		t_ssthresh = 1;
	}
	if (t_ucwnd_sbw == 0) {
		t_ucwnd_sbw = 1;
	}
	/******************************/
	/* update cwnd and Early FR   */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
			    SCTP_TSN_GE(net->pseudo_cumack,net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		/* if nothing was acked on this destination skip it */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/* CMT fast recovery code
		 */
		/*
		  if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery && net->will_exit_fast_recovery == 0) {
		  @@@ Do something
		  }
		  else if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) {
		*/
#endif

		if (asoc->fast_retran_loss_recovery &&
		    (will_exit == 0) &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update
			 */
			return;
		}
		/*
		 * Did any measurements go on for this network?
		 * (RTCC only: tls_needs_set > 0 means a bandwidth
		 * measurement window was open for this path.)
		 */
		if (use_rtcc && (net->cc_mod.rtcc.tls_needs_set > 0)) {
			uint64_t nbw;
			/*
			 * At this point our bw_bytes has been updated
			 * by incoming sack information.
			 *
			 * But our bw may not yet be set.
			 *
			 */
			/* new_tot_time is in microseconds; nbw is bytes/ms. */
			if ((net->cc_mod.rtcc.new_tot_time/1000) > 0) {
				nbw = net->cc_mod.rtcc.bw_bytes/(net->cc_mod.rtcc.new_tot_time/1000);
			} else {
				nbw = net->cc_mod.rtcc.bw_bytes;
			}
			if (net->cc_mod.rtcc.lbw) {
				/* We have a previous measurement: let the
				 * bandwidth/RTT heuristic decide whether to
				 * hold the cwnd steady for this path.
				 */
				if (cc_bw_limit(stcb, net, nbw)) {
					/* Hold here, no update */
					continue;
				}
			} else {
				/* First measurement: record it as the baseline. */
#if defined(__FreeBSD__) && !defined(__Userspace__)
				uint64_t vtag __dtrace, probepoint __dtrace;

				probepoint = (((uint64_t)net->cwnd) << 32);
				probepoint |=  ((0xa << 16) | 0);
				vtag = (net->rtt << 32) |
					(((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
					(stcb->rport);

				SDT_PROBE5(sctp, cwnd, net, rttvar,
				           vtag,
				           nbw,
				           ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
				           net->flight_size,
				           probepoint);
#endif
				net->cc_mod.rtcc.lbw = nbw;
				net->cc_mod.rtcc.lbw_rtt = net->rtt;
				if (net->cc_mod.rtcc.rtt_set_this_sack) {
					net->cc_mod.rtcc.rtt_set_this_sack = 0;
					net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
				}
			}
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
		 * moved.
		 */
		if (accum_moved ||
		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
			/* If the cumulative ack moved we can proceed */
			if (net->cwnd <= net->ssthresh) {
				/* We are in slow start */
				if (net->flight_size + net->net_ack >= net->cwnd) {
					uint32_t limit;

#if defined(__FreeBSD__) && !defined(__Userspace__)
					old_cwnd = net->cwnd;
#endif
					/* Pick the slow-start increment per CMT mode;
					 * limit caps it at L2 * mtu (scaled). */
					switch (asoc->sctp_cmt_on_off) {
					case SCTP_CMT_RPV1:
						/* Scale by this path's share of total ssthresh. */
						limit = (uint32_t)(((uint64_t)net->mtu *
						                    (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
						                    (uint64_t)net->ssthresh) /
						                   (uint64_t)t_ssthresh);
						incr = (uint32_t)(((uint64_t)net->net_ack *
						                   (uint64_t)net->ssthresh) /
						                  (uint64_t)t_ssthresh);
						if (incr > limit) {
							incr = limit;
						}
						if (incr == 0) {
							incr = 1;
						}
						break;
					case SCTP_CMT_RPV2:
						/* lastsa>>3;  we don't need to divide ...*/
						srtt = net->lastsa;
						if (srtt == 0) {
							srtt = 1;
						}
						/* Scale by this path's share of cwnd/srtt. */
						limit = (uint32_t)(((uint64_t)net->mtu *
						                    (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
						                    (uint64_t)net->cwnd) /
						                   ((uint64_t)srtt * t_ucwnd_sbw));
						                   /* INCREASE FACTOR */
						incr = (uint32_t)(((uint64_t)net->net_ack *
						                   (uint64_t)net->cwnd) /
						                  ((uint64_t)srtt * t_ucwnd_sbw));
						                  /* INCREASE FACTOR */
						if (incr > limit) {
							incr = limit;
						}
						if (incr == 0) {
							incr = 1;
						}
						break;
					case SCTP_CMT_MPTCP:
						/* Couple the increase via the MPTCP-like alpha. */
						limit = (uint32_t)(((uint64_t)net->mtu *
						                    mptcp_like_alpha *
						                    (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) >>
						                   SHIFT_MPTCP_MULTI);
						incr  = (uint32_t)(((uint64_t)net->net_ack *
						                    mptcp_like_alpha) >>
						                   SHIFT_MPTCP_MULTI);
						if (incr > limit) {
							incr = limit;
						}
						if (incr > net->net_ack) {
							incr = net->net_ack;
						}
						if (incr > net->mtu) {
							incr = net->mtu;
						}
						break;
					default:
						/* Plain RFC 2581 ABC: bytes acked, capped at L2 * mtu. */
						incr = net->net_ack;
						if (incr > net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) {
							incr = net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable);
						}
						break;
					}
					net->cwnd += incr;
					sctp_enforce_cwnd_limit(asoc, net);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
						sctp_log_cwnd(stcb, net, incr,
						              SCTP_CWND_LOG_FROM_SS);
					}
#if defined(__FreeBSD__) && !defined(__Userspace__)
					SDT_PROBE5(sctp, cwnd, net, ack,
					           stcb->asoc.my_vtag,
					           ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
					           net,
					           old_cwnd, net->cwnd);
#endif
				} else {
					/* cwnd was not full: no slow-start advance. */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, net, net->net_ack,
							      SCTP_CWND_LOG_NOADV_SS);
					}
				}
			} else {
				/* We are in congestion avoidance */
				/*
				 * Add to pba
				 */
				net->partial_bytes_acked += net->net_ack;

				if ((net->flight_size + net->net_ack >= net->cwnd) &&
				    (net->partial_bytes_acked >= net->cwnd)) {
					net->partial_bytes_acked -= net->cwnd;
#if defined(__FreeBSD__) && !defined(__Userspace__)
					old_cwnd = net->cwnd;
#endif
					/* One cwnd's worth acked: grow by (scaled) one MTU. */
					switch (asoc->sctp_cmt_on_off) {
					case SCTP_CMT_RPV1:
						incr = (uint32_t)(((uint64_t)net->mtu *
						                   (uint64_t)net->ssthresh) /
						                  (uint64_t)t_ssthresh);
						if (incr == 0) {
							incr = 1;
						}
						break;
					case SCTP_CMT_RPV2:
						/* lastsa>>3;  we don't need to divide ... */
						srtt = net->lastsa;
						if (srtt == 0) {
							srtt = 1;
						}
						incr = (uint32_t)((uint64_t)net->mtu *
						                  (uint64_t)net->cwnd /
						                  ((uint64_t)srtt *
						                   t_ucwnd_sbw));
						                  /* INCREASE FACTOR */
						if (incr == 0) {
							incr = 1;
						}
						break;
					case SCTP_CMT_MPTCP:
						incr = (uint32_t)((mptcp_like_alpha *
						                   (uint64_t) net->cwnd) >>
						                  SHIFT_MPTCP_MULTI);
						if (incr > net->mtu) {
							incr = net->mtu;
						}
						break;
					default:
						incr = net->mtu;
						break;
					}
					net->cwnd += incr;
					sctp_enforce_cwnd_limit(asoc, net);
#if defined(__FreeBSD__) && !defined(__Userspace__)
					SDT_PROBE5(sctp, cwnd, net, ack,
					           stcb->asoc.my_vtag,
					           ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
					           net,
					           old_cwnd, net->cwnd);
#endif
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
						sctp_log_cwnd(stcb, net, net->mtu,
							      SCTP_CWND_LOG_FROM_CA);
					}
				} else {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, net, net->net_ack,
							      SCTP_CWND_LOG_NOADV_CA);
					}
				}
			}
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
					      SCTP_CWND_LOG_NO_CUMACK);
			}
		}
	}
}
   1080 
/*
 * A destination just left the potentially-failed (PF) state and is
 * reachable again: restart it conservatively with cwnd = one MTU.
 * (The two signatures differ only in SCTP_UNUSED, because stcb is
 * referenced solely by the FreeBSD dtrace probe.)
 */
#if defined(__FreeBSD__) && !defined(__Userspace__)
static void
sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb, struct sctp_nets *net)
#else
static void
sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net)
#endif
{
#if defined(__FreeBSD__) && !defined(__Userspace__)
	int old_cwnd __dtrace;

	old_cwnd = net->cwnd;
#endif
	net->cwnd = net->mtu;
#if defined(__FreeBSD__) && !defined(__Userspace__)
	SDT_PROBE5(sctp, cwnd, net, ack,
	           stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
	           old_cwnd, net->cwnd);
#endif
	SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
	        (void *)net, net->cwnd);
}
   1103 
/*
 * Retransmission timeout on a destination: drop ssthresh, reset cwnd
 * to one MTU, and clear partial_bytes_acked.
 *
 * With CMT resource pooling (RPV1/RPV2) the new ssthresh is scaled by
 * this path's share of the association-wide totals; otherwise the
 * classic max(cwnd/2, 4*MTU) of RFC 4960 is used.
 */
static void
sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int old_cwnd = net->cwnd;
	uint32_t t_ssthresh, t_cwnd;
	uint64_t t_ucwnd_sbw;

	/* MT FIXME: Don't compute this over and over again */
	t_ssthresh = 0;
	t_cwnd = 0;
	if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
	    (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
		struct sctp_nets *lnet;
		uint32_t srtt;

		/* Sum ssthresh, cwnd and cwnd/srtt over all paths. */
		t_ucwnd_sbw = 0;
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			t_ssthresh += lnet->ssthresh;
			t_cwnd += lnet->cwnd;
			srtt = lnet->lastsa;
			/* lastsa>>3;  we don't need to divide ... */
			if (srtt > 0) {
				t_ucwnd_sbw += (uint64_t)lnet->cwnd / (uint64_t)srtt;
			}
		}
		/* Guard the divisions below. */
		if (t_ssthresh < 1) {
			t_ssthresh = 1;
		}
		if (t_ucwnd_sbw < 1) {
			t_ucwnd_sbw = 1;
		}
		if (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) {
			/* RPV1: 4 MTUs scaled by this path's ssthresh share. */
			net->ssthresh = (uint32_t)(((uint64_t)4 *
			                            (uint64_t)net->mtu *
			                            (uint64_t)net->ssthresh) /
			                           (uint64_t)t_ssthresh);
		} else {
			/* RPV2: total cwnd minus half of (total cwnd/srtt)*srtt. */
			uint64_t cc_delta;

			srtt = net->lastsa;
			/* lastsa>>3;  we don't need to divide ... */
			if (srtt == 0) {
				srtt = 1;
			}
			cc_delta = t_ucwnd_sbw * (uint64_t)srtt / 2;
			if (cc_delta < t_cwnd) {
				net->ssthresh = (uint32_t)((uint64_t)t_cwnd - cc_delta);
			} else {
				net->ssthresh  = net->mtu;
			}
		}
		/* Cap ssthresh relative to this path's excess over half the total. */
		if ((net->cwnd > t_cwnd / 2) &&
		    (net->ssthresh < net->cwnd - t_cwnd / 2)) {
			net->ssthresh = net->cwnd - t_cwnd / 2;
		}
		if (net->ssthresh < net->mtu) {
			net->ssthresh = net->mtu;
		}
	} else {
		/* RFC 4960 behaviour. */
		net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
	}
	net->cwnd = net->mtu;
	net->partial_bytes_acked = 0;
#if defined(__FreeBSD__) && !defined(__Userspace__)
	SDT_PROBE5(sctp, cwnd, net, to,
	           stcb->asoc.my_vtag,
	           ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
	           net,
	           old_cwnd, net->cwnd);
#endif
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
}
   1178 
/*
 * Common reaction to an ECN-Echo, shared by the RFC 2581 module
 * (use_rtcc == 0) and the RTCC module (use_rtcc == 1).
 *
 * in_window     - nonzero if this CE report falls inside an already
 *                 reacted-to window (only a further tune-down is done).
 * num_pkt_lost  - number of packets reported marked, used by the
 *                 data-center (DCCC) path to size the reduction.
 *
 * The DCCC branch is only taken for the RTCC module on a local LAN
 * with use_dccc_ecn enabled.
 */
static void
sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *net,
                                      int in_window, int num_pkt_lost, int use_rtcc)
{
	int old_cwnd = net->cwnd;

	if ((use_rtcc) && (net->lan_type == SCTP_LAN_LOCAL) && (net->cc_mod.rtcc.use_dccc_ecn)) {
		/* Data center Congestion Control */
		if (in_window == 0) {
			/* Go to CA with the cwnd at the point we sent
			 * the TSN that was marked with a CE.
			 */
			if (net->ecn_prev_cwnd < net->cwnd) {
				/* Restore to prev cwnd */
				/* NOTE(review): the subtractions below can wrap if
				 * num_pkt_lost * mtu exceeds the window - confirm
				 * callers bound num_pkt_lost. */
				net->cwnd = net->ecn_prev_cwnd - (net->mtu * num_pkt_lost);
			} else {
				/* Just cut in 1/2 */
				net->cwnd /= 2;
			}
			/* Drop to CA */
			net->ssthresh = net->cwnd - (num_pkt_lost * net->mtu);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
			}
		} else {
			/* Further tuning down required over the drastic original cut */
			net->ssthresh -= (net->mtu * num_pkt_lost);
			net->cwnd -= (net->mtu * num_pkt_lost);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
			}
		}
		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
	} else {
		if (in_window == 0) {
			/* Classic reaction: halve ssthresh, set cwnd to it. */
			SCTP_STAT_INCR(sctps_ecnereducedcwnd);
			net->ssthresh = net->cwnd / 2;
			if (net->ssthresh < net->mtu) {
				net->ssthresh = net->mtu;
				/* here back off the timer as well, to slow us down */
				net->RTO <<= 1;
			}
			net->cwnd = net->ssthresh;
#if defined(__FreeBSD__) && !defined(__Userspace__)
			SDT_PROBE5(sctp, cwnd, net, ecn,
			           stcb->asoc.my_vtag,
			           ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
			           net,
			           old_cwnd, net->cwnd);
#endif
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
			}
		}
	}

}
   1235 
   1236 static void
   1237 sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
   1238 struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
   1239 uint32_t *bottle_bw, uint32_t *on_queue)
   1240 {
   1241 uint32_t bw_avail;
   1242 unsigned int incr;
   1243 int old_cwnd = net->cwnd;
   1244 
   1245 /* get bottle neck bw */
   1246 *bottle_bw = ntohl(cp->bottle_bw);
   1247 /* and whats on queue */
   1248 *on_queue = ntohl(cp->current_onq);
   1249 /*
   1250  * adjust the on-queue if our flight is more it could be
   1251  * that the router has not yet gotten data "in-flight" to it
   1252  */
   1253 if (*on_queue < net->flight_size) {
   1254 	*on_queue = net->flight_size;
   1255 }
   1256 /* rtt is measured in micro seconds, bottle_bw in bytes per second */
   1257 bw_avail = (uint32_t)(((uint64_t)(*bottle_bw) * net->rtt) / (uint64_t)1000000);
   1258 if (bw_avail > *bottle_bw) {
   1259 	/*
   1260 	 * Cap the growth to no more than the bottle neck.
   1261 	 * This can happen as RTT slides up due to queues.
   1262 	 * It also means if you have more than a 1 second
   1263 	 * RTT with a empty queue you will be limited to the
   1264 	 * bottle_bw per second no matter if other points
   1265 	 * have 1/2 the RTT and you could get more out...
   1266 	 */
   1267 	bw_avail = *bottle_bw;
   1268 }
   1269 if (*on_queue > bw_avail) {
   1270 	/*
   1271 	 * No room for anything else don't allow anything
   1272 	 * else to be "added to the fire".
   1273 	 */
   1274 	int seg_inflight, seg_onqueue, my_portion;
   1275 
   1276 	net->partial_bytes_acked = 0;
   1277 	/* how much are we over queue size? */
   1278 	incr = *on_queue - bw_avail;
   1279 	if (stcb->asoc.seen_a_sack_this_pkt) {
   1280 		/*
   1281 		 * undo any cwnd adjustment that the sack
   1282 		 * might have made
   1283 		 */
   1284 		net->cwnd = net->prev_cwnd;
   1285 	}
   1286 	/* Now how much of that is mine? */
   1287 	seg_inflight = net->flight_size / net->mtu;
   1288 	seg_onqueue = *on_queue / net->mtu;
   1289 	my_portion = (incr * seg_inflight) / seg_onqueue;
   1290 
   1291 	/* Have I made an adjustment already */
   1292 	if (net->cwnd > net->flight_size) {
   1293 		/*
   1294 		 * for this flight I made an adjustment we
   1295 		 * need to decrease the portion by a share
   1296 		 * our previous adjustment.
   1297 		 */
   1298 		int diff_adj;
   1299 
   1300 		diff_adj = net->cwnd - net->flight_size;
   1301 		if (diff_adj > my_portion)
   1302 			my_portion = 0;
   1303 		else
   1304 			my_portion -= diff_adj;
   1305 	}
   1306 	/*
   1307 	 * back down to the previous cwnd (assume we have
   1308 	 * had a sack before this packet). minus what ever
   1309 	 * portion of the overage is my fault.
   1310 	 */
   1311 	net->cwnd -= my_portion;
   1312 
   1313 	/* we will NOT back down more than 1 MTU */
   1314 	if (net->cwnd <= net->mtu) {
   1315 		net->cwnd = net->mtu;
   1316 	}
   1317 	/* force into CA */
   1318 	net->ssthresh = net->cwnd - 1;
   1319 } else {
   1320 	/*
   1321 	 * Take 1/4 of the space left or max burst up ..
   1322 	 * whichever is less.
   1323 	 */
   1324 	incr = (bw_avail - *on_queue) >> 2;
   1325 	if ((stcb->asoc.max_burst > 0) &&
   1326 	    (stcb->asoc.max_burst * net->mtu < incr)) {
   1327 		incr = stcb->asoc.max_burst * net->mtu;
   1328 	}
   1329 	net->cwnd += incr;
   1330 }
   1331 if (net->cwnd > bw_avail) {
   1332 	/* We can't exceed the pipe size */
   1333 	net->cwnd = bw_avail;
   1334 }
   1335 if (net->cwnd < net->mtu) {
   1336 	/* We always have 1 MTU */
   1337 	net->cwnd = net->mtu;
   1338 }
   1339 sctp_enforce_cwnd_limit(&stcb->asoc, net);
   1340 if (net->cwnd - old_cwnd != 0) {
   1341 	/* log only changes */
   1342 #if defined(__FreeBSD__) && !defined(__Userspace__)
   1343 	SDT_PROBE5(sctp, cwnd, net, pd,
   1344 	           stcb->asoc.my_vtag,
   1345 	           ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
   1346 	           net,
   1347 	           old_cwnd, net->cwnd);
   1348 #endif
   1349 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
   1350 		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
   1351 			SCTP_CWND_LOG_FROM_SAT);
   1352 	}
   1353 }
   1354 }
   1355 
   1356 static void
   1357 sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
   1358                              struct sctp_nets *net, int burst_limit)
   1359 {
   1360 int old_cwnd = net->cwnd;
   1361 
   1362 if (net->ssthresh < net->cwnd)
   1363 	net->ssthresh = net->cwnd;
   1364 if (burst_limit) {
   1365 	net->cwnd = (net->flight_size + (burst_limit * net->mtu));
   1366 	sctp_enforce_cwnd_limit(&stcb->asoc, net);
   1367 #if defined(__FreeBSD__) && !defined(__Userspace__)
   1368 	SDT_PROBE5(sctp, cwnd, net, bl,
   1369 	           stcb->asoc.my_vtag,
   1370 	           ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
   1371 	           net,
   1372 	           old_cwnd, net->cwnd);
   1373 #endif
   1374 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
   1375 		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
   1376 	}
   1377 }
   1378 }
   1379 
   1380 static void
   1381 sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
   1382                            struct sctp_association *asoc,
   1383                            int accum_moved, int reneged_all, int will_exit)
   1384 {
   1385 /* Passing a zero argument in last disables the rtcc algorithm */
   1386 sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 0);
   1387 }
   1388 
   1389 static void
   1390 sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
   1391                                int in_window, int num_pkt_lost)
   1392 {
   1393 /* Passing a zero argument in last disables the rtcc algorithm */
   1394 sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 0);
   1395 }
   1396 
   1397 /* Here starts the RTCCVAR type CC invented by RRS which
   1398 * is a slight mod to RFC2581. We reuse a common routine or
   1399 * two since these algorithms are so close and need to
   1400 * remain the same.
   1401 */
   1402 static void
   1403 sctp_cwnd_update_rtcc_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
   1404                                     int in_window, int num_pkt_lost)
   1405 {
   1406 sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 1);
   1407 }
   1408 
   1409 static void sctp_cwnd_update_rtcc_tsn_acknowledged(struct sctp_nets *net,
   1410                                                   struct sctp_tmit_chunk *tp1)
   1411 {
   1412 net->cc_mod.rtcc.bw_bytes += tp1->send_size;
   1413 }
   1414 
   1415 static void
   1416 sctp_cwnd_prepare_rtcc_net_for_sack(struct sctp_tcb *stcb SCTP_UNUSED,
   1417                                    struct sctp_nets *net)
   1418 {
   1419 if (net->cc_mod.rtcc.tls_needs_set > 0) {
   1420 	/* We had a bw measurement going on */
   1421 	struct timeval ltls;
   1422 	SCTP_GETPTIME_TIMEVAL(&ltls);
   1423 	timevalsub(&ltls, &net->cc_mod.rtcc.tls);
   1424 	net->cc_mod.rtcc.new_tot_time = (ltls.tv_sec * 1000000) + ltls.tv_usec;
   1425 }
   1426 }
   1427 
/*
 * RTCC hook: a new transmission burst begins after the pipe drained
 * to empty.  Any previously measured bandwidth baseline is stale, so
 * clear the whole measurement state, and — if ret_from_eq is set —
 * also pull cwnd back down to the initial window.
 */
static void
sctp_cwnd_new_rtcc_transmission_begins(struct sctp_tcb *stcb,
                                      struct sctp_nets *net)
{
#if defined(__FreeBSD__) && !defined(__Userspace__)
	uint64_t vtag __dtrace, probepoint __dtrace;

#endif
	if (net->cc_mod.rtcc.lbw) {
#if defined(__FreeBSD__) && !defined(__Userspace__)
		/* Clear the old bw.. we went to 0 in-flight */
		vtag = (net->rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
			(stcb->rport);
		probepoint = (((uint64_t)net->cwnd) << 32);
		/* Probe point 8 */
		probepoint |=  ((8 << 16) | 0);
		SDT_PROBE5(sctp, cwnd, net, rttvar,
		           vtag,
		           ((net->cc_mod.rtcc.lbw << 32) | 0),
		           ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
		           net->flight_size,
		           probepoint);
#endif
		/* Reset all bandwidth-measurement state for this path. */
		net->cc_mod.rtcc.lbw_rtt = 0;
		net->cc_mod.rtcc.cwnd_at_bw_set = 0;
		net->cc_mod.rtcc.lbw = 0;
		net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
		net->cc_mod.rtcc.vol_reduce = 0;
		net->cc_mod.rtcc.bw_tot_time = 0;
		net->cc_mod.rtcc.bw_bytes = 0;
		net->cc_mod.rtcc.tls_needs_set = 0;
		if (net->cc_mod.rtcc.steady_step) {
			net->cc_mod.rtcc.vol_reduce = 0;
			net->cc_mod.rtcc.step_cnt = 0;
			net->cc_mod.rtcc.last_step_state = 0;
		}
		if (net->cc_mod.rtcc.ret_from_eq) {
			/* less aggressive one - reset cwnd too */
			uint32_t cwnd_in_mtu, cwnd;

			/* Recompute the initial window, as at association start. */
			cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
			if (cwnd_in_mtu == 0) {
				/* Using 0 means that the value of RFC 4960 is used. */
				cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
			} else {
				/*
				 * We take the minimum of the burst limit and the
				 * initial congestion window.
				 */
				if ((stcb->asoc.max_burst > 0) && (cwnd_in_mtu > stcb->asoc.max_burst))
					cwnd_in_mtu = stcb->asoc.max_burst;
				cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
			}
			if (net->cwnd > cwnd) {
				/* Only set if we are not a timeout (i.e. down to 1 mtu) */
				net->cwnd = cwnd;
			}
		}
	}
}
   1488 
   1489 static void
   1490 sctp_set_rtcc_initial_cc_param(struct sctp_tcb *stcb,
   1491                               struct sctp_nets *net)
   1492 {
   1493 #if defined(__FreeBSD__) && !defined(__Userspace__)
   1494 uint64_t vtag __dtrace, probepoint __dtrace;
   1495 
   1496 #endif
   1497 sctp_set_initial_cc_param(stcb, net);
   1498 stcb->asoc.use_precise_time = 1;
   1499 #if defined(__FreeBSD__) && !defined(__Userspace__)
   1500 probepoint = (((uint64_t)net->cwnd) << 32);
   1501 probepoint |=  ((9 << 16) | 0);
   1502 vtag = (net->rtt << 32) |
   1503 	(((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
   1504 	(stcb->rport);
   1505 SDT_PROBE5(sctp, cwnd, net, rttvar,
   1506            vtag,
   1507            0,
   1508            0,
   1509            0,
   1510            probepoint);
   1511 #endif
   1512 net->cc_mod.rtcc.lbw_rtt = 0;
   1513 net->cc_mod.rtcc.cwnd_at_bw_set = 0;
   1514 net->cc_mod.rtcc.vol_reduce = 0;
   1515 net->cc_mod.rtcc.lbw = 0;
   1516 net->cc_mod.rtcc.vol_reduce = 0;
   1517 net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
   1518 net->cc_mod.rtcc.bw_tot_time = 0;
   1519 net->cc_mod.rtcc.bw_bytes = 0;
   1520 net->cc_mod.rtcc.tls_needs_set = 0;
   1521 net->cc_mod.rtcc.ret_from_eq = SCTP_BASE_SYSCTL(sctp_rttvar_eqret);
   1522 net->cc_mod.rtcc.steady_step = SCTP_BASE_SYSCTL(sctp_steady_step);
   1523 net->cc_mod.rtcc.use_dccc_ecn = SCTP_BASE_SYSCTL(sctp_use_dccc_ecn);
   1524 net->cc_mod.rtcc.step_cnt = 0;
   1525 net->cc_mod.rtcc.last_step_state = 0;
   1526 }
   1527 
   1528 static int
   1529 sctp_cwnd_rtcc_socket_option(struct sctp_tcb *stcb, int setorget,
   1530                             struct sctp_cc_option *cc_opt)
   1531 {
   1532 struct sctp_nets *net;
   1533 
   1534 if (setorget == 1) {
   1535 	/* a set */
   1536 	if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
   1537 		if ((cc_opt->aid_value.assoc_value != 0) &&
   1538 		    (cc_opt->aid_value.assoc_value != 1)) {
   1539 			return (EINVAL);
   1540 		}
   1541 		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
   1542 			net->cc_mod.rtcc.ret_from_eq = cc_opt->aid_value.assoc_value;
   1543 		}
   1544 	} else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
   1545 		if ((cc_opt->aid_value.assoc_value != 0) &&
   1546 		    (cc_opt->aid_value.assoc_value != 1)) {
   1547 			return (EINVAL);
   1548 		}
   1549 		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
   1550 			net->cc_mod.rtcc.use_dccc_ecn = cc_opt->aid_value.assoc_value;
   1551 		}
   1552 	} else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
   1553 		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
   1554 			net->cc_mod.rtcc.steady_step = cc_opt->aid_value.assoc_value;
   1555 		}
   1556 	} else {
   1557 		return (EINVAL);
   1558 	}
   1559 } else {
   1560 	/* a get */
   1561 	if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
   1562 		net = TAILQ_FIRST(&stcb->asoc.nets);
   1563 		if (net == NULL) {
   1564 			return (EFAULT);
   1565 		}
   1566 		cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.ret_from_eq;
   1567 	} else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
   1568 		net = TAILQ_FIRST(&stcb->asoc.nets);
   1569 		if (net == NULL) {
   1570 			return (EFAULT);
   1571 		}
   1572 		cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.use_dccc_ecn;
   1573 	} else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
   1574 		net = TAILQ_FIRST(&stcb->asoc.nets);
   1575 		if (net == NULL) {
   1576 			return (EFAULT);
   1577 		}
   1578 		cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.steady_step;
   1579 	} else {
   1580 		return (EINVAL);
   1581 	}
   1582 }
   1583 return (0);
   1584 }
   1585 
   1586 static void
   1587 sctp_cwnd_update_rtcc_packet_transmitted(struct sctp_tcb *stcb SCTP_UNUSED,
   1588                                         struct sctp_nets *net)
   1589 {
   1590 if (net->cc_mod.rtcc.tls_needs_set == 0) {
   1591 	SCTP_GETPTIME_TIMEVAL(&net->cc_mod.rtcc.tls);
   1592 	net->cc_mod.rtcc.tls_needs_set = 2;
   1593 }
   1594 }
   1595 
   1596 static void
   1597 sctp_cwnd_update_rtcc_after_sack(struct sctp_tcb *stcb,
   1598                                 struct sctp_association *asoc,
   1599                                 int accum_moved, int reneged_all, int will_exit)
   1600 {
   1601 /* Passing a one argument at the last enables the rtcc algorithm */
   1602 sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 1);
   1603 }
   1604 
   1605 static void
   1606 sctp_rtt_rtcc_calculated(struct sctp_tcb *stcb SCTP_UNUSED,
   1607                         struct sctp_nets *net,
   1608                         struct timeval *now SCTP_UNUSED)
   1609 {
   1610 net->cc_mod.rtcc.rtt_set_this_sack = 1;
   1611 }
   1612 
   1613 /* Here starts Sally Floyds HS-TCP */
   1614 
/* One row of the HighSpeed-TCP adjustment table (sctp_cwnd_adjust). */
struct sctp_hs_raise_drop {
	int32_t cwnd;		/* cwnd threshold, in units of 1024 bytes (compared against cwnd >> 10) */
	int8_t increase;	/* additive increase step at this level - presumably in MTUs; TODO confirm against the increase path */
	int8_t drop_percent;	/* NOTE(review): apparently the percentage to cut on loss - verify against the decrease path (below this chunk) */
};
   1620 
#define SCTP_HS_TABLE_SIZE 73

/*
 * HighSpeed-TCP style lookup table, indexed by congestion level.
 * Rows are sorted by ascending cwnd threshold (in KB, i.e. cwnd >> 10);
 * sctp_hs_cwnd_increase scans it starting at net->last_hs_used.
 */
static const struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
	{38, 1, 50},		/* 0   */
	{118, 2, 44},		/* 1   */
	{221, 3, 41},		/* 2   */
	{347, 4, 38},		/* 3   */
	{495, 5, 37},		/* 4   */
	{663, 6, 35},		/* 5   */
	{851, 7, 34},		/* 6   */
	{1058, 8, 33},		/* 7   */
	{1284, 9, 32},		/* 8   */
	{1529, 10, 31},		/* 9   */
	{1793, 11, 30},		/* 10  */
	{2076, 12, 29},		/* 11  */
	{2378, 13, 28},		/* 12  */
	{2699, 14, 28},		/* 13  */
	{3039, 15, 27},		/* 14  */
	{3399, 16, 27},		/* 15  */
	{3778, 17, 26},		/* 16  */
	{4177, 18, 26},		/* 17  */
	{4596, 19, 25},		/* 18  */
	{5036, 20, 25},		/* 19  */
	{5497, 21, 24},		/* 20  */
	{5979, 22, 24},		/* 21  */
	{6483, 23, 23},		/* 22  */
	{7009, 24, 23},		/* 23  */
	{7558, 25, 22},		/* 24  */
	{8130, 26, 22},		/* 25  */
	{8726, 27, 22},		/* 26  */
	{9346, 28, 21},		/* 27  */
	{9991, 29, 21},		/* 28  */
	{10661, 30, 21},	/* 29  */
	{11358, 31, 20},	/* 30  */
	{12082, 32, 20},	/* 31  */
	{12834, 33, 20},	/* 32  */
	{13614, 34, 19},	/* 33  */
	{14424, 35, 19},	/* 34  */
	{15265, 36, 19},	/* 35  */
	{16137, 37, 19},	/* 36  */
	{17042, 38, 18},	/* 37  */
	{17981, 39, 18},	/* 38  */
	{18955, 40, 18},	/* 39  */
	{19965, 41, 17},	/* 40  */
	{21013, 42, 17},	/* 41  */
	{22101, 43, 17},	/* 42  */
	{23230, 44, 17},	/* 43  */
	{24402, 45, 16},	/* 44  */
	{25618, 46, 16},	/* 45  */
	{26881, 47, 16},	/* 46  */
	{28193, 48, 16},	/* 47  */
	{29557, 49, 15},	/* 48  */
	{30975, 50, 15},	/* 49  */
	{32450, 51, 15},	/* 50  */
	{33986, 52, 15},	/* 51  */
	{35586, 53, 14},	/* 52  */
	{37253, 54, 14},	/* 53  */
	{38992, 55, 14},	/* 54  */
	{40808, 56, 14},	/* 55  */
	{42707, 57, 13},	/* 56  */
	{44694, 58, 13},	/* 57  */
	{46776, 59, 13},	/* 58  */
	{48961, 60, 13},	/* 59  */
	{51258, 61, 13},	/* 60  */
	{53677, 62, 12},	/* 61  */
	{56230, 63, 12},	/* 62  */
	{58932, 64, 12},	/* 63  */
	{61799, 65, 12},	/* 64  */
	{64851, 66, 11},	/* 65  */
	{68113, 67, 11},	/* 66  */
	{71617, 68, 11},	/* 67  */
	{75401, 69, 10},	/* 68  */
	{79517, 70, 10},	/* 69  */
	{84035, 71, 10},	/* 70  */
	{89053, 72, 10},	/* 71  */
	{94717, 73, 9}		/* 72  */
};
   1698 
   1699 static void
   1700 sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
   1701 {
   1702 int cur_val, i, indx, incr;
   1703 int old_cwnd = net->cwnd;
   1704 
   1705 cur_val = net->cwnd >> 10;
   1706 indx = SCTP_HS_TABLE_SIZE - 1;
   1707 
   1708 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
   1709 	/* normal mode */
   1710 	if (net->net_ack > net->mtu) {
   1711 		net->cwnd += net->mtu;
   1712 	} else {
   1713 		net->cwnd += net->net_ack;
   1714 	}
   1715 } else {
   1716 	for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
   1717 		if (cur_val < sctp_cwnd_adjust[i].cwnd) {
   1718 			indx = i;
   1719 			break;
   1720 		}
   1721 	}
   1722 	net->last_hs_used = indx;
   1723 	incr = (((int32_t)sctp_cwnd_adjust[indx].increase) << 10);
   1724 	net->cwnd += incr;
   1725 }
   1726 sctp_enforce_cwnd_limit(&stcb->asoc, net);
   1727 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
   1728 	sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SS);
   1729 }
   1730 }
   1731 
/*
 * Shrink cwnd on loss per HS-TCP: below the table region fall back to
 * the standard halving (floored at 2 MTU); inside it, cut cwnd by the
 * drop_percent of the table row last used, then re-derive the table
 * position for the reduced window.
 */
static void
sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int cur_val, i, indx;
	int old_cwnd = net->cwnd;	/* saved for delta logging below */

	cur_val = net->cwnd >> 10;	/* cwnd in KB, the table's unit */
	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
		/* normal mode */
		net->ssthresh = net->cwnd / 2;
		if (net->ssthresh < (net->mtu * 2)) {
			net->ssthresh = 2 * net->mtu;
		}
		net->cwnd = net->ssthresh;
	} else {
		/* drop by the proper amount */
		net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
		    (int32_t)sctp_cwnd_adjust[net->last_hs_used].drop_percent);
		net->cwnd = net->ssthresh;
		/* now where are we */
		indx = net->last_hs_used;
		cur_val = net->cwnd >> 10;
		/* reset where we are in the table */
		if (cur_val < sctp_cwnd_adjust[0].cwnd) {
			/* feel out of hs */
			net->last_hs_used = 0;
		} else {
			for (i = indx; i >= 1; i--) {
				if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
					break;
				}
			}
			/*
			 * NOTE(review): the loop above computes 'i' but its
			 * result is never used; assigning 'indx' leaves
			 * last_hs_used unchanged. This looks like it was
			 * meant to be 'net->last_hs_used = i;' -- confirm
			 * against upstream before changing.
			 */
			net->last_hs_used = indx;
		}
	}
	sctp_enforce_cwnd_limit(&stcb->asoc, net);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
	}
}
   1772 
/*
 * HS-TCP variant of the post-fast-retransmit update: for each destination
 * with newly acked data (net_ack > 0) that is not already covered by an
 * RFC 2582 fast recovery window (unless CMT is enabled), reduce cwnd via
 * the HS-TCP table, mark a recovery window ending one TSN below the next
 * chunk to be sent, and restart the T3-rtx timer.
 */
static void
sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
                            struct sctp_association *asoc)
{
	struct sctp_nets *net;
	/*
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) ||
		    (asoc->sctp_cmt_on_off > 0)) {
			/* out of a RFC2582 Fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * per section 7.2.3, are there any
				 * destinations that had a fast retransmit
				 * to them. If so what we need to do is
				 * adjust ssthresh and cwnd.
				 */
				struct sctp_tmit_chunk *lchk;

				sctp_hs_cwnd_decrease(stcb, net);

				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
				}

				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
						stcb->sctp_ep, stcb, net,
				                SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_2);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
						 stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction but RFC2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}
   1837 
/*
 * HS-TCP variant of the per-SACK cwnd update. Walks every destination:
 * skips those with nothing acked, bails out entirely while in loss
 * recovery (non-CMT), and otherwise applies slow start (via the HS-TCP
 * table) or congestion avoidance depending on cwnd vs. ssthresh.
 */
static void
sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
                              struct sctp_association *asoc,
                              int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
{
	struct sctp_nets *net;
	/******************************/
	/* update cwnd and Early FR   */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
			    SCTP_TSN_GE(net->pseudo_cumack,net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		/* if nothing was acked on this destination skip it */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/* CMT fast recovery code
		 */
		/*
		if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery && net->will_exit_fast_recovery == 0) {
		    @@@ Do something
		 }
		 else if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) {
		*/
#endif

		 if (asoc->fast_retran_loss_recovery &&
		     (will_exit == 0) &&
		     (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update
			 */
			return;
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
		 * moved.
		 */
		if (accum_moved ||
		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
			/* If the cumulative ack moved we can proceed */
			if (net->cwnd <= net->ssthresh) {
				/* We are in slow start */
				if (net->flight_size + net->net_ack >= net->cwnd) {
					sctp_hs_cwnd_increase(stcb, net);
				} else {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, net, net->net_ack,
							SCTP_CWND_LOG_NOADV_SS);
					}
				}
			} else {
				/* We are in congestion avoidance */
				net->partial_bytes_acked += net->net_ack;
				/* grow by one MTU per cwnd's worth of acked data,
				 * only while the window is actually being used */
				if ((net->flight_size + net->net_ack >= net->cwnd) &&
				    (net->partial_bytes_acked >= net->cwnd)) {
					net->partial_bytes_acked -= net->cwnd;
					net->cwnd += net->mtu;
					sctp_enforce_cwnd_limit(asoc, net);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
						sctp_log_cwnd(stcb, net, net->mtu,
							SCTP_CWND_LOG_FROM_CA);
					}
				} else {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, net, net->net_ack,
							SCTP_CWND_LOG_NOADV_CA);
					}
				}
			}
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
					SCTP_CWND_LOG_NO_CUMACK);
			}
		}
	}
}
   1930 
   1931 /*
   1932 * H-TCP congestion control. The algorithm is detailed in:
   1933 * R.N.Shorten, D.J.Leith:
   1934 *   "H-TCP: TCP for high-speed and long-distance networks"
   1935 *   Proc. PFLDnet, Argonne, 2004.
   1936 * http://www.hamilton.ie/net/htcp3.pdf
   1937 */
   1938 
/* H-TCP compile-time knobs (not exported as sysctls). */
static int use_rtt_scaling = 1;		/* scale alpha's growth factor by minRTT */
static int use_bandwidth_switch = 1;	/* track throughput; reset beta on big shifts */
   1941 
   1942 static inline int
   1943 between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
   1944 {
   1945 return (seq3 - seq2 >= seq1 - seq2);
   1946 }
   1947 
/* Ticks elapsed since the last congestion event on this path. */
static inline uint32_t
htcp_cong_time(struct htcp *ca)
{
	return (sctp_get_tick_count() - ca->last_cong);
}
   1953 
   1954 static inline uint32_t
   1955 htcp_ccount(struct htcp *ca)
   1956 {
   1957 return (ca->minRTT == 0 ? htcp_cong_time(ca) : htcp_cong_time(ca)/ca->minRTT);
   1958 }
   1959 
/*
 * Register a congestion event: snapshot the current state into the
 * undo_* fields (so htcp_cwnd_undo could restore it) and restart the
 * congestion-free interval clock.
 */
static inline void
htcp_reset(struct htcp *ca)
{
	ca->undo_last_cong = ca->last_cong;
	ca->undo_maxRTT = ca->maxRTT;
	ca->undo_old_maxB = ca->old_maxB;
	ca->last_cong = sctp_get_tick_count();
}
   1968 
   1969 #ifdef SCTP_NOT_USED
   1970 
/*
 * Undo the most recent htcp_reset() (currently compiled out): restore
 * the saved congestion state and return the cwnd to resume with, never
 * smaller than the current cwnd.
 */
static uint32_t
htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	net->cc_mod.htcp_ca.last_cong = net->cc_mod.htcp_ca.undo_last_cong;
	net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.undo_maxRTT;
	net->cc_mod.htcp_ca.old_maxB = net->cc_mod.htcp_ca.undo_old_maxB;
	return (max(net->cwnd, ((net->ssthresh/net->mtu<<7)/net->cc_mod.htcp_ca.beta)*net->mtu));
}
   1979 
   1980 #endif
   1981 
/*
 * Track H-TCP's minimum and maximum RTT from the current smoothed RTT.
 * maxRTT only advances outside fast retransmit, while ssthresh is still
 * below 0xFFFF, and after more than 3 congestion-free RTTs.
 */
static inline void
measure_rtt(struct sctp_nets *net)
{
	uint32_t srtt = net->lastsa>>SCTP_RTT_SHIFT;

	/* keep track of minimum RTT seen so far, minRTT is zero at first */
	if (net->cc_mod.htcp_ca.minRTT > srtt || !net->cc_mod.htcp_ca.minRTT)
		net->cc_mod.htcp_ca.minRTT = srtt;

	/* max RTT */
	if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->cc_mod.htcp_ca) > 3) {
		if (net->cc_mod.htcp_ca.maxRTT < net->cc_mod.htcp_ca.minRTT)
			net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.minRTT;
		/* only let maxRTT creep upward by at most 20 ms per sample */
		if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT+sctp_msecs_to_ticks(20))
			net->cc_mod.htcp_ca.maxRTT = srtt;
	}
}
   1999 
/*
 * Sample the achieved throughput for H-TCP's bandwidth-switch heuristic.
 * Accumulates acked bytes and, once at least a cwnd's worth (minus a small
 * alpha-scaled slack) has been acked over at least one minRTT, folds a new
 * rate sample (cur_Bi, in packets/sec) into Bi/maxB/minB with a 3/4 EWMA.
 * No-op beyond bytes_acked bookkeeping when use_bandwidth_switch is off.
 */
static void
measure_achieved_throughput(struct sctp_nets *net)
{
	uint32_t now = sctp_get_tick_count();

	if (net->fast_retran_ip == 0)
		net->cc_mod.htcp_ca.bytes_acked = net->net_ack;

	if (!use_bandwidth_switch)
		return;

	/* achieved throughput calculations */
	/* JRS - not 100% sure of this statement */
	if (net->fast_retran_ip == 1) {
		/* in fast retransmit: discard the partial sample */
		net->cc_mod.htcp_ca.bytecount = 0;
		net->cc_mod.htcp_ca.lasttime = now;
		return;
	}

	net->cc_mod.htcp_ca.bytecount += net->net_ack;
	if ((net->cc_mod.htcp_ca.bytecount >= net->cwnd - (((net->cc_mod.htcp_ca.alpha >> 7) ? (net->cc_mod.htcp_ca.alpha >> 7) : 1) * net->mtu)) &&
	    (now - net->cc_mod.htcp_ca.lasttime >= net->cc_mod.htcp_ca.minRTT) &&
	    (net->cc_mod.htcp_ca.minRTT > 0)) {
		uint32_t cur_Bi = net->cc_mod.htcp_ca.bytecount/net->mtu*hz/(now - net->cc_mod.htcp_ca.lasttime);

		if (htcp_ccount(&net->cc_mod.htcp_ca) <= 3) {
			/* just after backoff */
			net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi = cur_Bi;
		} else {
			net->cc_mod.htcp_ca.Bi = (3*net->cc_mod.htcp_ca.Bi + cur_Bi)/4;
			if (net->cc_mod.htcp_ca.Bi > net->cc_mod.htcp_ca.maxB)
				net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi;
			if (net->cc_mod.htcp_ca.minB > net->cc_mod.htcp_ca.maxB)
				net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB;
		}
		net->cc_mod.htcp_ca.bytecount = 0;
		net->cc_mod.htcp_ca.lasttime = now;
	}
}
   2039 
/*
 * Recompute H-TCP's backoff factor beta (<<7 fixed point). With the
 * bandwidth switch enabled, a throughput change of more than ~20% since
 * the previous sample resets beta to BETA_MIN. Otherwise beta tracks
 * minRTT/maxRTT, clamped to [BETA_MIN, BETA_MAX]; the first call after
 * a reset just re-arms the mode switch.
 */
static inline void
htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
{
	if (use_bandwidth_switch) {
		uint32_t maxB = ca->maxB;
		uint32_t old_maxB = ca->old_maxB;
		ca->old_maxB = ca->maxB;

		/* throughput moved outside [0.8, 1.2] of the last sample? */
		if (!between(5*maxB, 4*old_maxB, 6*old_maxB)) {
			ca->beta = BETA_MIN;
			ca->modeswitch = 0;
			return;
		}
	}

	if (ca->modeswitch && minRTT > sctp_msecs_to_ticks(10) && maxRTT) {
		ca->beta = (minRTT<<7)/maxRTT;
		if (ca->beta < BETA_MIN)
			ca->beta = BETA_MIN;
		else if (ca->beta > BETA_MAX)
			ca->beta = BETA_MAX;
	} else {
		ca->beta = BETA_MIN;
		ca->modeswitch = 1;
	}
}
   2066 
   2067 static inline void
   2068 htcp_alpha_update(struct htcp *ca)
   2069 {
   2070 uint32_t minRTT = ca->minRTT;
   2071 uint32_t factor = 1;
   2072 uint32_t diff = htcp_cong_time(ca);
   2073 
   2074 if (diff > (uint32_t)hz) {
   2075 	diff -= hz;
   2076 	factor = 1+ (10 * diff + ((diff / 2) * (diff / 2) / hz)) / hz;
   2077 }
   2078 
   2079 if (use_rtt_scaling && minRTT) {
   2080 	uint32_t scale = (hz << 3) / (10 * minRTT);
   2081 	scale = min(max(scale, 1U << 2), 10U << 3); /* clamping ratio to interval [0.5,10]<<3 */
   2082 	factor = (factor << 3) / scale;
   2083 	if (factor != 0)
   2084 		factor = 1;
   2085 }
   2086 
   2087 ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
   2088 if (ca->alpha != 0)
   2089 	ca->alpha = ALPHA_BASE;
   2090 }
   2091 
   2092 /* After we have the rtt data to calculate beta, we'd still prefer to wait one
   2093 * rtt before we adjust our beta to ensure we are working from a consistent
   2094 * data.
   2095 *
   2096 * This function should be called when we hit a congestion event since only at
   2097 * that point do we really have a real sense of maxRTT (the queues en route
   2098 * were getting just too full now).
   2099 */
/*
 * Refresh beta and alpha from the current RTT measurements, then decay
 * maxRTT slightly toward minRTT so stale path conditions fade out.
 */
static void
htcp_param_update(struct sctp_nets *net)
{
	uint32_t minRTT = net->cc_mod.htcp_ca.minRTT;
	uint32_t maxRTT = net->cc_mod.htcp_ca.maxRTT;

	htcp_beta_update(&net->cc_mod.htcp_ca, minRTT, maxRTT);
	htcp_alpha_update(&net->cc_mod.htcp_ca);

	/* add slowly fading memory for maxRTT to accommodate routing changes etc */
	if (minRTT > 0 && maxRTT > minRTT)
		net->cc_mod.htcp_ca.maxRTT = minRTT + ((maxRTT-minRTT)*95)/100;
}
   2113 
/*
 * Recompute ssthresh for a congestion event: refresh alpha/beta, then
 * scale the current cwnd by beta (a <<7 fixed-point fraction), floored
 * at two MTUs.
 */
static uint32_t
htcp_recalc_ssthresh(struct sctp_nets *net)
{
	htcp_param_update(net);
	return (max(((net->cwnd/net->mtu * net->cc_mod.htcp_ca.beta) >> 7)*net->mtu, 2U*net->mtu));
}
   2120 
/*
 * H-TCP per-SACK window growth for one destination: standard ABC-style
 * slow start below ssthresh; above it, congestion avoidance driven by
 * the alpha factor -- cwnd grows by one MTU once alpha-scaled
 * partial_bytes_acked covers the window.
 */
static void
htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*-
	 * How to handle these functions?
	 *	if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question.
	 *		return;
	 */
	if (net->cwnd <= net->ssthresh) {
		/* We are in slow start */
		if (net->flight_size + net->net_ack >= net->cwnd) {
			if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
				net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, net->mtu,
						SCTP_CWND_LOG_FROM_SS);
				}

			} else {
				net->cwnd += net->net_ack;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, net->net_ack,
						SCTP_CWND_LOG_FROM_SS);
				}
			}
			sctp_enforce_cwnd_limit(&stcb->asoc, net);
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->net_ack,
					SCTP_CWND_LOG_NOADV_SS);
			}
		}
	} else {
		measure_rtt(net);

		/* In dangerous area, increase slowly.
		 * In theory this is net->cwnd += alpha / net->cwnd
		 */
		/* What is snd_cwnd_cnt?? */
		if (((net->partial_bytes_acked/net->mtu * net->cc_mod.htcp_ca.alpha) >> 7)*net->mtu >= net->cwnd) {
			/*-
			 * Does SCTP have a cwnd clamp?
			 * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS).
			 */
			net->cwnd += net->mtu;
			net->partial_bytes_acked = 0;
			sctp_enforce_cwnd_limit(&stcb->asoc, net);
			htcp_alpha_update(&net->cc_mod.htcp_ca);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
					SCTP_CWND_LOG_FROM_CA);
			}
		} else {
			net->partial_bytes_acked += net->net_ack;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->net_ack,
					SCTP_CWND_LOG_NOADV_CA);
			}
		}

		net->cc_mod.htcp_ca.bytes_acked = net->mtu;
	}
}
   2184 
   2185 #ifdef SCTP_NOT_USED
   2186 /* Lower bound on congestion window. */
/* Lower bound on congestion window (currently compiled out). */
static uint32_t
htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	return (net->ssthresh);
}
   2192 #endif
   2193 
   2194 static void
   2195 htcp_init(struct sctp_nets *net)
   2196 {
   2197 memset(&net->cc_mod.htcp_ca, 0, sizeof(struct htcp));
   2198 net->cc_mod.htcp_ca.alpha = ALPHA_BASE;
   2199 net->cc_mod.htcp_ca.beta = BETA_MIN;
   2200 net->cc_mod.htcp_ca.bytes_acked = net->mtu;
   2201 net->cc_mod.htcp_ca.last_cong = sctp_get_tick_count();
   2202 }
   2203 
/*
 * H-TCP module init for a new destination: pick the initial cwnd and
 * ssthresh, then set up the H-TCP state.
 */
static void
sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * Initial cwnd is min(4 * MTU, max(2 * MTU, SCTP_INITIAL_CWND)),
	 * i.e. at least 2 and at most 4 MTUs of sending.
	 */
	net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
	net->ssthresh = stcb->asoc.peers_rwnd;	/* start CA at the peer's window */
	sctp_enforce_cwnd_limit(&stcb->asoc, net);
	htcp_init(net);

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE|SCTP_CWND_LOGGING_ENABLE)) {
		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
	}
}
   2220 
/*
 * H-TCP per-SACK update. Walks every destination: skips those with
 * nothing acked, bails out while in loss recovery (non-CMT), and
 * otherwise runs htcp_cong_avoid() plus the throughput sampler when
 * the (pseudo-)cumack moved.
 */
static void
sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
	 struct sctp_association *asoc,
	 int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
{
	struct sctp_nets *net;

	/******************************/
	/* update cwnd and Early FR   */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
			    SCTP_TSN_GE(net->pseudo_cumack,net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		/* if nothing was acked on this destination skip it */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/* CMT fast recovery code
		 */
		/*
		if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery && net->will_exit_fast_recovery == 0) {
		    @@@ Do something
		 }
		 else if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) {
		*/
#endif

		if (asoc->fast_retran_loss_recovery &&
		    will_exit == 0 &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update
			 */
			return;
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
		 * moved.
		 */
		if (accum_moved ||
		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
			htcp_cong_avoid(stcb, net);
			measure_achieved_throughput(net);
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
					SCTP_CWND_LOG_NO_CUMACK);
			}
		}
	}
}
   2286 
/*
 * H-TCP post-fast-retransmit update: for each destination with newly
 * acked data that is not already covered by an RFC 2582 fast recovery
 * window (unless CMT is enabled), recompute ssthresh via H-TCP, collapse
 * cwnd to it, mark a recovery window ending one TSN below the next chunk
 * to be sent, and restart the T3-rtx timer.
 */
static void
sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
	struct sctp_association *asoc)
{
	struct sctp_nets *net;
	/*
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) ||
		    (asoc->sctp_cmt_on_off > 0)) {
			/* out of a RFC2582 Fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * per section 7.2.3, are there any
				 * destinations that had a fast retransmit
				 * to them. If so what we need to do is
				 * adjust ssthresh and cwnd.
				 */
				struct sctp_tmit_chunk *lchk;
				int old_cwnd = net->cwnd;

				/* JRS - reset as if state were changed */
				htcp_reset(&net->cc_mod.htcp_ca);
				net->ssthresh = htcp_recalc_ssthresh(net);
				net->cwnd = net->ssthresh;
				sctp_enforce_cwnd_limit(asoc, net);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
						SCTP_CWND_LOG_FROM_FR);
				}
				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
				}

				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
						stcb->sctp_ep, stcb, net,
				                SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_3);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
						 stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction but RFC2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}
   2359 
   2360 static void
   2361 sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
   2362 struct sctp_nets *net)
   2363 {
   2364 	int old_cwnd = net->cwnd;
   2365 
   2366 	/* JRS - reset as if the state were being changed to timeout */
   2367 	htcp_reset(&net->cc_mod.htcp_ca);
   2368 	net->ssthresh = htcp_recalc_ssthresh(net);
   2369 	net->cwnd = net->mtu;
   2370 	net->partial_bytes_acked = 0;
   2371 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
   2372 		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
   2373 	}
   2374 }
   2375 
   2376 static void
   2377 sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
   2378 	struct sctp_nets *net, int in_window, int num_pkt_lost SCTP_UNUSED)
   2379 {
   2380 int old_cwnd;
   2381 old_cwnd = net->cwnd;
   2382 
   2383 /* JRS - reset hctp as if state changed */
   2384 if (in_window == 0) {
   2385 	htcp_reset(&net->cc_mod.htcp_ca);
   2386 	SCTP_STAT_INCR(sctps_ecnereducedcwnd);
   2387 	net->ssthresh = htcp_recalc_ssthresh(net);
   2388 	if (net->ssthresh < net->mtu) {
   2389 		net->ssthresh = net->mtu;
   2390 		/* here back off the timer as well, to slow us down */
   2391 		net->RTO <<= 1;
   2392 	}
   2393 	net->cwnd = net->ssthresh;
   2394 	sctp_enforce_cwnd_limit(&stcb->asoc, net);
   2395 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
   2396 		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
   2397 	}
   2398 }
   2399 }
   2400 
/*
 * Congestion control module vtable. Entry order: [0] the RFC 4960
 * default, [1] HS-TCP, [2] H-TCP, [3] the RTCC variant (which also
 * fills in the extra per-packet/per-TSN hooks). Presumably indexed by
 * the congestion-control socket option value -- confirm against the
 * selection code. The _WIN32 branch uses positional initializers
 * because that toolchain lacks designated-initializer support here.
 */
const struct sctp_cc_functions sctp_cc_functions[] = {
/* [0] RFC 4960 default congestion control */
{
#if defined(_WIN32) && !defined(__MINGW32__)
	sctp_set_initial_cc_param,
	sctp_cwnd_update_after_sack,
	sctp_cwnd_update_exit_pf_common,
	sctp_cwnd_update_after_fr,
	sctp_cwnd_update_after_timeout,
	sctp_cwnd_update_after_ecn_echo,
	sctp_cwnd_update_after_packet_dropped,
	sctp_cwnd_update_after_output,
#else
	.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
	.sctp_cwnd_update_after_sack = sctp_cwnd_update_after_sack,
	.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
	.sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
	.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
	.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
	.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
	.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
#endif
},
/* [1] HS-TCP (RFC 3649 style table-driven increase/decrease) */
{
#if defined(_WIN32) && !defined(__MINGW32__)
	sctp_set_initial_cc_param,
	sctp_hs_cwnd_update_after_sack,
	sctp_cwnd_update_exit_pf_common,
	sctp_hs_cwnd_update_after_fr,
	sctp_cwnd_update_after_timeout,
	sctp_cwnd_update_after_ecn_echo,
	sctp_cwnd_update_after_packet_dropped,
	sctp_cwnd_update_after_output,
#else
	.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
	.sctp_cwnd_update_after_sack = sctp_hs_cwnd_update_after_sack,
	.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
	.sctp_cwnd_update_after_fr = sctp_hs_cwnd_update_after_fr,
	.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
	.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
	.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
	.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
#endif
},
/* [2] H-TCP (Leith/Shorten) */
{
#if defined(_WIN32) && !defined(__MINGW32__)
	sctp_htcp_set_initial_cc_param,
	sctp_htcp_cwnd_update_after_sack,
	sctp_cwnd_update_exit_pf_common,
	sctp_htcp_cwnd_update_after_fr,
	sctp_htcp_cwnd_update_after_timeout,
	sctp_htcp_cwnd_update_after_ecn_echo,
	sctp_cwnd_update_after_packet_dropped,
	sctp_cwnd_update_after_output,
#else
	.sctp_set_initial_cc_param = sctp_htcp_set_initial_cc_param,
	.sctp_cwnd_update_after_sack = sctp_htcp_cwnd_update_after_sack,
	.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
	.sctp_cwnd_update_after_fr = sctp_htcp_cwnd_update_after_fr,
	.sctp_cwnd_update_after_timeout = sctp_htcp_cwnd_update_after_timeout,
	.sctp_cwnd_update_after_ecn_echo = sctp_htcp_cwnd_update_after_ecn_echo,
	.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
	.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
#endif
},
/* [3] RTCC: default CC plus RTT/bandwidth tracking hooks */
{
#if defined(_WIN32) && !defined(__MINGW32__)
	sctp_set_rtcc_initial_cc_param,
	sctp_cwnd_update_rtcc_after_sack,
	sctp_cwnd_update_exit_pf_common,
	sctp_cwnd_update_after_fr,
	sctp_cwnd_update_after_timeout,
	sctp_cwnd_update_rtcc_after_ecn_echo,
	sctp_cwnd_update_after_packet_dropped,
	sctp_cwnd_update_after_output,
	sctp_cwnd_update_rtcc_packet_transmitted,
	sctp_cwnd_update_rtcc_tsn_acknowledged,
	sctp_cwnd_new_rtcc_transmission_begins,
	sctp_cwnd_prepare_rtcc_net_for_sack,
	sctp_cwnd_rtcc_socket_option,
	sctp_rtt_rtcc_calculated
#else
	.sctp_set_initial_cc_param = sctp_set_rtcc_initial_cc_param,
	.sctp_cwnd_update_after_sack = sctp_cwnd_update_rtcc_after_sack,
	.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
	.sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
	.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
	.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_rtcc_after_ecn_echo,
	.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
	.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
	.sctp_cwnd_update_packet_transmitted = sctp_cwnd_update_rtcc_packet_transmitted,
	.sctp_cwnd_update_tsn_acknowledged = sctp_cwnd_update_rtcc_tsn_acknowledged,
	.sctp_cwnd_new_transmission_begins = sctp_cwnd_new_rtcc_transmission_begins,
	.sctp_cwnd_prepare_net_for_sack = sctp_cwnd_prepare_rtcc_net_for_sack,
	.sctp_cwnd_socket_option = sctp_cwnd_rtcc_socket_option,
	.sctp_rtt_calculated = sctp_rtt_rtcc_calculated
#endif
}
};