tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

sctp_indata.c (187965B)


      1 /*-
      2 * SPDX-License-Identifier: BSD-3-Clause
      3 *
      4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
      5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
      6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
      7 *
      8 * Redistribution and use in source and binary forms, with or without
      9 * modification, are permitted provided that the following conditions are met:
     10 *
     11 * a) Redistributions of source code must retain the above copyright notice,
     12 *    this list of conditions and the following disclaimer.
     13 *
     14 * b) Redistributions in binary form must reproduce the above copyright
     15 *    notice, this list of conditions and the following disclaimer in
     16 *    the documentation and/or other materials provided with the distribution.
     17 *
     18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
     19 *    contributors may be used to endorse or promote products derived
     20 *    from this software without specific prior written permission.
     21 *
     22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
     24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     32 * THE POSSIBILITY OF SUCH DAMAGE.
     33 */
     34 
     35 #include <netinet/sctp_os.h>
     36 #if defined(__FreeBSD__) && !defined(__Userspace__)
     37 #include <sys/proc.h>
     38 #endif
     39 #include <netinet/sctp_var.h>
     40 #include <netinet/sctp_sysctl.h>
     41 #include <netinet/sctp_header.h>
     42 #include <netinet/sctp_pcb.h>
     43 #include <netinet/sctputil.h>
     44 #include <netinet/sctp_output.h>
     45 #include <netinet/sctp_uio.h>
     46 #include <netinet/sctp_auth.h>
     47 #include <netinet/sctp_timer.h>
     48 #include <netinet/sctp_asconf.h>
     49 #include <netinet/sctp_indata.h>
     50 #include <netinet/sctp_bsd_addr.h>
     51 #include <netinet/sctp_input.h>
     52 #include <netinet/sctp_crc32.h>
     53 #if defined(__FreeBSD__) && !defined(__Userspace__)
     54 #include <netinet/sctp_lock_bsd.h>
     55 #endif
     56 
     57 #if defined(_WIN32) && !defined(_MSC_VER)
     58 #include <minmax.h>
     59 #endif
     60 
     61 /*
     62 * NOTES: On the outbound side of things I need to check the sack timer to
     63 * see if I should generate a SACK into the chunk queue (if I have data to
     64 * send, that is, and will be sending it, for bundling).
     65 *
     66 * The callback in sctp_usrreq.c will get called when the socket is read from.
     67 * This will cause sctp_service_queues() to get called on the top entry in
     68 * the list.
     69 */
     70 static uint32_t
     71 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
     72 		struct sctp_stream_in *strm,
     73 		struct sctp_tcb *stcb,
     74 		struct sctp_association *asoc,
     75 		struct sctp_tmit_chunk *chk, int hold_rlock);
     76 
     77 void
     78 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
     79 {
     80 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
     81 }
     82 
/* Calculate what the rwnd would be */
/*
 * Compute the receive window to advertise: free space in the receive
 * socket buffer minus data we still hold privately (reassembly queue and
 * per-stream queues, charged one MSIZE of mbuf overhead per queued entry)
 * minus the accumulated control overhead.  Returns 0 when no socket is
 * attached or when no space remains.
 */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-2-m socket. Since
	 * the sb_cc is the count that everyone as put up. When we re-write
	 * sctp_soreceive then we will fix this so that ONLY this
	 * associations data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		/* No socket attached: nothing can be advertised. */
		return (calc);
	}

	/* Size counters must be zero whenever the matching count is zero. */
	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	        ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	        ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->cnt_on_reasm_queue == 0 &&
	    asoc->cnt_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * take out what has NOT been put on socket queue and we yet hold
	 * for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	                                         asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	                                         asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}

	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/* If the window gets too small due to ctrl-stuff, reduce it
	 * to 1, even it is 0. SWS engaged
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		/* Advertise 1 rather than 0 to avoid silly-window-syndrome deadlock. */
		calc = 1;
	}
	return (calc);
}
    135 
    136 /*
    137 * Build out our readq entry based on the incoming packet.
    138 */
    139 struct sctp_queued_to_read *
    140 sctp_build_readq_entry(struct sctp_tcb *stcb,
    141    struct sctp_nets *net,
    142    uint32_t tsn, uint32_t ppid,
    143    uint32_t context, uint16_t sid,
    144    uint32_t mid, uint8_t flags,
    145    struct mbuf *dm)
    146 {
    147 struct sctp_queued_to_read *read_queue_e = NULL;
    148 
    149 sctp_alloc_a_readq(stcb, read_queue_e);
    150 if (read_queue_e == NULL) {
    151 	goto failed_build;
    152 }
    153 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
    154 read_queue_e->sinfo_stream = sid;
    155 read_queue_e->sinfo_flags = (flags << 8);
    156 read_queue_e->sinfo_ppid = ppid;
    157 read_queue_e->sinfo_context = context;
    158 read_queue_e->sinfo_tsn = tsn;
    159 read_queue_e->sinfo_cumtsn = tsn;
    160 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    161 read_queue_e->mid = mid;
    162 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
    163 TAILQ_INIT(&read_queue_e->reasm);
    164 read_queue_e->whoFrom = net;
    165 atomic_add_int(&net->ref_count, 1);
    166 read_queue_e->data = dm;
    167 read_queue_e->stcb = stcb;
    168 read_queue_e->port_from = stcb->rport;
    169 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
    170 	read_queue_e->do_not_ref_stcb = 1;
    171 }
    172 failed_build:
    173 return (read_queue_e);
    174 }
    175 
/*
 * Build the ancillary-data (cmsg) mbuf delivered alongside a received
 * message: optionally SCTP_RCVINFO, SCTP_NXTINFO and SCTP_SNDRCV or
 * SCTP_EXTRCV, depending on which socket features are enabled on the
 * endpoint.  Returns NULL when the user requested no ancillary data or
 * when no mbuf could be allocated.
 */
struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
#if defined(_WIN32)
	WSACMSGHDR *cmh;
#else
	struct cmsghdr *cmh;
#endif
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}

	/* First pass: size the buffer for every cmsg we will emit. */
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		/* Next-message info is only present when flagged by the receive path. */
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
#if defined(_WIN32)
	cmh = mtod(ret, WSACMSGHDR *);
#else
	cmh = mtod(ret, struct cmsghdr *);
#endif
	/*
	 * Make sure that there is no un-initialized padding between
	 * the cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		/* Advance cmh to the next cmsg slot in the buffer. */
#if defined(_WIN32)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		/* Translate internal serinfo flags to the user-visible nxt flags. */
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
#if defined(_WIN32)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
    305 
/*
 * Move a delivered TSN from the renegable mapping array to the
 * non-renegable (nr) mapping array so that a later SACK cannot revoke it.
 * Only needed when sctp_do_drain is enabled (i.e. data may be renegaded).
 */
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i;
	int in_r, in_nr;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		/* Draining disabled: nothing is ever revoked, no tracking needed. */
		return;
	}
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/*
		 * This tsn is behind the cum ack and thus we don't
		 * need to worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	/* The TSN must be recorded in at least one of the two maps. */
	KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__));
	if (!in_nr) {
		/* Mark it non-renegable and track the highest such TSN. */
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			asoc->highest_tsn_inside_nr_map = tsn;
		}
	}
	if (in_r) {
		/* Clear the renegable bit now that the nr map owns this TSN. */
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
		if (tsn == asoc->highest_tsn_inside_map) {
			/* We must back down to see what the new highest is. */
			for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
				SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
				if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
					asoc->highest_tsn_inside_map = i;
					break;
				}
			}
			if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) {
				/* Scanned past the base without finding a bit: map is empty. */
				asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
			}
		}
	}
}
    349 
/*
 * Insert 'control' into the proper per-stream inbound queue (ordered or
 * unordered), keeping the queue sorted by message id (MID).  Returns 0 on
 * success, -1 when insertion is illegal (duplicate MID, or a second
 * unordered entry without I-DATA support); callers abort the association
 * on -1.
 */
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
		     struct sctp_association *asoc,
		     struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	/* Chunk flags live in the upper byte of sinfo_flags. */
	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/* Only one stream can be here in old style  -- abort */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		/* Unfragmented message: complete on arrival. */
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		/* Walk the MID-sorted queue to find the insertion point. */
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * one in queue is bigger than the
				 * new one, insert before this one
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, He sent me a duplicate msg
				 * id number?? return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert
					 * it after this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
								  SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
    432 
    433 static void
    434 sctp_abort_in_reasm(struct sctp_tcb *stcb,
    435                    struct sctp_queued_to_read *control,
    436                    struct sctp_tmit_chunk *chk,
    437                    int *abort_flag, int opspot)
    438 {
    439 char msg[SCTP_DIAG_INFO_LEN];
    440 struct mbuf *oper;
    441 
    442 if (stcb->asoc.idata_supported) {
    443 	SCTP_SNPRINTF(msg, sizeof(msg),
    444 	              "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
    445 	              opspot,
    446 	              control->fsn_included,
    447 	              chk->rec.data.tsn,
    448 	              chk->rec.data.sid,
    449 	              chk->rec.data.fsn, chk->rec.data.mid);
    450 } else {
    451 	SCTP_SNPRINTF(msg, sizeof(msg),
    452 	              "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
    453 	              opspot,
    454 	              control->fsn_included,
    455 	              chk->rec.data.tsn,
    456 	              chk->rec.data.sid,
    457 	              chk->rec.data.fsn,
    458 	              (uint16_t)chk->rec.data.mid);
    459 }
    460 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
    461 sctp_m_freem(chk->data);
    462 chk->data = NULL;
    463 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    464 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
    465 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, false, SCTP_SO_NOT_LOCKED);
    466 *abort_flag = 1;
    467 }
    468 
    469 static void
    470 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
    471 {
    472 /*
    473  * The control could not be placed and must be cleaned.
    474  */
    475 struct sctp_tmit_chunk *chk, *nchk;
    476 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
    477 	TAILQ_REMOVE(&control->reasm, chk, sctp_next);
    478 	if (chk->data)
    479 		sctp_m_freem(chk->data);
    480 	chk->data = NULL;
    481 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    482 }
    483 sctp_free_remote_addr(control->whoFrom);
    484 if (control->data) {
    485 	sctp_m_freem(control->data);
    486 	control->data = NULL;
    487 }
    488 sctp_free_a_readq(stcb, control);
    489 }
    490 
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order as
 * long as the control's entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
   struct sctp_association *asoc,
   struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about
	 * or should we just change our queue sort at the bottom to be by
	 * TSN.
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided an will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer
	 *
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
			strm->last_mid_delivered, control->mid);
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			              strm->last_mid_delivered, control->sinfo_tsn,
			              control->sinfo_stream, control->mid);
		} else {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			              (uint16_t)strm->last_mid_delivered,
			              control->sinfo_tsn,
			              control->sinfo_stream,
			              (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	queue_needed = 1;
	/* Provisionally account it as queued; undone below if delivered now. */
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) && !defined(__Userspace__)
		/* Apple requires the socket lock; drop/retake the TCB lock around it. */
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it wont be queued if it could be delivered directly */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		                  control,
		                  &stcb->sctp_socket->so_rcv, 1,
		                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		/* Drain any further in-order, complete messages from the queue. */
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					      control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
							  SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				                  control,
				                  &stcb->sctp_socket->so_rcv, 1,
				                  SCTP_READ_LOCK_NOT_HELD,
				                  SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				/* Next in order but fragmented: needs reassembly first. */
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			SCTP_SNPRINTF(msg, sizeof(msg),
			              "Queue to str MID: %u duplicate", control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}
    665 
/*
 * Walk control->data, pruning zero-length mbufs, recomputing
 * control->length, and setting control->tail_mbuf to the last mbuf in
 * the chain.  When the control is already on the read queue, socket-
 * buffer accounting is updated for every mbuf kept.
 */
static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				/* Chain ended on a freed mbuf; prev is the new tail. */
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On read queue so we must increment the
			 * SB stuff, we assume caller has done any locks of SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}
    707 
/*
 * Append mbuf chain 'm' to control->data via the cached tail pointer,
 * pruning zero-length mbufs, updating control->length and *added with
 * the bytes appended, and advancing control->tail_mbuf.  When the
 * control is already on the read queue, socket-buffer accounting is
 * updated for every mbuf kept.
 */
static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
	struct mbuf *prev=NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		/* No tail cached: rebuild the chain and tail from scratch. */
		sctp_m_freem(control->data);
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				/* Appended chain ended on a freed mbuf; prev is the tail. */
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On read queue so we must increment the
			 * SB stuff, we assume caller has done any locks of SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		*added += SCTP_BUF_LEN(m);
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}
    762 
    763 static void
    764 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
    765 {
    766 memset(nc, 0, sizeof(struct sctp_queued_to_read));
    767 nc->sinfo_stream = control->sinfo_stream;
    768 nc->mid = control->mid;
    769 TAILQ_INIT(&nc->reasm);
    770 nc->top_fsn = control->top_fsn;
    771 nc->mid = control->mid;
    772 nc->sinfo_flags = control->sinfo_flags;
    773 nc->sinfo_ppid = control->sinfo_ppid;
    774 nc->sinfo_context = control->sinfo_context;
    775 nc->fsn_included = 0xffffffff;
    776 nc->sinfo_tsn = control->sinfo_tsn;
    777 nc->sinfo_cumtsn = control->sinfo_cumtsn;
    778 nc->sinfo_assoc_id = control->sinfo_assoc_id;
    779 nc->whoFrom = control->whoFrom;
    780 atomic_add_int(&nc->whoFrom->ref_count, 1);
    781 nc->stcb = control->stcb;
    782 nc->port_from = control->port_from;
    783 nc->do_not_ref_stcb = control->do_not_ref_stcb;
    784 }
    785 
/*
 * Collapse in-sequence fragments of the "old" (non I-DATA) unordered data
 * stream into their control entry and deliver what we can to the read queue.
 * Returns 1 when the caller should stop scanning the unordered queue
 * (message finished, first fragment missing, or PD threshold not reached);
 * returns 0 only when a partial-delivery was just started here.
 */
static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
                              struct sctp_association *asoc,
                              struct sctp_stream_in *strm,
                              struct sctp_queued_to_read *control,
                              uint32_t pd_point,
                              int inp_read_lock_held)
{
	/* Special handling for the old un-ordered data chunk.
	 * All the chunks/TSN's go to mid 0. So
	 * we have to do the old style watching to see
	 * if we have it all. If you return one, no other
	 * control entries on the un-ordered queue will
	 * be looked at. In theory there should be no others
	 * entries in reality, unless the guy is sending both
	 * unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	/* Next fragment we can append is the one after the last merged FSN. */
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			/* nc is allocated up-front: if the message completes below,
			 * leftover fragments are moved onto this spare control. */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left on
					 * the control queue to a new control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						/* Leftover starts a new message: seed nc with
						 * its first fragment and fix reasm accounting. */
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
						panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
						asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Now lets add it to the queue after removing control */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				/* Completed message ends any partial delivery in progress. */
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
							  &stcb->sctp_socket->so_rcv, control->end_added,
							  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
#if defined(__Userspace__)
				} else {
					/* Already on the read queue (PD-API); just notify. */
					sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
#endif
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/* Switch to the new guy and continue */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						/* Spare control was never queued; release it. */
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				/* Message not complete yet; spare control unused. */
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if (cnt_added && strm->pd_api_started) {
#if defined(__Userspace__)
		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
#endif
		/* New data was merged into an in-progress PD-API delivery. */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		/* Enough buffered to begin partial delivery of this message. */
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		                  &stcb->sctp_socket->so_rcv, control->end_added,
		                  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}
    931 
/*
 * Place one fragment (chk) of an "old" (non I-DATA) unordered message into
 * its control structure, keeping the reassembly list sorted by FSN (== TSN
 * for old DATA).  Handles the awkward case where multiple messages share one
 * control and a FIRST fragment arrives with a lower TSN than the FIRST
 * already held.  On protocol violations the association is aborted via
 * sctp_abort_in_reasm() and *abort_flag is set.
 */
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
                              struct sctp_association *asoc,
                              struct sctp_queued_to_read *control,
                              struct sctp_tmit_chunk *chk,
                              int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;
	/*
	 * Here we need to place the chunk into the control structure
	 * sorted in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
			"chunk is a first fsn: %u becomes fsn_included\n",
			chk->rec.data.fsn);
		at = TAILQ_FIRST(&control->reasm);
		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
			/*
			 * The first chunk in the reassembly is
			 * a smaller TSN than this one, even though
			 * this has a first, it must be from a subsequent
			 * msg.
			 */
			goto place_chunk;
		}
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassembly on
			 * one control multiple messages. As long
			 * as the next FIRST is greater then the old
			 * first (TSN i.e. FSN wise)
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/* Easy way the start of a new guy beyond the lowest */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen, if it does
				 * we started the pd-api on the higher TSN (since
				 * the equals part is a TSN failure it must be that).
				 *
				 * We are completely hosed in that case since I have
				 * no way to recover. This really will only happen
				 * if we can get more TSN's higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
						    abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

				return;
			}
			/*
			 * Ok we have two firsts and the one we just got
			 * is smaller than the one we previously placed.. yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			/* chk keeps the old control length; control's own length is
			 * recomputed by sctp_setup_tail_pointer() below. */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			/* The displaced (higher-TSN) first is now re-inserted as a
			 * normal fragment. */
			goto place_chunk;
		}
		/* Truly the first fragment seen: the chunk's data becomes the
		 * control's data and the chunk shell is freed. */
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	/* Insert the fragment in FSN order on the reassembly list. */
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one, insert
			 * the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This
			 * really should not happen since the FSN is
			 * a TSN and it should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			                    abort_flag,
			                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* Its at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}
   1064 
/*
 * Scan one inbound stream (strm) for reassembled messages that are ready to
 * be delivered, moving them onto the socket's read queue.  Handles the
 * unordered queue first (with legacy handling when I-DATA is not in use),
 * then ordered messages in MID sequence, starting partial delivery (PD-API)
 * when a single incomplete message exceeds pd_point bytes.  Returns the
 * number of complete ordered messages delivered (0 when PD-API blocks
 * further delivery).
 */
static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
                        struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of
	 * the SSN's on it that are fragmented
	 * are ready to deliver. If so go ahead
	 * and place them on the read queue. In
	 * so placing if we have hit the end, then
	 * we need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	/* PD threshold: a fraction of the receive buffer, capped by the
	 * endpoint's configured partial_delivery_point. */
	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
			       stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	/* Walk the unordered queue: deliver complete messages, or start a
	 * PD-API on the first large incomplete one. */
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
			control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					      control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
						  control,
						  &stcb->sctp_socket->so_rcv, control->end_added,
						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
						  control,
						  &stcb->sctp_socket->so_rcv, control->end_added,
						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	/* Now the ordered queue. */
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/* Ok the guy at the top was being partially delivered
		 * completed, so we remove it. Note
		 * the pd_api flag was taken off when the
		 * chunk was merged on in sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
			"Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
			control, control->end_added, control->mid,
			control->top_fsn, control->fsn_included,
			strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					      control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
						  control,
						  &stcb->sctp_socket->so_rcv, control->end_added,
						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more must have gotten an un-ordered above being partially delivered. */
		return (0);
	}
deliver_more:
	/* Deliver ordered messages strictly in MID sequence. */
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
			"Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
			control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
			next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						      control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* A singleton now slipping through - mark it non-revokable too */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/* Check if we can defer adding until its all there */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/* Don't need it or cannot add more (one being delivered that way) */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					/* Incomplete message going to the read queue: its
					 * bytes are now accounted by the socket buffer. */
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
						  control,
						  &stcb->sctp_socket->so_rcv, control->end_added,
						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return (ret);
}
   1287 
/*
 * Merge the data of a reassembly chunk (chk) onto its control entry and free
 * the chunk shell.  Updates fsn_included and the reassembly-queue accounting,
 * and on the LAST fragment finishes the message: clears any PD-API state and
 * unhooks the control from its stream queue.  Takes the INP read lock itself
 * when the control is already on the read queue and hold_rlock says the
 * caller does not hold it.  Returns the number of bytes appended to the
 * control's existing data chain (0 when the chunk became the first data).
 */
uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
		struct sctp_stream_in *strm,
		struct sctp_tcb *stcb, struct sctp_association *asoc,
		struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the
	 * data from the chk onto the control and free
	 * up the chunk resources.
	 */
	uint32_t added = 0;
	bool i_locked = false;

	if (control->on_read_q) {
		if (hold_rlock == 0) {
			/* Its being pd-api'd so we must do some locks. */
			SCTP_INP_READ_LOCK(stcb->sctp_ep);
			i_locked = true;
		}
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
			/* Socket is going away; don't append, just free the chunk. */
			goto out;
		}
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* Its complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				/*
				 * Don't need to decrement size_on_all_streams,
				 * since control is on the read queue.
				 */
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				      control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
out:
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/* chk's mbufs were handed off (or dropped); free the chunk shell. */
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}
   1365 
   1366 /*
   1367 * Dump onto the re-assembly queue, in its proper place. After dumping on the
   1368 * queue, see if anything can be delivered. If so pull it off (or as much as
   1369 * we can. If we run out of space then we must dump what we can and set the
   1370 * appropriate flag to say we queued what we could.
   1371 */
/*
 * Queue one received fragment (chk) for reassembly under its control entry,
 * validating FSN ordering and FIRST/LAST flags; protocol violations abort
 * the association via sctp_abort_in_reasm() (sets *abort_flag).  Legacy
 * unordered data (no I-DATA support) is diverted to
 * sctp_inject_old_unordered_data().  After placement, any now-in-sequence
 * fragments are merged into the control and, when a partial delivery
 * completes, the control is pushed to the read queue and the reader woken.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
		  struct sctp_queued_to_read *control,
		  struct sctp_tmit_chunk *chk,
		  int created_control,
		  int *abort_flag, uint32_t tsn)
{
	uint32_t next_fsn;
	struct sctp_tmit_chunk *at, *nat;
	struct sctp_stream_in *strm;
	int do_wakeup, unordered;
	uint32_t lenadded;

	strm = &asoc->strmin[control->sinfo_stream];
	/*
	 * For old un-ordered data chunks.
	 */
	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
		unordered = 1;
	} else {
		unordered = 0;
	}
	/* Must be added to the stream-in queue */
	if (created_control) {
		if ((unordered == 0) || (asoc->idata_supported)) {
			sctp_ucount_incr(asoc->cnt_on_all_streams);
		}
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate SSN? */
			sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
			sctp_clean_up_control(stcb, control);
			return;
		}
		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
			/* Ok we created this control and now
			 * lets validate that its legal i.e. there
			 * is a B bit set, if not and we have
			 * up to the cum-ack then its invalid.
			 */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
				                    abort_flag,
				                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
				return;
			}
		}
	}
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		/* Legacy unordered DATA needs its own placement logic. */
		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
		return;
	}
	/*
	 * Ok we must queue the chunk into the reasembly portion:
	 *  o if its the first it goes to the control mbuf.
	 *  o if its not first but the next in sequence it goes to the control,
	 *    and each succeeding one in order also goes.
	 *  o if its not in order we place it on the list in its place.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
			"chunk is a first fsn: %u becomes fsn_included\n",
			chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * Error on senders part, they either
			 * sent us two data chunks with FIRST,
			 * or they sent two un-ordered chunks that
			 * were fragmented at the same time in the same stream.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			                    abort_flag,
			                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
			return;
		}
		control->first_frag_seen = 1;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->fsn_included = chk->rec.data.fsn;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		asoc->size_on_all_streams += control->length;
	} else {
		/* Place the chunk in our list */
		int inserted=0;
		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
					"We have a new top_fsn: %u\n",
					chk->rec.data.fsn);
				control->top_fsn = chk->rec.data.fsn;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
					"The last fsn is now in place fsn: %u\n",
					chk->rec.data.fsn);
				control->last_frag_seen = 1;
				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
					/* A LAST fragment below an already-seen FSN is bogus. */
					SCTPDBG(SCTP_DEBUG_XXX,
						"New fsn: %u is not at top_fsn: %u -- abort\n",
						chk->rec.data.fsn,
						control->top_fsn);
					sctp_abort_in_reasm(stcb, control, chk,
							    abort_flag,
							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
					return;
				}
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know that
				 * the first fragment is 0. For old DATA we have
				 * to receive the first before we know the first FSN
				 * (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/* We have already delivered up to this so its a dup */
					sctp_abort_in_reasm(stcb, control, chk,
							    abort_flag,
							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
					return;
				}
			}
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* Second last? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Duplicate last fsn: %u (top: %u) -- abort\n",
					chk->rec.data.fsn, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
						    chk, abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know that
				 * the first fragment is 0. For old DATA we have
				 * to receive the first before we know the first FSN
				 * (which is the TSN).
				 */

				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/* We have already delivered up to this so its a dup */
					SCTPDBG(SCTP_DEBUG_XXX,
						"New fsn: %u is already seen in included_fsn: %u -- abort\n",
						chk->rec.data.fsn, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
							    abort_flag,
							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
					return;
				}
			}
			/* validate not beyond top FSN if we have seen last one */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
					"New fsn: %u is beyond or at top_fsn: %u -- abort\n",
					chk->rec.data.fsn,
					control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
						    abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the
		 * new chunk in the reassembly for this
		 * control.
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
			"chunk is a not first fsn: %u needs to be inserted\n",
			chk->rec.data.fsn);
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
				if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
					/* Last not at the end? huh? */
					SCTPDBG(SCTP_DEBUG_XXX,
					        "Last fragment not last in list: -- abort\n");
					sctp_abort_in_reasm(stcb, control,
					                    chk, abort_flag,
					                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
					return;
				}
				/*
				 * This one in queue is bigger than the new one, insert
				 * the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Insert it before fsn: %u\n",
					at->rec.data.fsn);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
				/* Gak, He sent me a duplicate str seq number */
				/*
				 * foo bar, I guess I will just free this new guy,
				 * should we abort too? FIX ME MAYBE? Or it COULD be
				 * that the SSN's have wrapped. Maybe I should
				 * compare to TSN somehow... sigh for now just blow
				 * away the chunk!
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Duplicate to fsn: %u -- abort\n",
					at->rec.data.fsn);
				sctp_abort_in_reasm(stcb, control,
						    chk, abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
				chk->rec.data.fsn);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * Ok lets see if we can suck any up into the control
	 * structure that are in seq if it makes sense.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been
	 * seen there is no sense in looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
					control, at,
					at->rec.data.fsn,
					next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
				if (control->on_read_q) {
					do_wakeup = 1;
				} else {
					/*
					 * We only add to the size-on-all-streams
					 * if its not on the read q. The read q
					 * flag will cause a sballoc so its accounted
					 * for there.
					 */
					asoc->size_on_all_streams += lenadded;
				}
				next_fsn++;
				if (control->end_added && control->pdapi_started) {
					/* Message just completed under PD-API; finish delivery. */
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
								  control,
								  &stcb->sctp_socket->so_rcv, control->end_added,
								  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
					}
					break;
				}
			} else {
				/* Gap remains; stop merging. */
				break;
			}
		}
	}
	if (do_wakeup) {
#if defined(__Userspace__)
		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
#endif
		/* Need to wakeup the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}
   1660 
   1661 static struct sctp_queued_to_read *
   1662 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
   1663 {
   1664 struct sctp_queued_to_read *control;
   1665 
   1666 if (ordered) {
   1667 	TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
   1668 		if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
   1669 			break;
   1670 		}
   1671 	}
   1672 } else {
   1673 	if (idata_supported) {
   1674 		TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
   1675 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
   1676 				break;
   1677 			}
   1678 		}
   1679 	} else {
   1680 		control = TAILQ_FIRST(&strm->uno_inqueue);
   1681 	}
   1682 }
   1683 return (control);
   1684 }
   1685 
   1686 static int
   1687 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
   1688 		  struct mbuf **m, int offset,  int chk_length,
   1689 		  struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
   1690 		  int *break_flag, int last_chunk, uint8_t chk_type)
   1691 {
   1692 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
   1693 struct sctp_stream_in *strm;
   1694 uint32_t tsn, fsn, gap, mid;
   1695 struct mbuf *dmbuf;
   1696 int the_len;
   1697 int need_reasm_check = 0;
   1698 uint16_t sid;
   1699 struct mbuf *op_err;
   1700 char msg[SCTP_DIAG_INFO_LEN];
   1701 struct sctp_queued_to_read *control, *ncontrol;
   1702 uint32_t ppid;
   1703 uint8_t chk_flags;
   1704 struct sctp_stream_reset_list *liste;
   1705 int ordered;
   1706 size_t clen;
   1707 int created_control = 0;
   1708 
   1709 if (chk_type == SCTP_IDATA) {
   1710 	struct sctp_idata_chunk *chunk, chunk_buf;
   1711 
   1712 	chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
   1713 	                                                 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
   1714 	chk_flags = chunk->ch.chunk_flags;
   1715 	clen = sizeof(struct sctp_idata_chunk);
   1716 	tsn = ntohl(chunk->dp.tsn);
   1717 	sid = ntohs(chunk->dp.sid);
   1718 	mid = ntohl(chunk->dp.mid);
   1719 	if (chk_flags & SCTP_DATA_FIRST_FRAG) {
   1720 		fsn = 0;
   1721 		ppid = chunk->dp.ppid_fsn.ppid;
   1722 	} else {
   1723 		fsn = ntohl(chunk->dp.ppid_fsn.fsn);
   1724 		ppid = 0xffffffff; /* Use as an invalid value. */
   1725 	}
   1726 } else {
   1727 	struct sctp_data_chunk *chunk, chunk_buf;
   1728 
   1729 	chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
   1730 	                                                sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
   1731 	chk_flags = chunk->ch.chunk_flags;
   1732 	clen = sizeof(struct sctp_data_chunk);
   1733 	tsn = ntohl(chunk->dp.tsn);
   1734 	sid = ntohs(chunk->dp.sid);
   1735 	mid = (uint32_t)(ntohs(chunk->dp.ssn));
   1736 	fsn = tsn;
   1737 	ppid = chunk->dp.ppid;
   1738 }
   1739 if ((size_t)chk_length == clen) {
   1740 	/*
   1741 	 * Need to send an abort since we had a
   1742 	 * empty data chunk.
   1743 	 */
   1744 	op_err = sctp_generate_no_user_data_cause(tsn);
   1745 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
   1746 	sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
   1747 	*abort_flag = 1;
   1748 	return (0);
   1749 }
   1750 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
   1751 	asoc->send_sack = 1;
   1752 }
   1753 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
   1754 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
   1755 	sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
   1756 }
   1757 if (stcb == NULL) {
   1758 	return (0);
   1759 }
   1760 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
   1761 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
   1762 	/* It is a duplicate */
   1763 	SCTP_STAT_INCR(sctps_recvdupdata);
   1764 	if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
   1765 		/* Record a dup for the next outbound sack */
   1766 		asoc->dup_tsns[asoc->numduptsns] = tsn;
   1767 		asoc->numduptsns++;
   1768 	}
   1769 	asoc->send_sack = 1;
   1770 	return (0);
   1771 }
   1772 /* Calculate the number of TSN's between the base and this TSN */
   1773 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
   1774 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
   1775 	/* Can't hold the bit in the mapping at max array, toss it */
   1776 	return (0);
   1777 }
   1778 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
   1779 	SCTP_TCB_LOCK_ASSERT(stcb);
   1780 	if (sctp_expand_mapping_array(asoc, gap)) {
   1781 		/* Can't expand, drop it */
   1782 		return (0);
   1783 	}
   1784 }
   1785 if (SCTP_TSN_GT(tsn, *high_tsn)) {
   1786 	*high_tsn = tsn;
   1787 }
   1788 /* See if we have received this one already */
   1789 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
   1790     SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
   1791 	SCTP_STAT_INCR(sctps_recvdupdata);
   1792 	if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
   1793 		/* Record a dup for the next outbound sack */
   1794 		asoc->dup_tsns[asoc->numduptsns] = tsn;
   1795 		asoc->numduptsns++;
   1796 	}
   1797 	asoc->send_sack = 1;
   1798 	return (0);
   1799 }
   1800 /*
   1801  * Check to see about the GONE flag, duplicates would cause a sack
   1802  * to be sent up above
   1803  */
   1804 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
   1805      (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
   1806      (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
   1807 	/*
   1808 	 * wait a minute, this guy is gone, there is no longer a
   1809 	 * receiver. Send peer an ABORT!
   1810 	 */
   1811 	op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
   1812 	sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
   1813 	*abort_flag = 1;
   1814 	return (0);
   1815 }
   1816 /*
   1817  * Now before going further we see if there is room. If NOT then we
   1818  * MAY let one through only IF this TSN is the one we are waiting
   1819  * for on a partial delivery API.
   1820  */
   1821 
   1822 /* Is the stream valid? */
   1823 if (sid >= asoc->streamincnt) {
   1824 	struct sctp_error_invalid_stream *cause;
   1825 
   1826 	op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
   1827 	                               0, M_NOWAIT, 1, MT_DATA);
   1828 	if (op_err != NULL) {
   1829 		/* add some space up front so prepend will work well */
   1830 		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
   1831 		cause = mtod(op_err, struct sctp_error_invalid_stream *);
   1832 		/*
   1833 		 * Error causes are just param's and this one has
   1834 		 * two back to back phdr, one with the error type
   1835 		 * and size, the other with the streamid and a rsvd
   1836 		 */
   1837 		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
   1838 		cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
   1839 		cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
   1840 		cause->stream_id = htons(sid);
   1841 		cause->reserved = htons(0);
   1842 		sctp_queue_op_err(stcb, op_err);
   1843 	}
   1844 	SCTP_STAT_INCR(sctps_badsid);
   1845 	SCTP_TCB_LOCK_ASSERT(stcb);
   1846 	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
   1847 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
   1848 		asoc->highest_tsn_inside_nr_map = tsn;
   1849 	}
   1850 	if (tsn == (asoc->cumulative_tsn + 1)) {
   1851 		/* Update cum-ack */
   1852 		asoc->cumulative_tsn = tsn;
   1853 	}
   1854 	return (0);
   1855 }
   1856 /*
   1857  * If its a fragmented message, lets see if we can
   1858  * find the control on the reassembly queues.
   1859  */
   1860 if ((chk_type == SCTP_IDATA) &&
   1861     ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
   1862     (fsn == 0)) {
   1863 	/*
   1864 	 *  The first *must* be fsn 0, and other
   1865 	 *  (middle/end) pieces can *not* be fsn 0.
   1866 	 * XXX: This can happen in case of a wrap around.
   1867 	 *      Ignore is for now.
   1868 	 */
   1869 	SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
   1870 	goto err_out;
   1871 }
   1872 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
   1873 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
   1874 	chk_flags, control);
   1875 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
   1876 	/* See if we can find the re-assembly entity */
   1877 	if (control != NULL) {
   1878 		/* We found something, does it belong? */
   1879 		if (ordered && (mid != control->mid)) {
   1880 			SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
   1881 		err_out:
   1882 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   1883 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
   1884 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
   1885 			*abort_flag = 1;
   1886 			return (0);
   1887 		}
   1888 		if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
   1889 			/* We can't have a switched order with an unordered chunk */
   1890 			SCTP_SNPRINTF(msg, sizeof(msg),
   1891 			              "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
   1892 			              tsn);
   1893 			goto err_out;
   1894 		}
   1895 		if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
   1896 			/* We can't have a switched unordered with a ordered chunk */
   1897 			SCTP_SNPRINTF(msg, sizeof(msg),
   1898 			             "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
   1899 			             tsn);
   1900 			goto err_out;
   1901 		}
   1902 	}
   1903 } else {
   1904 	/* Its a complete segment. Lets validate we
   1905 	 * don't have a re-assembly going on with
   1906 	 * the same Stream/Seq (for ordered) or in
   1907 	 * the same Stream for unordered.
   1908 	 */
   1909 	if (control != NULL) {
   1910 		if (ordered || asoc->idata_supported) {
   1911 			SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
   1912 				chk_flags, mid);
   1913 			SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
   1914 			goto err_out;
   1915 		} else {
   1916 			if ((control->first_frag_seen) &&
   1917 			    (tsn == control->fsn_included + 1) &&
   1918 			    (control->end_added == 0)) {
   1919 				SCTP_SNPRINTF(msg, sizeof(msg),
   1920 				              "Illegal message sequence, missing end for MID: %8.8x",
   1921 				              control->fsn_included);
   1922 				goto err_out;
   1923 			} else {
   1924 				control = NULL;
   1925 			}
   1926 		}
   1927 	}
   1928 }
   1929 /* now do the tests */
   1930 if (((asoc->cnt_on_all_streams +
   1931       asoc->cnt_on_reasm_queue +
   1932       asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
   1933     (((int)asoc->my_rwnd) <= 0)) {
   1934 	/*
   1935 	 * When we have NO room in the rwnd we check to make sure
   1936 	 * the reader is doing its job...
   1937 	 */
   1938 	if (SCTP_SBAVAIL(&stcb->sctp_socket->so_rcv) > 0) {
   1939 		/* some to read, wake-up */
   1940 #if defined(__APPLE__) && !defined(__Userspace__)
   1941 		struct socket *so;
   1942 
   1943 		so = SCTP_INP_SO(stcb->sctp_ep);
   1944 		atomic_add_int(&stcb->asoc.refcnt, 1);
   1945 		SCTP_TCB_UNLOCK(stcb);
   1946 		SCTP_SOCKET_LOCK(so, 1);
   1947 		SCTP_TCB_LOCK(stcb);
   1948 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
   1949 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
   1950 			/* assoc was freed while we were unlocked */
   1951 			SCTP_SOCKET_UNLOCK(so, 1);
   1952 			return (0);
   1953 		}
   1954 #endif
   1955 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
   1956 #if defined(__APPLE__) && !defined(__Userspace__)
   1957 		SCTP_SOCKET_UNLOCK(so, 1);
   1958 #endif
   1959 	}
   1960 	/* now is it in the mapping array of what we have accepted? */
   1961 	if (chk_type == SCTP_DATA) {
   1962 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
   1963 		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
   1964 			/* Nope not in the valid range dump it */
   1965 		dump_packet:
   1966 			sctp_set_rwnd(stcb, asoc);
   1967 			if ((asoc->cnt_on_all_streams +
   1968 			     asoc->cnt_on_reasm_queue +
   1969 			     asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
   1970 				SCTP_STAT_INCR(sctps_datadropchklmt);
   1971 			} else {
   1972 				SCTP_STAT_INCR(sctps_datadroprwnd);
   1973 			}
   1974 			*break_flag = 1;
   1975 			return (0);
   1976 		}
   1977 	} else {
   1978 		if (control == NULL) {
   1979 			goto dump_packet;
   1980 		}
   1981 		if (SCTP_TSN_GT(fsn, control->top_fsn)) {
   1982 			goto dump_packet;
   1983 		}
   1984 	}
   1985 }
   1986 #ifdef SCTP_ASOCLOG_OF_TSNS
   1987 SCTP_TCB_LOCK_ASSERT(stcb);
   1988 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
   1989 	asoc->tsn_in_at = 0;
   1990 	asoc->tsn_in_wrapped = 1;
   1991 }
   1992 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
   1993 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
   1994 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
   1995 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
   1996 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
   1997 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
   1998 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
   1999 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
   2000 asoc->tsn_in_at++;
   2001 #endif
   2002 /*
   2003  * Before we continue lets validate that we are not being fooled by
   2004  * an evil attacker. We can only have Nk chunks based on our TSN
   2005  * spread allowed by the mapping array N * 8 bits, so there is no
   2006  * way our stream sequence numbers could have wrapped. We of course
   2007  * only validate the FIRST fragment so the bit must be set.
   2008  */
   2009 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
   2010     (TAILQ_EMPTY(&asoc->resetHead)) &&
   2011     (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
   2012     SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
   2013 	/* The incoming sseq is behind where we last delivered? */
   2014 	SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
   2015 		mid, asoc->strmin[sid].last_mid_delivered);
   2016 
   2017 	if (asoc->idata_supported) {
   2018 		SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
   2019 		              asoc->strmin[sid].last_mid_delivered,
   2020 		              tsn,
   2021 		              sid,
   2022 		              mid);
   2023 	} else {
   2024 		SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
   2025 		              (uint16_t)asoc->strmin[sid].last_mid_delivered,
   2026 		              tsn,
   2027 		              sid,
   2028 		              (uint16_t)mid);
   2029 	}
   2030 	op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   2031 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
   2032 	sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
   2033 	*abort_flag = 1;
   2034 	return (0);
   2035 }
   2036 if (chk_type == SCTP_IDATA) {
   2037 	the_len = (chk_length - sizeof(struct sctp_idata_chunk));
   2038 } else {
   2039 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
   2040 }
   2041 if (last_chunk == 0) {
   2042 	if (chk_type == SCTP_IDATA) {
   2043 		dmbuf = SCTP_M_COPYM(*m,
   2044 				     (offset + sizeof(struct sctp_idata_chunk)),
   2045 				     the_len, M_NOWAIT);
   2046 	} else {
   2047 		dmbuf = SCTP_M_COPYM(*m,
   2048 				     (offset + sizeof(struct sctp_data_chunk)),
   2049 				     the_len, M_NOWAIT);
   2050 	}
   2051 #ifdef SCTP_MBUF_LOGGING
   2052 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
   2053 		sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
   2054 	}
   2055 #endif
   2056 } else {
   2057 	/* We can steal the last chunk */
   2058 	int l_len;
   2059 	dmbuf = *m;
   2060 	/* lop off the top part */
   2061 	if (chk_type == SCTP_IDATA) {
   2062 		m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
   2063 	} else {
   2064 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
   2065 	}
   2066 	if (SCTP_BUF_NEXT(dmbuf) == NULL) {
   2067 		l_len = SCTP_BUF_LEN(dmbuf);
   2068 	} else {
   2069 		/* need to count up the size hopefully
   2070 		 * does not hit this to often :-0
   2071 		 */
   2072 		struct mbuf *lat;
   2073 
   2074 		l_len = 0;
   2075 		for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
   2076 			l_len += SCTP_BUF_LEN(lat);
   2077 		}
   2078 	}
   2079 	if (l_len > the_len) {
   2080 		/* Trim the end round bytes off  too */
   2081 		m_adj(dmbuf, -(l_len - the_len));
   2082 	}
   2083 }
   2084 if (dmbuf == NULL) {
   2085 	SCTP_STAT_INCR(sctps_nomem);
   2086 	return (0);
   2087 }
   2088 /*
   2089  * Now no matter what, we need a control, get one
   2090  * if we don't have one (we may have gotten it
   2091  * above when we found the message was fragmented
   2092  */
   2093 if (control == NULL) {
   2094 	sctp_alloc_a_readq(stcb, control);
   2095 	sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
   2096 				   ppid,
   2097 				   sid,
   2098 				   chk_flags,
   2099 				   NULL, fsn, mid);
   2100 	if (control == NULL) {
   2101 		SCTP_STAT_INCR(sctps_nomem);
   2102 		return (0);
   2103 	}
   2104 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
   2105 		struct mbuf *mm;
   2106 
   2107 		control->data = dmbuf;
   2108 		control->tail_mbuf = NULL;
   2109 		for (mm = control->data; mm; mm = mm->m_next) {
   2110 			control->length += SCTP_BUF_LEN(mm);
   2111 			if (SCTP_BUF_NEXT(mm) == NULL) {
   2112 				control->tail_mbuf = mm;
   2113 			}
   2114 		}
   2115 		control->end_added = 1;
   2116 		control->last_frag_seen = 1;
   2117 		control->first_frag_seen = 1;
   2118 		control->fsn_included = fsn;
   2119 		control->top_fsn = fsn;
   2120 	}
   2121 	created_control = 1;
   2122 }
   2123 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
   2124 	chk_flags, ordered, mid, control);
   2125 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
   2126     TAILQ_EMPTY(&asoc->resetHead) &&
   2127     ((ordered == 0) ||
   2128      (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
   2129       TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
   2130 	/* Candidate for express delivery */
   2131 	/*
   2132 	 * Its not fragmented, No PD-API is up, Nothing in the
   2133 	 * delivery queue, Its un-ordered OR ordered and the next to
   2134 	 * deliver AND nothing else is stuck on the stream queue,
   2135 	 * And there is room for it in the socket buffer. Lets just
   2136 	 * stuff it up the buffer....
   2137 	 */
   2138 	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
   2139 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
   2140 		asoc->highest_tsn_inside_nr_map = tsn;
   2141 	}
   2142 	SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
   2143 		control, mid);
   2144 
   2145 	sctp_add_to_readq(stcb->sctp_ep, stcb,
   2146 	                  control, &stcb->sctp_socket->so_rcv,
   2147 	                  1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
   2148 
   2149 	if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
   2150 		/* for ordered, bump what we delivered */
   2151 		asoc->strmin[sid].last_mid_delivered++;
   2152 	}
   2153 	SCTP_STAT_INCR(sctps_recvexpress);
   2154 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
   2155 		sctp_log_strm_del_alt(stcb, tsn, mid, sid,
   2156 				      SCTP_STR_LOG_FROM_EXPRS_DEL);
   2157 	}
   2158 	control = NULL;
   2159 	goto finish_express_del;
   2160 }
   2161 
   2162 /* Now will we need a chunk too? */
   2163 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
   2164 	sctp_alloc_a_chunk(stcb, chk);
   2165 	if (chk == NULL) {
   2166 		/* No memory so we drop the chunk */
   2167 		SCTP_STAT_INCR(sctps_nomem);
   2168 		if (last_chunk == 0) {
   2169 			/* we copied it, free the copy */
   2170 			sctp_m_freem(dmbuf);
   2171 		}
   2172 		return (0);
   2173 	}
   2174 	chk->rec.data.tsn = tsn;
   2175 	chk->no_fr_allowed = 0;
   2176 	chk->rec.data.fsn = fsn;
   2177 	chk->rec.data.mid = mid;
   2178 	chk->rec.data.sid = sid;
   2179 	chk->rec.data.ppid = ppid;
   2180 	chk->rec.data.context = stcb->asoc.context;
   2181 	chk->rec.data.doing_fast_retransmit = 0;
   2182 	chk->rec.data.rcv_flags = chk_flags;
   2183 	chk->asoc = asoc;
   2184 	chk->send_size = the_len;
   2185 	chk->whoTo = net;
   2186 	SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
   2187 		chk,
   2188 		control, mid);
   2189 	atomic_add_int(&net->ref_count, 1);
   2190 	chk->data = dmbuf;
   2191 }
   2192 /* Set the appropriate TSN mark */
   2193 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
   2194 	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
   2195 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
   2196 		asoc->highest_tsn_inside_nr_map = tsn;
   2197 	}
   2198 } else {
   2199 	SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
   2200 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
   2201 		asoc->highest_tsn_inside_map = tsn;
   2202 	}
   2203 }
   2204 /* Now is it complete (i.e. not fragmented)? */
   2205 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
   2206 	/*
   2207 	 * Special check for when streams are resetting. We
   2208 	 * could be more smart about this and check the
   2209 	 * actual stream to see if it is not being reset..
   2210 	 * that way we would not create a HOLB when amongst
   2211 	 * streams being reset and those not being reset.
   2212 	 *
   2213 	 */
   2214 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
   2215 	    SCTP_TSN_GT(tsn, liste->tsn)) {
   2216 		/*
   2217 		 * yep its past where we need to reset... go
   2218 		 * ahead and queue it.
   2219 		 */
   2220 		if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
   2221 			/* first one on */
   2222 			TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
   2223 		} else {
   2224 			struct sctp_queued_to_read *lcontrol, *nlcontrol;
   2225 			unsigned char inserted = 0;
   2226 			TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
   2227 				if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
   2228 					continue;
   2229 				} else {
   2230 					/* found it */
   2231 					TAILQ_INSERT_BEFORE(lcontrol, control, next);
   2232 					inserted = 1;
   2233 					break;
   2234 				}
   2235 			}
   2236 			if (inserted == 0) {
   2237 				/*
   2238 				 * must be put at end, use
   2239 				 * prevP (all setup from
   2240 				 * loop) to setup nextP.
   2241 				 */
   2242 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
   2243 			}
   2244 		}
   2245 		goto finish_express_del;
   2246 	}
   2247 	if (chk_flags & SCTP_DATA_UNORDERED) {
   2248 		/* queue directly into socket buffer */
   2249 		SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
   2250 			control, mid);
   2251 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
   2252 		sctp_add_to_readq(stcb->sctp_ep, stcb,
   2253 		                  control,
   2254 		                  &stcb->sctp_socket->so_rcv, 1,
   2255 		                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
   2256 
   2257 	} else {
   2258 		SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
   2259 			mid);
   2260 		sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
   2261 		if (*abort_flag) {
   2262 			if (last_chunk) {
   2263 				*m = NULL;
   2264 			}
   2265 			return (0);
   2266 		}
   2267 	}
   2268 	goto finish_express_del;
   2269 }
   2270 /* If we reach here its a reassembly */
   2271 need_reasm_check = 1;
   2272 SCTPDBG(SCTP_DEBUG_XXX,
   2273 	"Queue data to stream for reasm control: %p MID: %u\n",
   2274 	control, mid);
   2275 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
   2276 if (*abort_flag) {
   2277 	/*
   2278 	 * the assoc is now gone and chk was put onto the
   2279 	 * reasm queue, which has all been freed.
   2280 	 */
   2281 	if (last_chunk) {
   2282 		*m = NULL;
   2283 	}
   2284 	return (0);
   2285 }
   2286 finish_express_del:
   2287 /* Here we tidy up things */
   2288 if (tsn == (asoc->cumulative_tsn + 1)) {
   2289 	/* Update cum-ack */
   2290 	asoc->cumulative_tsn = tsn;
   2291 }
   2292 if (last_chunk) {
   2293 	*m = NULL;
   2294 }
   2295 if (ordered) {
   2296 	SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
   2297 } else {
   2298 	SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
   2299 }
   2300 SCTP_STAT_INCR(sctps_recvdata);
   2301 /* Set it present please */
   2302 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
   2303 	sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
   2304 }
   2305 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
   2306 	sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
   2307 		     asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
   2308 }
   2309 if (need_reasm_check) {
   2310 	(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
   2311 	need_reasm_check = 0;
   2312 }
   2313 /* check the special flag for stream resets */
   2314 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
   2315     SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
   2316 	/*
   2317 	 * we have finished working through the backlogged TSN's now
   2318 	 * time to reset streams. 1: call reset function. 2: free
   2319 	 * pending_reply space 3: distribute any chunks in
   2320 	 * pending_reply_queue.
   2321 	 */
   2322 	sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
   2323 	TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
   2324 	sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
   2325 	SCTP_FREE(liste, SCTP_M_STRESET);
   2326 	/*sa_ignore FREED_MEMORY*/
   2327 	liste = TAILQ_FIRST(&asoc->resetHead);
   2328 	if (TAILQ_EMPTY(&asoc->resetHead)) {
   2329 		/* All can be removed */
   2330 		TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
   2331 			TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
   2332 			strm = &asoc->strmin[control->sinfo_stream];
   2333 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
   2334 			if (*abort_flag) {
   2335 				return (0);
   2336 			}
   2337 			if (need_reasm_check) {
   2338 				(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
   2339 				need_reasm_check = 0;
   2340 			}
   2341 		}
   2342 	} else {
   2343 		TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
   2344 			if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
   2345 				break;
   2346 			}
   2347 			/*
   2348 			 * if control->sinfo_tsn is <= liste->tsn we can
   2349 			 * process it which is the NOT of
   2350 			 * control->sinfo_tsn > liste->tsn
   2351 			 */
   2352 			TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
   2353 			strm = &asoc->strmin[control->sinfo_stream];
   2354 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
   2355 			if (*abort_flag) {
   2356 				return (0);
   2357 			}
   2358 			if (need_reasm_check) {
   2359 				(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
   2360 				need_reasm_check = 0;
   2361 			}
   2362 		}
   2363 	}
   2364 }
   2365 return (1);
   2366 }
   2367 
/*
 * Lookup table used when sliding the mapping arrays: for a byte value
 * `val`, sctp_map_lookup_tab[val] is the number of consecutive 1-bits
 * starting at bit 0 (i.e. the index of the lowest clear bit).  E.g.
 * tab[0x00] = 0, tab[0x01] = 1, tab[0x07] = 3, tab[0xff] = 8.  This lets
 * the cum-ack scan advance by a per-byte bit count without looping over
 * individual bits (see its use in sctp_slide_mapping_arrays).
 */
static const int8_t sctp_map_lookup_tab[256] = {
 0, 1, 0, 2, 0, 1, 0, 3,
 0, 1, 0, 2, 0, 1, 0, 4,
 0, 1, 0, 2, 0, 1, 0, 3,
 0, 1, 0, 2, 0, 1, 0, 5,
 0, 1, 0, 2, 0, 1, 0, 3,
 0, 1, 0, 2, 0, 1, 0, 4,
 0, 1, 0, 2, 0, 1, 0, 3,
 0, 1, 0, 2, 0, 1, 0, 6,
 0, 1, 0, 2, 0, 1, 0, 3,
 0, 1, 0, 2, 0, 1, 0, 4,
 0, 1, 0, 2, 0, 1, 0, 3,
 0, 1, 0, 2, 0, 1, 0, 5,
 0, 1, 0, 2, 0, 1, 0, 3,
 0, 1, 0, 2, 0, 1, 0, 4,
 0, 1, 0, 2, 0, 1, 0, 3,
 0, 1, 0, 2, 0, 1, 0, 7,
 0, 1, 0, 2, 0, 1, 0, 3,
 0, 1, 0, 2, 0, 1, 0, 4,
 0, 1, 0, 2, 0, 1, 0, 3,
 0, 1, 0, 2, 0, 1, 0, 5,
 0, 1, 0, 2, 0, 1, 0, 3,
 0, 1, 0, 2, 0, 1, 0, 4,
 0, 1, 0, 2, 0, 1, 0, 3,
 0, 1, 0, 2, 0, 1, 0, 6,
 0, 1, 0, 2, 0, 1, 0, 3,
 0, 1, 0, 2, 0, 1, 0, 4,
 0, 1, 0, 2, 0, 1, 0, 3,
 0, 1, 0, 2, 0, 1, 0, 5,
 0, 1, 0, 2, 0, 1, 0, 3,
 0, 1, 0, 2, 0, 1, 0, 4,
 0, 1, 0, 2, 0, 1, 0, 3,
 0, 1, 0, 2, 0, 1, 0, 8
};
   2402 
/*
 * Recompute the cumulative TSN from the mapping arrays and, when
 * possible, reclaim space in them: either wipe both arrays completely
 * (the cum-ack has caught up with the highest TSN seen) or slide their
 * contents down and advance mapping_array_base_tsn accordingly.
 */
void
sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
{
	/*
	 * Now we also need to check the mapping array in a couple of ways.
	 * 1) Did we move the cum-ack point?
	 *
	 * When you first glance at this you might think
	 * that all entries that make up the position
	 * of the cum-ack would be in the nr-mapping array
	 * only.. i.e. things up to the cum-ack are always
	 * deliverable. Thats true with one exception, when
	 * its a fragmented message we may not deliver the data
	 * until some threshold (or all of it) is in place. So
	 * we must OR the nr_mapping_array and mapping_array to
	 * get a true picture of the cum-ack.
	 */
	struct sctp_association *asoc;
	int at;			/* count of contiguous received bits from the base TSN */
	uint8_t val;
	int slide_from, slide_end, lgap, distance;
	uint32_t old_cumack, old_base, old_highest, highest_tsn;

	asoc = &stcb->asoc;

	/* Snapshot the old values for map logging below. */
	old_cumack = asoc->cumulative_tsn;
	old_base = asoc->mapping_array_base_tsn;
	old_highest = asoc->highest_tsn_inside_map;
	/*
	 * We could probably improve this a small bit by calculating the
	 * offset of the current cum-ack as the starting point.
	 */
	at = 0;
	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
		if (val == 0xff) {
			at += 8;
		} else {
			/* there is a 0 bit */
			at += sctp_map_lookup_tab[val];
			break;
		}
	}
	/* slide_from now holds the index of the first NOT-all-ones byte. */
	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1);

	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
#ifdef INVARIANTS
		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
		      asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
#else
		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
			    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
		sctp_print_mapping_array(asoc);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		/* Repair the inconsistency instead of panicking. */
		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
#endif
	}
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
		/* The complete array was completed by a single FR */
		/* highest becomes the cum-ack */
		int clr;
#ifdef INVARIANTS
		unsigned int i;
#endif

		/* clear the array */
		clr = ((at+7) >> 3);	/* whole bytes covered, rounded up */
		if (clr > asoc->mapping_array_size) {
			clr = asoc->mapping_array_size;
		}
		memset(asoc->mapping_array, 0, clr);
		memset(asoc->nr_mapping_array, 0, clr);
#ifdef INVARIANTS
		for (i = 0; i < asoc->mapping_array_size; i++) {
			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
				sctp_print_mapping_array(asoc);
			}
		}
#endif
		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
	} else if (at >= 8) {
		/* we can slide the mapping array down */
		/* slide_from holds where we hit the first NON 0xff byte */

		/*
		 * now calculate the ceiling of the move using our highest
		 * TSN value
		 */
		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
		slide_end = (lgap >> 3);	/* byte index holding the highest TSN */
		if (slide_end < slide_from) {
			sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
			panic("impossible slide");
#else
			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
			            lgap, slide_end, slide_from, at);
			return;
#endif
		}
		if (slide_end > asoc->mapping_array_size) {
#ifdef INVARIANTS
			panic("would overrun buffer");
#else
			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
			            asoc->mapping_array_size, slide_end);
			slide_end = asoc->mapping_array_size;
#endif
		}
		distance = (slide_end - slide_from) + 1;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(old_base, old_cumack, old_highest,
				     SCTP_MAP_PREPARE_SLIDE);
			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
				     (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
		}
		if (distance + slide_from > asoc->mapping_array_size ||
		    distance < 0) {
			/*
			 * Here we do NOT slide forward the array so that
			 * hopefully when more data comes in to fill it up
			 * we will be able to slide it forward. Really I
			 * don't think this should happen :-0
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
					     (uint32_t) asoc->mapping_array_size,
					     SCTP_MAP_SLIDE_NONE);
			}
		} else {
			int ii;

			/* Copy the live bytes down, then zero the tail. */
			for (ii = 0; ii < distance; ii++) {
				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
			}
			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
				asoc->mapping_array[ii] = 0;
				asoc->nr_mapping_array[ii] = 0;
			}
			/*
			 * Keep the "map is empty" encoding (highest == base - 1)
			 * valid across the base move.
			 */
			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_map += (slide_from << 3);
			}
			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
			}
			asoc->mapping_array_base_tsn += (slide_from << 3);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(asoc->mapping_array_base_tsn,
					     asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
					     SCTP_MAP_SLIDE_RESULT);
			}
		}
	}
}
   2569 
/*
 * Decide whether a SACK must be sent now or the delayed-ack timer
 * should run, after first sliding the mapping arrays.  was_a_gap tells
 * us whether a gap existed before the packet just processed; we compare
 * against the current gap state to detect gap closure.
 */
void
sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
{
	struct sctp_association *asoc;
	uint32_t highest_tsn;
	int is_a_gap;

	sctp_slide_mapping_arrays(stcb);
	asoc = &stcb->asoc;
	/* highest_tsn = max of the renegable and non-renegable maps */
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	/* Is there a gap now? */
	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);

	/*
	 * Now we need to see if we need to queue a sack or just start the
	 * timer (if allowed).
	 */
	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * Ok special case, in SHUTDOWN-SENT case. here we
		 * make sure SACK timer is off and instead send a
		 * SHUTDOWN and a SACK
		 */
		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
			                stcb->sctp_ep, stcb, NULL,
			                SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
		}
		sctp_send_shutdown(stcb,
		                   ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
		if (is_a_gap) {
			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
		}
	} else {
		/*
		 * CMT DAC algorithm: increase number of packets
		 * received since last ack
		 */
		stcb->asoc.cmt_dac_pkts_rcvd++;

		if ((stcb->asoc.send_sack == 1) ||      /* We need to send a SACK */
		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
		                                         * longer is one */
		    (stcb->asoc.numduptsns) ||          /* we have dup's */
		    (is_a_gap) ||                       /* is still a gap */
		    (stcb->asoc.delayed_ack == 0) ||    /* Delayed sack disabled */
		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) {	/* hit limit of pkts */
			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
			    (stcb->asoc.send_sack == 0) &&
			    (stcb->asoc.numduptsns == 0) &&
			    (stcb->asoc.delayed_ack) &&
			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
				/*
				 * CMT DAC algorithm: With CMT,
				 * delay acks even in the face of
				 * reordering. Therefore, if acks
				 * that do not have to be sent
				 * because of the above reasons,
				 * will be delayed. That is, acks
				 * that would have been sent due to
				 * gap reports will be delayed with
				 * DAC. Start the delayed ack timer.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				                 stcb->sctp_ep, stcb, NULL);
			} else {
				/*
				 * Ok we must build a SACK since the
				 * timer is pending, we got our
				 * first packet OR there are gaps or
				 * duplicates.
				 */
				sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Nothing urgent: arm the delayed-ack timer if idle. */
			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				                 stcb->sctp_ep, stcb, NULL);
			}
		}
	}
}
   2659 
/*
 * Walk the chunk region of an inbound packet starting at *offset and
 * process every DATA/I-DATA chunk via sctp_process_a_data_chunk().
 * Non-data chunk types found here are either aborted on (known control
 * chunks may not follow DATA) or handled by the unknown-chunk bit rules.
 * On return *high_tsn holds the highest TSN seen and a SACK has been
 * sent or scheduled via sctp_sack_check().
 *
 * Returns 0 on normal completion, 1 if the first chunk header cannot
 * be read from the mbuf chain, and 2 if the association was aborted.
 */
int
sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
                  struct sctp_inpcb *inp, struct sctp_tcb *stcb,
                  struct sctp_nets *net, uint32_t *high_tsn)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	struct sctp_association *asoc;
	int num_chunks = 0;	/* number of control chunks processed */
	int stop_proc = 0;
	int break_flag, last_chunk;
	int abort_flag = 0, was_a_gap;
	struct mbuf *m;
	uint32_t highest_tsn;
	uint16_t chk_length;

	/* set the rwnd */
	sctp_set_rwnd(stcb, &stcb->asoc);

	m = *mm;
	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	/* Remember whether a gap existed before this packet (for sack_check). */
	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
	/*
	 * setup where we got the last DATA packet from for any SACK that
	 * may need to go out. Don't bump the net. This is done ONLY when a
	 * chunk is assigned.
	 */
	asoc->last_data_chunk_from = net;

	/*-
	 * Now before we proceed we must figure out if this is a wasted
	 * cluster... i.e. it is a small packet sent in and yet the driver
	 * underneath allocated a full cluster for it. If so we must copy it
	 * to a smaller mbuf and free up the cluster mbuf. This will help
	 * with cluster starvation.
	 */
	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
		/* we only handle mbufs that are singletons.. not chains */
		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
		if (m) {
			/* ok lets see if we can copy the data up */
			caddr_t *from, *to;
			/* get the pointers and copy */
			to = mtod(m, caddr_t *);
			from = mtod((*mm), caddr_t *);
			memcpy(to, from, SCTP_BUF_LEN((*mm)));
			/* copy the length and free up the old */
			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
			sctp_m_freem(*mm);
			/* success, back copy */
			*mm = m;
		} else {
			/* We are in trouble in the mbuf world .. yikes */
			m = *mm;
		}
	}
	/* get pointer to the first chunk header */
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
	                                           sizeof(struct sctp_chunkhdr),
	                                           (uint8_t *)&chunk_buf);
	if (ch == NULL) {
		return (1);
	}
	/*
	 * process all DATA chunks...
	 */
	*high_tsn = asoc->cumulative_tsn;
	break_flag = 0;
	asoc->data_pkts_seen++;
	while (stop_proc == 0) {
		/* validate chunk length */
		chk_length = ntohs(ch->chunk_length);
		if (length - *offset < chk_length) {
			/* all done, mutilated chunk */
			stop_proc = 1;
			continue;
		}
		/* DATA vs. I-DATA must match what was negotiated; abort otherwise. */
		if ((asoc->idata_supported == 1) &&
		    (ch->chunk_type == SCTP_DATA)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
			sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			return (2);
		}
		if ((asoc->idata_supported == 0) &&
		    (ch->chunk_type == SCTP_IDATA)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
			sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			return (2);
		}
		if ((ch->chunk_type == SCTP_DATA) ||
		    (ch->chunk_type == SCTP_IDATA)) {
			uint16_t clen;

			if (ch->chunk_type == SCTP_DATA) {
				clen = sizeof(struct sctp_data_chunk);
			} else {
				clen = sizeof(struct sctp_idata_chunk);
			}
			if (chk_length < clen) {
				/*
				 * Need to send an abort since we had a
				 * invalid data chunk.
				 */
				struct mbuf *op_err;
				char msg[SCTP_DIAG_INFO_LEN];

				SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
				              ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
				              chk_length);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
				sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
				return (2);
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_audit_log(0xB1, 0);
#endif
			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
				last_chunk = 1;
			} else {
				last_chunk = 0;
			}
			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
						      chk_length, net, high_tsn, &abort_flag, &break_flag,
						      last_chunk, ch->chunk_type)) {
				num_chunks++;
			}
			if (abort_flag)
				return (2);

			if (break_flag) {
				/*
				 * Set because of out of rwnd space and no
				 * drop rep space left.
				 */
				stop_proc = 1;
				continue;
			}
		} else {
			/* not a data chunk in the data region */
			switch (ch->chunk_type) {
			case SCTP_INITIATION:
			case SCTP_INITIATION_ACK:
			case SCTP_SELECTIVE_ACK:
			case SCTP_NR_SELECTIVE_ACK:
			case SCTP_HEARTBEAT_REQUEST:
			case SCTP_HEARTBEAT_ACK:
			case SCTP_ABORT_ASSOCIATION:
			case SCTP_SHUTDOWN:
			case SCTP_SHUTDOWN_ACK:
			case SCTP_OPERATION_ERROR:
			case SCTP_COOKIE_ECHO:
			case SCTP_COOKIE_ACK:
			case SCTP_ECN_ECHO:
			case SCTP_ECN_CWR:
			case SCTP_SHUTDOWN_COMPLETE:
			case SCTP_AUTHENTICATION:
			case SCTP_ASCONF_ACK:
			case SCTP_PACKET_DROPPED:
			case SCTP_STREAM_RESET:
			case SCTP_FORWARD_CUM_TSN:
			case SCTP_ASCONF:
			{
				/*
				 * Now, what do we do with KNOWN chunks that
				 * are NOT in the right place?
				 *
				 * For now, I do nothing but ignore them. We
				 * may later want to add sysctl stuff to
				 * switch out and do either an ABORT() or
				 * possibly process them.
				 */
				struct mbuf *op_err;
				char msg[SCTP_DIAG_INFO_LEN];

				SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
				              ch->chunk_type);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
				return (2);
			}
			default:
				/*
				 * Unknown chunk type: use bit rules after
				 * checking length
				 */
				if (chk_length < sizeof(struct sctp_chunkhdr)) {
					/*
					 * Need to send an abort since we had a
					 * invalid chunk.
					 */
					struct mbuf *op_err;
					char msg[SCTP_DIAG_INFO_LEN];

					SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
					sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
					return (2);
				}
				/* Upper bit 0x40 set: report the unrecognized chunk. */
				if (ch->chunk_type & 0x40) {
					/* Add a error report to the queue */
					struct mbuf *op_err;
					struct sctp_gen_error_cause *cause;

					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
					                               0, M_NOWAIT, 1, MT_DATA);
					if (op_err != NULL) {
						cause  = mtod(op_err, struct sctp_gen_error_cause *);
						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
						if (SCTP_BUF_NEXT(op_err) != NULL) {
							sctp_queue_op_err(stcb, op_err);
						} else {
							sctp_m_freem(op_err);
						}
					}
				}
				/* Upper bit 0x80 clear: stop processing the packet. */
				if ((ch->chunk_type & 0x80) == 0) {
					/* discard the rest of this packet */
					stop_proc = 1;
				}	/* else skip this bad chunk and
					 * continue... */
				break;
			}	/* switch of chunk type */
		}
		*offset += SCTP_SIZE32(chk_length);
		if ((*offset >= length) || stop_proc) {
			/* no more data left in the mbuf chain */
			stop_proc = 1;
			continue;
		}
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
		                                           sizeof(struct sctp_chunkhdr),
		                                           (uint8_t *)&chunk_buf);
		if (ch == NULL) {
			*offset = length;
			stop_proc = 1;
			continue;
		}
	}
	if (break_flag) {
		/*
		 * we need to report rwnd overrun drops.
		 */
		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
	}
	if (num_chunks) {
		/*
		 * Did we get data, if so update the time for auto-close and
		 * give peer credit for being alive.
		 */
		SCTP_STAT_INCR(sctps_recvpktwithdata);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
				       stcb->asoc.overall_error_count,
				       0,
				       SCTP_FROM_SCTP_INDATA,
				       __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
	}
	/* now service all of the reassm queue if needed */
	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
		/* Assure that we ack right away */
		stcb->asoc.send_sack = 1;
	}
	/* Start a sack timer or QUEUE a SACK for sending */
	sctp_sack_check(stcb, was_a_gap);
	return (0);
}
   2949 
/*
 * Process one gap-ack block [frag_strt, frag_end] (offsets relative to
 * last_tsn) against the sent queue.  Chunks whose TSN falls in the range
 * are marked ACKED (or, for nr-sacks, NR_ACKED with their data freed),
 * flight size is reduced, RTO may be sampled once per SACK (*rto_ok),
 * and the CMT pseudo-cumack / SFR / DAC trackers are updated.
 *
 * *p_tp1 carries the queue position between successive calls so blocks
 * that arrive in order are processed without rescanning the queue.
 * Returns non-zero (wake_him) only when nr-sacking freed chunk data.
 */
static int
sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
			   uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
			   int *num_frs,
			   uint32_t *biggest_newly_acked_tsn,
			   uint32_t  *this_sack_lowest_newack,
			   int *rto_ok)
{
	struct sctp_tmit_chunk *tp1;
	unsigned int theTSN;
	int j, wake_him = 0, circled = 0;

	/* Recover the tp1 we last saw */
	tp1 = *p_tp1;
	if (tp1 == NULL) {
		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
	}
	for (j = frag_strt; j <= frag_end; j++) {
		theTSN = j + last_tsn;	/* absolute TSN covered by this offset */
		while (tp1) {
			if (tp1->rec.data.doing_fast_retransmit)
				(*num_frs) += 1;

			/*-
			 * CMT: CUCv2 algorithm. For each TSN being
			 * processed from the sent queue, track the
			 * next expected pseudo-cumack, or
			 * rtx_pseudo_cumack, if required. Separate
			 * cumack trackers for first transmissions,
			 * and retransmissions.
			 */
			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->whoTo->find_pseudo_cumack == 1) &&
			    (tp1->snd_count == 1)) {
				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
				tp1->whoTo->find_pseudo_cumack = 0;
			}
			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
			    (tp1->snd_count > 1)) {
				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
				tp1->whoTo->find_rtx_pseudo_cumack = 0;
			}
			if (tp1->rec.data.tsn == theTSN) {
				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
					/*-
					 * must be held until
					 * cum-ack passes
					 */
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						/*-
						 * If it is less than RESEND, it is
						 * now no-longer in flight.
						 * Higher values may already be set
						 * via previous Gap Ack Blocks...
						 * i.e. ACKED or RESEND.
						 */
						if (SCTP_TSN_GT(tp1->rec.data.tsn,
						                *biggest_newly_acked_tsn)) {
							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
						}
						/*-
						 * CMT: SFR algo (and HTNA) - set
						 * saw_newack to 1 for dest being
						 * newly acked. update
						 * this_sack_highest_newack if
						 * appropriate.
						 */
						if (tp1->rec.data.chunk_was_revoked == 0)
							tp1->whoTo->saw_newack = 1;

						if (SCTP_TSN_GT(tp1->rec.data.tsn,
						                tp1->whoTo->this_sack_highest_newack)) {
							tp1->whoTo->this_sack_highest_newack =
								tp1->rec.data.tsn;
						}
						/*-
						 * CMT DAC algo: also update
						 * this_sack_lowest_newack
						 */
						if (*this_sack_lowest_newack == 0) {
							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
								sctp_log_sack(*this_sack_lowest_newack,
									      last_tsn,
									      tp1->rec.data.tsn,
									      0,
									      0,
									      SCTP_LOG_TSN_ACKED);
							}
							*this_sack_lowest_newack = tp1->rec.data.tsn;
						}
						/*-
						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
						 * Separate pseudo_cumack trackers for first transmissions and
						 * retransmissions.
						 */
						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
						}
						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_rtx_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
							sctp_log_sack(*biggest_newly_acked_tsn,
								      last_tsn,
								      tp1->rec.data.tsn,
								      frag_strt,
								      frag_end,
								      SCTP_LOG_TSN_ACKED);
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
								       tp1->whoTo->flight_size,
								       tp1->book_size,
								       (uint32_t)(uintptr_t)tp1->whoTo,
								       tp1->rec.data.tsn);
						}
						sctp_flight_size_decrease(tp1);
						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
														     tp1);
						}
						sctp_total_flight_decrease(stcb, tp1);

						tp1->whoTo->net_ack += tp1->send_size;
						if (tp1->snd_count < 2) {
							/*-
							 * True non-retransmitted chunk
							 */
							tp1->whoTo->net_ack2 += tp1->send_size;

							/*-
							 * update RTO too ?
							 */
							if (tp1->do_rtt) {
								/* at most one RTO sample per SACK */
								if (*rto_ok &&
								    sctp_calculate_rto(stcb,
								                       &stcb->asoc,
								                       tp1->whoTo,
								                       &tp1->sent_rcv_time,
								                       SCTP_RTT_FROM_DATA)) {
									*rto_ok = 0;
								}
								if (tp1->whoTo->rto_needed == 0) {
									tp1->whoTo->rto_needed = 1;
								}
								tp1->do_rtt = 0;
							}
						}
					}
					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
						if (SCTP_TSN_GT(tp1->rec.data.tsn,
						                stcb->asoc.this_sack_highest_gap)) {
							stcb->asoc.this_sack_highest_gap =
								tp1->rec.data.tsn;
						}
						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
							sctp_audit_log(0xB2,
								       (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
#endif
						}
					}
					/*-
					 * All chunks NOT UNSENT fall through here and are marked
					 * (leave PR-SCTP ones that are to skip alone though)
					 */
					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						tp1->sent = SCTP_DATAGRAM_MARKED;
					}
					if (tp1->rec.data.chunk_was_revoked) {
						/* deflate the cwnd */
						tp1->whoTo->cwnd -= tp1->book_size;
						tp1->rec.data.chunk_was_revoked = 0;
					}
					/* NR Sack code here */
					if (nr_sacking &&
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
						} else {
							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
#endif
						}
						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
							stcb->asoc.trigger_reset = 1;
						}
						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
						if (tp1->data) {
							/* sa_ignore NO_NULL_CHK */
							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
							sctp_m_freem(tp1->data);
							tp1->data = NULL;
						}
						wake_him++;
					}
				}
				break;
			}	/* if (tp1->tsn == theTSN) */
			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
				break;
			}
			tp1 = TAILQ_NEXT(tp1, sctp_next);
			/* wrap around the queue once in case TSNs are out of order */
			if ((tp1 == NULL) && (circled == 0)) {
				circled++;
				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
			}
		}	/* end while (tp1) */
		if (tp1 == NULL) {
			circled = 0;
			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
		}
		/* In case the fragments were not in order we must reset */
	} /* end for (j = fragStart */
	*p_tp1 = tp1;
	return (wake_him);	/* Return value only used for nr-sack */
}
   3184 
/*
 * Iterate over the num_seg gap-ack blocks followed by the num_nr_seg
 * nr-gap-ack blocks found in the SACK at *offset, feeding each block to
 * sctp_process_segment_range().  Malformed blocks (start > end) are
 * skipped; out-of-order blocks force a rescan from the head of the
 * sent queue.  Returns 1 if any chunk data was freed (nr-sack), else 0.
 */
static int
sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
	uint32_t last_tsn, uint32_t *biggest_tsn_acked,
	uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
	int num_seg, int num_nr_seg, int *rto_ok)
{
	struct sctp_gap_ack_block *frag, block;
	struct sctp_tmit_chunk *tp1;
	int i;
	int num_frs = 0;
	int chunk_freed;
	int non_revocable;
	uint16_t frag_strt, frag_end, prev_frag_end;

	tp1 = TAILQ_FIRST(&asoc->sent_queue);
	prev_frag_end = 0;
	chunk_freed = 0;

	for (i = 0; i < (num_seg + num_nr_seg); i++) {
		if (i == num_seg) {
			/* Switching to the nr-gap blocks: restart the scan. */
			prev_frag_end = 0;
			tp1 = TAILQ_FIRST(&asoc->sent_queue);
		}
		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
		                                                  sizeof(struct sctp_gap_ack_block), (uint8_t *) &block);
		*offset += sizeof(block);
		if (frag == NULL) {
			/* Ran out of packet; return what we processed so far. */
			return (chunk_freed);
		}
		frag_strt = ntohs(frag->start);
		frag_end = ntohs(frag->end);

		if (frag_strt > frag_end) {
			/* This gap report is malformed, skip it. */
			continue;
		}
		if (frag_strt <= prev_frag_end) {
			/* This gap report is not in order, so restart. */
			tp1 = TAILQ_FIRST(&asoc->sent_queue);
		}
		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
			*biggest_tsn_acked = last_tsn + frag_end;
		}
		/* Blocks past num_seg are nr-gap-ack blocks (non-revocable). */
		if (i < num_seg) {
			non_revocable = 0;
		} else {
			non_revocable = 1;
		}
		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
		                               non_revocable, &num_frs, biggest_newly_acked_tsn,
		                               this_sack_lowest_newack, rto_ok)) {
			chunk_freed = 1;
		}
		prev_frag_end = frag_end;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		if (num_frs)
			sctp_log_fr(*biggest_tsn_acked,
			            *biggest_newly_acked_tsn,
			            last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
	}
	return (chunk_freed);
}
   3248 
/*
 * Scan the sent queue for chunks above the cumulative ack point whose
 * gap-ack status changed in this SACK.  A chunk that was ACKED before
 * but is no longer covered by a gap report has been "revoked" by the
 * peer: it is moved back to SENT and re-entered into the flight (with
 * a matching cwnd inflation) so that timers and retransmission logic
 * treat it as outstanding again.  A chunk found MARKED was re-acked in
 * this SACK and is promoted back to ACKED.
 */
static void
sctp_check_for_revoked(struct sctp_tcb *stcb,
	       struct sctp_association *asoc, uint32_t cumack,
	       uint32_t biggest_tsn_acked)
{
	struct sctp_tmit_chunk *tp1;

	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
			/*
			 * ok this guy is either ACK or MARKED. If it is
			 * ACKED it has been previously acked but not this
			 * time i.e. revoked.  If it is MARKED it was ACK'ed
			 * again.
			 */
			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
				/* Nothing beyond the highest gap-acked TSN can change. */
				break;
			}
			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
				/* it has been revoked */
				tp1->sent = SCTP_DATAGRAM_SENT;
				tp1->rec.data.chunk_was_revoked = 1;
				/* We must add this stuff back in to
				 * assure timers and such get started.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
						       tp1->whoTo->flight_size,
						       tp1->book_size,
						       (uint32_t)(uintptr_t)tp1->whoTo,
						       tp1->rec.data.tsn);
				}
				sctp_flight_size_increase(tp1);
				sctp_total_flight_increase(stcb, tp1);
				/* We inflate the cwnd to compensate for our
				 * artificial inflation of the flight_size.
				 */
				tp1->whoTo->cwnd += tp1->book_size;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
					sctp_log_sack(asoc->last_acked_seq,
						      cumack,
						      tp1->rec.data.tsn,
						      0,
						      0,
						      SCTP_LOG_TSN_REVOKED);
				}
			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
				/* it has been re-acked in this SACK */
				tp1->sent = SCTP_DATAGRAM_ACKED;
			}
		}
		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
			break;
	}
}
   3304 
/*
 * Apply the fast-retransmit "strike" rules to the sent queue after a
 * SACK's gap reports have been processed.  Chunks below the highest
 * gap-acked TSN that were not acked get their strike count (tp1->sent)
 * incremented, subject to:
 *   - the fast-recovery rule (only strike if the cum-ack moved while in
 *     fast-retran loss recovery),
 *   - the CMT SFR rule (skip destinations that saw no new acks),
 *   - the CMT DAC rule (an extra strike when the SACK covers a single
 *     destination and tp1 lies below this_sack_lowest_newack),
 *   - the HTNA rule (never strike above biggest_tsn_newly_acked).
 * A chunk reaching SCTP_DATAGRAM_RESEND is pulled out of the flight,
 * its size credited back to the peer's rwnd, possibly re-targeted to an
 * alternate destination, and (for PR-SCTP) dropped once its TTL or
 * retransmission limit is exhausted.
 */
static void
sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
		   uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
{
	struct sctp_tmit_chunk *tp1;
	int strike_flag = 0;	/* set when a chunk is struck after a prior FR */
	struct timeval now;
	uint32_t sending_seq;
	struct sctp_nets *net;
	int num_dests_sacked = 0;	/* destinations that saw new acks (DAC) */

	/*
	 * select the sending_seq, this is either the next thing ready to be
	 * sent but not transmitted, OR, the next seq we assign.
	 */
	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (tp1 == NULL) {
		sending_seq = asoc->sending_seq;
	} else {
		sending_seq = tp1->rec.data.tsn;
	}

	/* CMT DAC algo: finding out if SACK is a mixed SACK */
	if ((asoc->sctp_cmt_on_off > 0) &&
	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->saw_newack)
				num_dests_sacked++;
		}
	}
	if (stcb->asoc.prsctp_supported) {
		/* Snapshot the time once for all PR-SCTP TTL checks below. */
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
		strike_flag = 0;
		if (tp1->no_fr_allowed) {
			/* this one had a timeout or something */
			continue;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND)
				sctp_log_fr(biggest_tsn_newly_acked,
					    tp1->rec.data.tsn,
					    tp1->sent,
					    SCTP_FR_LOG_CHECK_STRIKE);
		}
		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
			/* done */
			break;
		}
		if (stcb->asoc.prsctp_supported) {
			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
				/* Is it expired? */
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
				if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
#else
				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
#endif
					/* Yes so drop it */
					if (tp1->data != NULL) {
						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
										 SCTP_SO_NOT_LOCKED);
					}
					continue;
				}
			}
		}
		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
		                !(accum_moved && asoc->fast_retran_loss_recovery)) {
			/* we are beyond the tsn in the sack  */
			break;
		}
		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
			/* either a RESEND, ACKED, or MARKED */
			/* skip */
			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
				/* Continue strikin FWD-TSN chunks */
				tp1->rec.data.fwd_tsn_cnt++;
			}
			continue;
		}
		/*
		 * CMT : SFR algo (covers part of DAC and HTNA as well)
		 */
		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
			/*
			 * No new acks were received for data sent to this
			 * dest. Therefore, according to the SFR algo for
			 * CMT, no data sent to this dest can be marked for
			 * FR using this SACK.
			 */
			continue;
		} else if (tp1->whoTo &&
		           SCTP_TSN_GT(tp1->rec.data.tsn,
		                       tp1->whoTo->this_sack_highest_newack) &&
		           !(accum_moved && asoc->fast_retran_loss_recovery)) {
			/*
			 * CMT: New acks were received for data sent to
			 * this dest. But no new acks were seen for data
			 * sent after tp1. Therefore, according to the SFR
			 * algo for CMT, tp1 cannot be marked for FR using
			 * this SACK. This step covers part of the DAC algo
			 * and the HTNA algo as well.
			 */
			continue;
		}
		/*
		 * Here we check to see if we were have already done a FR
		 * and if so we see if the biggest TSN we saw in the sack is
		 * smaller than the recovery point. If so we don't strike
		 * the tsn... otherwise we CAN strike the TSN.
		 */
		/*
		 * @@@ JRI: Check for CMT
		 * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
		 */
		if (accum_moved && asoc->fast_retran_loss_recovery) {
			/*
			 * Strike the TSN if in fast-recovery and cum-ack
			 * moved.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(biggest_tsn_newly_acked,
					    tp1->rec.data.tsn,
					    tp1->sent,
					    SCTP_FR_LOG_STRIKE_CHUNK);
			}
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				tp1->sent++;
			}
			if ((asoc->sctp_cmt_on_off > 0) &&
			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
				/*
				 * CMT DAC algorithm: If SACK flag is set to
				 * 0, then lowest_newack test will not pass
				 * because it would have been set to the
				 * cumack earlier. If not already to be
				 * rtx'd, If not a mixed sack and if tp1 is
				 * not between two sacked TSNs, then mark by
				 * one more.
				 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
				 * two packets have been received after this missing TSN.
				 */
				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(16 + num_dests_sacked,
							    tp1->rec.data.tsn,
							    tp1->sent,
							    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					tp1->sent++;
				}
			}
		} else if ((tp1->rec.data.doing_fast_retransmit) &&
		           (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * For those that have done a FR we must take
			 * special consideration if we strike. I.e the
			 * biggest_newly_acked must be higher than the
			 * sending_seq at the time we did the FR.
			 */
			if (
#ifdef SCTP_FR_TO_ALTERNATE
				/*
				 * If FR's go to new networks, then we must only do
				 * this for singly homed asoc's. However if the FR's
				 * go to the same network (Armando's work) then its
				 * ok to FR multiple times.
				 */
				(asoc->numnets < 2)
#else
				(1)
#endif
				) {
				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
				                tp1->rec.data.fast_retran_tsn)) {
					/*
					 * Strike the TSN, since this ack is
					 * beyond where things were when we
					 * did a FR.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(biggest_tsn_newly_acked,
							    tp1->rec.data.tsn,
							    tp1->sent,
							    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						tp1->sent++;
					}
					strike_flag = 1;
					if ((asoc->sctp_cmt_on_off > 0) &&
					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
						/*
						 * CMT DAC algorithm: If
						 * SACK flag is set to 0,
						 * then lowest_newack test
						 * will not pass because it
						 * would have been set to
						 * the cumack earlier. If
						 * not already to be rtx'd,
						 * If not a mixed sack and
						 * if tp1 is not between two
						 * sacked TSNs, then mark by
						 * one more.
						 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
						 * two packets have been received after this missing TSN.
						 */
						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
						    (num_dests_sacked == 1) &&
						    SCTP_TSN_GT(this_sack_lowest_newack,
						                tp1->rec.data.tsn)) {
							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
								sctp_log_fr(32 + num_dests_sacked,
									    tp1->rec.data.tsn,
									    tp1->sent,
									    SCTP_FR_LOG_STRIKE_CHUNK);
							}
							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
								tp1->sent++;
							}
						}
					}
				}
			}
			/*
			 * JRI: TODO: remove code for HTNA algo. CMT's
			 * SFR algo covers HTNA.
			 */
		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
		                       biggest_tsn_newly_acked)) {
			/*
			 * We don't strike these: This is the  HTNA
			 * algorithm i.e. we don't strike If our TSN is
			 * larger than the Highest TSN Newly Acked.
			 */
			;
		} else {
			/* Strike the TSN */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(biggest_tsn_newly_acked,
					    tp1->rec.data.tsn,
					    tp1->sent,
					    SCTP_FR_LOG_STRIKE_CHUNK);
			}
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				tp1->sent++;
			}
			if ((asoc->sctp_cmt_on_off > 0) &&
			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
				/*
				 * CMT DAC algorithm: If SACK flag is set to
				 * 0, then lowest_newack test will not pass
				 * because it would have been set to the
				 * cumack earlier. If not already to be
				 * rtx'd, If not a mixed sack and if tp1 is
				 * not between two sacked TSNs, then mark by
				 * one more.
				 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
				 * two packets have been received after this missing TSN.
				 */
				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(48 + num_dests_sacked,
							    tp1->rec.data.tsn,
							    tp1->sent,
							    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					tp1->sent++;
				}
			}
		}
		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
			/* This chunk has now accumulated enough strikes to be
			 * fast-retransmitted: undo its flight accounting and
			 * pick where the retransmission will go.
			 */
			struct sctp_nets *alt;

			/* fix counts and things */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
					       (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
					       tp1->book_size,
					       (uint32_t)(uintptr_t)tp1->whoTo,
					       tp1->rec.data.tsn);
			}
			if (tp1->whoTo) {
				tp1->whoTo->net_ack++;
				sctp_flight_size_decrease(tp1);
				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
												     tp1);
				}
			}

			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
					      asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
			}
			/* add back to the rwnd */
			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));

			/* remove from the total flight */
			sctp_total_flight_decrease(stcb, tp1);

			if ((stcb->asoc.prsctp_supported) &&
			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
				/* Has it been retransmitted tv_sec times? - we store the retran count there. */
				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
					/* Yes, so drop it */
					if (tp1->data != NULL) {
						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
										 SCTP_SO_NOT_LOCKED);
					}
					/* Make sure to flag we had a FR */
					if (tp1->whoTo != NULL) {
						tp1->whoTo->net_ack++;
					}
					continue;
				}
			}
			/* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
					    0, SCTP_FR_MARKED);
			}
			if (strike_flag) {
				/* This is a subsequent FR */
				SCTP_STAT_INCR(sctps_sendmultfastretrans);
			}
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			if (asoc->sctp_cmt_on_off > 0) {
				/*
				 * CMT: Using RTX_SSTHRESH policy for CMT.
				 * If CMT is being used, then pick dest with
				 * largest ssthresh for any retransmission.
				 */
				tp1->no_fr_allowed = 1;
				alt = tp1->whoTo;
				/*sa_ignore NO_NULL_CHK*/
				if (asoc->sctp_cmt_pf > 0) {
					/* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
					alt = sctp_find_alternate_net(stcb, alt, 2);
				} else {
					/* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
					/*sa_ignore NO_NULL_CHK*/
					alt = sctp_find_alternate_net(stcb, alt, 1);
				}
				if (alt == NULL) {
					alt = tp1->whoTo;
				}
				/*
				 * CUCv2: If a different dest is picked for
				 * the retransmission, then new
				 * (rtx-)pseudo_cumack needs to be tracked
				 * for orig dest. Let CUCv2 track new (rtx-)
				 * pseudo-cumack always.
				 */
				if (tp1->whoTo) {
					tp1->whoTo->find_pseudo_cumack = 1;
					tp1->whoTo->find_rtx_pseudo_cumack = 1;
				}
			} else {/* CMT is OFF */
#ifdef SCTP_FR_TO_ALTERNATE
				/* Can we find an alternate? */
				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
#else
				/*
				 * default behavior is to NOT retransmit
				 * FR's to an alternate. Armando Caro's
				 * paper details why.
				 */
				alt = tp1->whoTo;
#endif
			}

			tp1->rec.data.doing_fast_retransmit = 1;
			/* mark the sending seq for possible subsequent FR's */
			/*
			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
			 * (uint32_t)tpi->rec.data.tsn);
			 */
			if (TAILQ_EMPTY(&asoc->send_queue)) {
				/*
				 * If the queue of send is empty then its
				 * the next sequence number that will be
				 * assigned so we subtract one from this to
				 * get the one we last sent.
				 */
				tp1->rec.data.fast_retran_tsn = sending_seq;
			} else {
				/*
				 * If there are chunks on the send queue
				 * (unsent data that has made it from the
				 * stream queues but not out the door, we
				 * take the first one (which will have the
				 * lowest TSN) and subtract one to get the
				 * one we last sent.
				 */
				struct sctp_tmit_chunk *ttt;

				ttt = TAILQ_FIRST(&asoc->send_queue);
				tp1->rec.data.fast_retran_tsn =
					ttt->rec.data.tsn;
			}

			if (tp1->do_rtt) {
				/*
				 * this guy had a RTO calculation pending on
				 * it, cancel it
				 */
				if ((tp1->whoTo != NULL) &&
				    (tp1->whoTo->rto_needed == 0)) {
					tp1->whoTo->rto_needed = 1;
				}
				tp1->do_rtt = 0;
			}
			if (alt != tp1->whoTo) {
				/* yes, there is an alternate. */
				sctp_free_remote_addr(tp1->whoTo);
				/*sa_ignore FREED_MEMORY*/
				tp1->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
		}
	}
}
   3732 
/*
 * PR-SCTP: try to move the advanced peer ack point forward past chunks
 * that have been abandoned (SCTP_FORWARD_TSN_SKIP) or NR-acked.
 * Expired TTL-policy chunks still marked RESEND are dropped along the
 * way.  Returns the chunk holding the highest TSN the ack point was
 * advanced to (the one a FORWARD-TSN would report), or NULL if no
 * advance was possible or PR-SCTP is not supported on this association.
 */
struct sctp_tmit_chunk *
sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
   struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
	struct timeval now;
	int now_filled = 0;	/* fetch the wall clock lazily, at most once */

	if (asoc->prsctp_supported == 0) {
		return (NULL);
	}
	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
		    tp1->sent != SCTP_DATAGRAM_RESEND &&
		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* no chance to advance, out of here */
			break;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
					       asoc->advanced_peer_ack_point,
					       tp1->rec.data.tsn, 0, 0);
			}
		}
		if (!PR_SCTP_ENABLED(tp1->flags)) {
			/*
			 * We can't fwd-tsn past any that are reliable aka
			 * retransmitted until the asoc fails.
			 */
			break;
		}
		if (!now_filled) {
			(void)SCTP_GETTIME_TIMEVAL(&now);
			now_filled = 1;
		}
		/*
		 * now we got a chunk which is marked for another
		 * retransmission to a PR-stream but has run out its chances
		 * already maybe OR has been marked to skip now. Can we skip
		 * it if its a resend?
		 */
		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
			/*
			 * Now is this one marked for resend and its time is
			 * now up?
			 */
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
			if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
#else
			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
#endif
				/* Yes so drop it */
				if (tp1->data) {
					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
					    1, SCTP_SO_NOT_LOCKED);
				}
			} else {
				/*
				 * No, we are done when hit one for resend
				 * whos time as not expired.
				 */
				break;
			}
		}
		/*
		 * Ok now if this chunk is marked to drop it we can clean up
		 * the chunk, advance our peer ack point and we can check
		 * the next chunk.
		 */
		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
			/* advance PeerAckPoint goes forward */
			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
				a_adv = tp1;
			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
				/* No update but we do save the chk */
				a_adv = tp1;
			}
		} else {
			/*
			 * If it is still in RESEND we can advance no
			 * further
			 */
			break;
		}
	}
	return (a_adv);
}
   3825 
   3826 static int
   3827 sctp_fs_audit(struct sctp_association *asoc)
   3828 {
   3829 struct sctp_tmit_chunk *chk;
   3830 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
   3831 int ret;
   3832 #ifndef INVARIANTS
   3833 int entry_flight, entry_cnt;
   3834 #endif
   3835 
   3836 ret = 0;
   3837 #ifndef INVARIANTS
   3838 entry_flight = asoc->total_flight;
   3839 entry_cnt = asoc->total_flight_count;
   3840 #endif
   3841 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
   3842 	return (0);
   3843 
   3844 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
   3845 	if (chk->sent < SCTP_DATAGRAM_RESEND) {
   3846 		SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
   3847 		            chk->rec.data.tsn,
   3848 		            chk->send_size,
   3849 		            chk->snd_count);
   3850 		inflight++;
   3851 	} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
   3852 		resend++;
   3853 	} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
   3854 		inbetween++;
   3855 	} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
   3856 		above++;
   3857 	} else {
   3858 		acked++;
   3859 	}
   3860 }
   3861 
   3862 if ((inflight > 0) || (inbetween > 0)) {
   3863 #ifdef INVARIANTS
   3864 	panic("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d",
   3865 	      inflight, inbetween, resend, above, acked);
   3866 #else
   3867 	SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
   3868 	            entry_flight, entry_cnt);
   3869 	SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
   3870 	            inflight, inbetween, resend, above, acked);
   3871 	ret = 1;
   3872 #endif
   3873 }
   3874 return (ret);
   3875 }
   3876 
   3877 static void
   3878 sctp_window_probe_recovery(struct sctp_tcb *stcb,
   3879                           struct sctp_association *asoc,
   3880                           struct sctp_tmit_chunk *tp1)
   3881 {
   3882 tp1->window_probe = 0;
   3883 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
   3884 	/* TSN's skipped we do NOT move back. */
   3885 	sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
   3886 		       tp1->whoTo ? tp1->whoTo->flight_size : 0,
   3887 		       tp1->book_size,
   3888 		       (uint32_t)(uintptr_t)tp1->whoTo,
   3889 		       tp1->rec.data.tsn);
   3890 	return;
   3891 }
   3892 /* First setup this by shrinking flight */
   3893 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
   3894 	(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
   3895 								     tp1);
   3896 }
   3897 sctp_flight_size_decrease(tp1);
   3898 sctp_total_flight_decrease(stcb, tp1);
   3899 /* Now mark for resend */
   3900 tp1->sent = SCTP_DATAGRAM_RESEND;
   3901 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
   3902 
   3903 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
   3904 	sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
   3905 		       tp1->whoTo->flight_size,
   3906 		       tp1->book_size,
   3907 		       (uint32_t)(uintptr_t)tp1->whoTo,
   3908 		       tp1->rec.data.tsn);
   3909 }
   3910 }
   3911 
   3912 void
   3913 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
   3914                         uint32_t rwnd, int *abort_now, int ecne_seen)
   3915 {
   3916 struct sctp_nets *net;
   3917 struct sctp_association *asoc;
   3918 struct sctp_tmit_chunk *tp1, *tp2;
   3919 uint32_t old_rwnd;
   3920 int win_probe_recovery = 0;
   3921 int win_probe_recovered = 0;
   3922 int j, done_once = 0;
   3923 int rto_ok = 1;
   3924 uint32_t send_s;
   3925 
   3926 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
   3927 	sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
   3928 	               rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
   3929 }
   3930 SCTP_TCB_LOCK_ASSERT(stcb);
   3931 #ifdef SCTP_ASOCLOG_OF_TSNS
   3932 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
   3933 stcb->asoc.cumack_log_at++;
   3934 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
   3935 	stcb->asoc.cumack_log_at = 0;
   3936 }
   3937 #endif
   3938 asoc = &stcb->asoc;
   3939 old_rwnd = asoc->peers_rwnd;
   3940 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
   3941 	/* old ack */
   3942 	return;
   3943 } else if (asoc->last_acked_seq == cumack) {
   3944 	/* Window update sack */
   3945 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
   3946 					    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
   3947 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
   3948 		/* SWS sender side engages */
   3949 		asoc->peers_rwnd = 0;
   3950 	}
   3951 	if (asoc->peers_rwnd > old_rwnd) {
   3952 		goto again;
   3953 	}
   3954 	return;
   3955 }
   3956 
   3957 /* First setup for CC stuff */
   3958 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   3959 	if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
   3960 		/* Drag along the window_tsn for cwr's */
   3961 		net->cwr_window_tsn = cumack;
   3962 	}
   3963 	net->prev_cwnd = net->cwnd;
   3964 	net->net_ack = 0;
   3965 	net->net_ack2 = 0;
   3966 
   3967 	/*
   3968 	 * CMT: Reset CUC and Fast recovery algo variables before
   3969 	 * SACK processing
   3970 	 */
   3971 	net->new_pseudo_cumack = 0;
   3972 	net->will_exit_fast_recovery = 0;
   3973 	if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
   3974 		(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
   3975 	}
   3976 }
   3977 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
   3978 	tp1 = TAILQ_LAST(&asoc->sent_queue,
   3979 			 sctpchunk_listhead);
   3980 	send_s = tp1->rec.data.tsn + 1;
   3981 } else {
   3982 	send_s = asoc->sending_seq;
   3983 }
   3984 if (SCTP_TSN_GE(cumack, send_s)) {
   3985 	struct mbuf *op_err;
   3986 	char msg[SCTP_DIAG_INFO_LEN];
   3987 
   3988 	*abort_now = 1;
   3989 	/* XXX */
   3990 	SCTP_SNPRINTF(msg, sizeof(msg),
   3991 	              "Cum ack %8.8x greater or equal than TSN %8.8x",
   3992 	              cumack, send_s);
   3993 	op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   3994 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
   3995 	sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
   3996 	return;
   3997 }
   3998 asoc->this_sack_highest_gap = cumack;
   3999 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
   4000 	sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
   4001 		       stcb->asoc.overall_error_count,
   4002 		       0,
   4003 		       SCTP_FROM_SCTP_INDATA,
   4004 		       __LINE__);
   4005 }
   4006 stcb->asoc.overall_error_count = 0;
   4007 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
   4008 	/* process the new consecutive TSN first */
   4009 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
   4010 		if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
   4011 			if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
   4012 				SCTP_PRINTF("Warning, an unsent is now acked?\n");
   4013 			}
   4014 			if (tp1->sent < SCTP_DATAGRAM_ACKED) {
   4015 				/*
   4016 				 * If it is less than ACKED, it is
   4017 				 * now no-longer in flight. Higher
   4018 				 * values may occur during marking
   4019 				 */
   4020 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
   4021 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
   4022 						sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
   4023 							       tp1->whoTo->flight_size,
   4024 							       tp1->book_size,
   4025 							       (uint32_t)(uintptr_t)tp1->whoTo,
   4026 							       tp1->rec.data.tsn);
   4027 					}
   4028 					sctp_flight_size_decrease(tp1);
   4029 					if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
   4030 						(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
   4031 													     tp1);
   4032 					}
   4033 					/* sa_ignore NO_NULL_CHK */
   4034 					sctp_total_flight_decrease(stcb, tp1);
   4035 				}
   4036 				tp1->whoTo->net_ack += tp1->send_size;
   4037 				if (tp1->snd_count < 2) {
   4038 					/*
   4039 					 * True non-retransmitted
   4040 					 * chunk
   4041 					 */
   4042 					tp1->whoTo->net_ack2 +=
   4043 						tp1->send_size;
   4044 
   4045 					/* update RTO too? */
   4046 					if (tp1->do_rtt) {
   4047 						if (rto_ok &&
   4048 						    sctp_calculate_rto(stcb,
   4049 								       &stcb->asoc,
   4050 								       tp1->whoTo,
   4051 								       &tp1->sent_rcv_time,
   4052 								       SCTP_RTT_FROM_DATA)) {
   4053 							rto_ok = 0;
   4054 						}
   4055 						if (tp1->whoTo->rto_needed == 0) {
   4056 							tp1->whoTo->rto_needed = 1;
   4057 						}
   4058 						tp1->do_rtt = 0;
   4059 					}
   4060 				}
   4061 				/*
   4062 				 * CMT: CUCv2 algorithm. From the
   4063 				 * cumack'd TSNs, for each TSN being
   4064 				 * acked for the first time, set the
   4065 				 * following variables for the
   4066 				 * corresp destination.
   4067 				 * new_pseudo_cumack will trigger a
   4068 				 * cwnd update.
   4069 				 * find_(rtx_)pseudo_cumack will
   4070 				 * trigger search for the next
   4071 				 * expected (rtx-)pseudo-cumack.
   4072 				 */
   4073 				tp1->whoTo->new_pseudo_cumack = 1;
   4074 				tp1->whoTo->find_pseudo_cumack = 1;
   4075 				tp1->whoTo->find_rtx_pseudo_cumack = 1;
   4076 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
   4077 					/* sa_ignore NO_NULL_CHK */
   4078 					sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
   4079 				}
   4080 			}
   4081 			if (tp1->sent == SCTP_DATAGRAM_RESEND) {
   4082 				sctp_ucount_decr(asoc->sent_queue_retran_cnt);
   4083 			}
   4084 			if (tp1->rec.data.chunk_was_revoked) {
   4085 				/* deflate the cwnd */
   4086 				tp1->whoTo->cwnd -= tp1->book_size;
   4087 				tp1->rec.data.chunk_was_revoked = 0;
   4088 			}
   4089 			if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
   4090 				if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
   4091 					asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
   4092 #ifdef INVARIANTS
   4093 				} else {
   4094 					panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
   4095 #endif
   4096 				}
   4097 			}
   4098 			if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
   4099 			    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
   4100 			    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
   4101 				asoc->trigger_reset = 1;
   4102 			}
   4103 			TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
   4104 			if (tp1->data) {
   4105 				/* sa_ignore NO_NULL_CHK */
   4106 				sctp_free_bufspace(stcb, asoc, tp1, 1);
   4107 				sctp_m_freem(tp1->data);
   4108 				tp1->data = NULL;
   4109 			}
   4110 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
   4111 				sctp_log_sack(asoc->last_acked_seq,
   4112 					      cumack,
   4113 					      tp1->rec.data.tsn,
   4114 					      0,
   4115 					      0,
   4116 					      SCTP_LOG_FREE_SENT);
   4117 			}
   4118 			asoc->sent_queue_cnt--;
   4119 			sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
   4120 		} else {
   4121 			break;
   4122 		}
   4123 	}
   4124 }
   4125 #if defined(__Userspace__)
   4126 if (stcb->sctp_ep->recv_callback) {
   4127 	if (stcb->sctp_socket) {
   4128 		uint32_t inqueue_bytes, sb_free_now;
   4129 		struct sctp_inpcb *inp;
   4130 
   4131 		inp = stcb->sctp_ep;
   4132 		inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
   4133 		sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
   4134 
   4135 		/* check if the amount free in the send socket buffer crossed the threshold */
   4136 		if (inp->send_callback &&
   4137 		    (((inp->send_sb_threshold > 0) &&
   4138 		      (sb_free_now >= inp->send_sb_threshold) &&
   4139 		      (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
   4140 		     (inp->send_sb_threshold == 0))) {
   4141 			atomic_add_int(&stcb->asoc.refcnt, 1);
   4142 			SCTP_TCB_UNLOCK(stcb);
   4143 			inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info);
   4144 			SCTP_TCB_LOCK(stcb);
   4145 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
   4146 		}
   4147 	}
   4148 } else if (stcb->sctp_socket) {
   4149 #else
   4150 /* sa_ignore NO_NULL_CHK */
   4151 if (stcb->sctp_socket) {
   4152 #endif
   4153 #if defined(__APPLE__) && !defined(__Userspace__)
   4154 	struct socket *so;
   4155 
   4156 #endif
   4157 	SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
   4158 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
   4159 		/* sa_ignore NO_NULL_CHK */
   4160 		sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
   4161 	}
   4162 #if defined(__APPLE__) && !defined(__Userspace__)
   4163 	so = SCTP_INP_SO(stcb->sctp_ep);
   4164 	atomic_add_int(&stcb->asoc.refcnt, 1);
   4165 	SCTP_TCB_UNLOCK(stcb);
   4166 	SCTP_SOCKET_LOCK(so, 1);
   4167 	SCTP_TCB_LOCK(stcb);
   4168 	atomic_subtract_int(&stcb->asoc.refcnt, 1);
   4169 	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
   4170 		/* assoc was freed while we were unlocked */
   4171 		SCTP_SOCKET_UNLOCK(so, 1);
   4172 		return;
   4173 	}
   4174 #endif
   4175 	sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
   4176 #if defined(__APPLE__) && !defined(__Userspace__)
   4177 	SCTP_SOCKET_UNLOCK(so, 1);
   4178 #endif
   4179 } else {
   4180 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
   4181 		sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
   4182 	}
   4183 }
   4184 
   4185 /* JRS - Use the congestion control given in the CC module */
   4186 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
   4187 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4188 		if (net->net_ack2 > 0) {
   4189 			/*
   4190 			 * Karn's rule applies to clearing error count, this
   4191 			 * is optional.
   4192 			 */
   4193 			net->error_count = 0;
   4194 			if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
   4195 				/* addr came good */
   4196 				net->dest_state |= SCTP_ADDR_REACHABLE;
   4197 				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
   4198 				                0, (void *)net, SCTP_SO_NOT_LOCKED);
   4199 			}
   4200 			if (net == stcb->asoc.primary_destination) {
   4201 				if (stcb->asoc.alternate) {
   4202 					/* release the alternate, primary is good */
   4203 					sctp_free_remote_addr(stcb->asoc.alternate);
   4204 					stcb->asoc.alternate = NULL;
   4205 				}
   4206 			}
   4207 			if (net->dest_state & SCTP_ADDR_PF) {
   4208 				net->dest_state &= ~SCTP_ADDR_PF;
   4209 				sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
   4210 				                stcb->sctp_ep, stcb, net,
   4211 				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
   4212 				sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
   4213 				asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
   4214 				/* Done with this net */
   4215 				net->net_ack = 0;
   4216 			}
   4217 			/* restore any doubled timers */
   4218 			net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
   4219 			if (net->RTO < stcb->asoc.minrto) {
   4220 				net->RTO = stcb->asoc.minrto;
   4221 			}
   4222 			if (net->RTO > stcb->asoc.maxrto) {
   4223 				net->RTO = stcb->asoc.maxrto;
   4224 			}
   4225 		}
   4226 	}
   4227 	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
   4228 }
   4229 asoc->last_acked_seq = cumack;
   4230 
   4231 if (TAILQ_EMPTY(&asoc->sent_queue)) {
   4232 	/* nothing left in-flight */
   4233 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4234 		net->flight_size = 0;
   4235 		net->partial_bytes_acked = 0;
   4236 	}
   4237 	asoc->total_flight = 0;
   4238 	asoc->total_flight_count = 0;
   4239 }
   4240 
   4241 /* RWND update */
   4242 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
   4243 				    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
   4244 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
   4245 	/* SWS sender side engages */
   4246 	asoc->peers_rwnd = 0;
   4247 }
   4248 if (asoc->peers_rwnd > old_rwnd) {
   4249 	win_probe_recovery = 1;
   4250 }
   4251 /* Now assure a timer where data is queued at */
   4252 again:
   4253 j = 0;
   4254 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4255 	if (win_probe_recovery && (net->window_probe)) {
   4256 		win_probe_recovered = 1;
   4257 		/*
   4258 		 * Find first chunk that was used with window probe
   4259 		 * and clear the sent
   4260 		 */
   4261 		/* sa_ignore FREED_MEMORY */
   4262 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
   4263 			if (tp1->window_probe) {
   4264 				/* move back to data send queue */
   4265 				sctp_window_probe_recovery(stcb, asoc, tp1);
   4266 				break;
   4267 			}
   4268 		}
   4269 	}
   4270 	if (net->flight_size) {
   4271 		j++;
   4272 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
   4273 		if (net->window_probe) {
   4274 			net->window_probe = 0;
   4275 		}
   4276 	} else {
   4277 		if (net->window_probe) {
   4278 			/* In window probes we must assure a timer is still running there */
   4279 			net->window_probe = 0;
   4280 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
   4281 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
   4282 			}
   4283 		} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
   4284 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
   4285 			                stcb, net,
   4286 			                SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
   4287 		}
   4288 	}
   4289 }
   4290 if ((j == 0) &&
   4291     (!TAILQ_EMPTY(&asoc->sent_queue)) &&
   4292     (asoc->sent_queue_retran_cnt == 0) &&
   4293     (win_probe_recovered == 0) &&
   4294     (done_once == 0)) {
   4295 	/* huh, this should not happen unless all packets
   4296 	 * are PR-SCTP and marked to skip of course.
   4297 	 */
   4298 	if (sctp_fs_audit(asoc)) {
   4299 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4300 			net->flight_size = 0;
   4301 		}
   4302 		asoc->total_flight = 0;
   4303 		asoc->total_flight_count = 0;
   4304 		asoc->sent_queue_retran_cnt = 0;
   4305 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
   4306 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
   4307 				sctp_flight_size_increase(tp1);
   4308 				sctp_total_flight_increase(stcb, tp1);
   4309 			} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
   4310 				sctp_ucount_incr(asoc->sent_queue_retran_cnt);
   4311 			}
   4312 		}
   4313 	}
   4314 	done_once = 1;
   4315 	goto again;
   4316 }
   4317 /**********************************/
   4318 /* Now what about shutdown issues */
   4319 /**********************************/
   4320 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
   4321 	/* nothing left on sendqueue.. consider done */
   4322 	/* clean up */
   4323 	if ((asoc->stream_queue_cnt == 1) &&
   4324 	    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
   4325 	     (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
   4326 	    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
   4327 		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
   4328 	}
   4329 	if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
   4330 	     (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
   4331 	    (asoc->stream_queue_cnt == 1) &&
   4332 	    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
   4333 		struct mbuf *op_err;
   4334 
   4335 		*abort_now = 1;
   4336 		/* XXX */
   4337 		op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
   4338 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
   4339 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
   4340 		return;
   4341 	}
   4342 	if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
   4343 	    (asoc->stream_queue_cnt == 0)) {
   4344 		struct sctp_nets *netp;
   4345 
   4346 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
   4347 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
   4348 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
   4349 		}
   4350 		SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
   4351 		sctp_stop_timers_for_shutdown(stcb);
   4352 		if (asoc->alternate) {
   4353 			netp = asoc->alternate;
   4354 		} else {
   4355 			netp = asoc->primary_destination;
   4356 		}
   4357 		sctp_send_shutdown(stcb, netp);
   4358 		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
   4359 				 stcb->sctp_ep, stcb, netp);
   4360 		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
   4361 				 stcb->sctp_ep, stcb, NULL);
   4362 	} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
   4363 		   (asoc->stream_queue_cnt == 0)) {
   4364 		struct sctp_nets *netp;
   4365 
   4366 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
   4367 		SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
   4368 		sctp_stop_timers_for_shutdown(stcb);
   4369 		if (asoc->alternate) {
   4370 			netp = asoc->alternate;
   4371 		} else {
   4372 			netp = asoc->primary_destination;
   4373 		}
   4374 		sctp_send_shutdown_ack(stcb, netp);
   4375 		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
   4376 				 stcb->sctp_ep, stcb, netp);
   4377 	}
   4378 }
   4379 /*********************************************/
   4380 /* Here we perform PR-SCTP procedures        */
   4381 /* (section 4.2)                             */
   4382 /*********************************************/
   4383 /* C1. update advancedPeerAckPoint */
   4384 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
   4385 	asoc->advanced_peer_ack_point = cumack;
   4386 }
   4387 /* PR-Sctp issues need to be addressed too */
   4388 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
   4389 	struct sctp_tmit_chunk *lchk;
   4390 	uint32_t old_adv_peer_ack_point;
   4391 
   4392 	old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
   4393 	lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
   4394 	/* C3. See if we need to send a Fwd-TSN */
   4395 	if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
   4396 		/*
   4397 		 * ISSUE with ECN, see FWD-TSN processing.
   4398 		 */
   4399 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
   4400 			send_forward_tsn(stcb, asoc);
   4401 		} else if (lchk) {
   4402 			/* try to FR fwd-tsn's that get lost too */
   4403 			if (lchk->rec.data.fwd_tsn_cnt >= 3) {
   4404 				send_forward_tsn(stcb, asoc);
   4405 			}
   4406 		}
   4407 	}
   4408 	for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
   4409 		if (lchk->whoTo != NULL) {
   4410 			break;
   4411 		}
   4412 	}
   4413 	if (lchk != NULL) {
   4414 		/* Assure a timer is up */
   4415 		sctp_timer_start(SCTP_TIMER_TYPE_SEND,
   4416 		                 stcb->sctp_ep, stcb, lchk->whoTo);
   4417 	}
   4418 }
   4419 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
   4420 	sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
   4421 		       rwnd,
   4422 		       stcb->asoc.peers_rwnd,
   4423 		       stcb->asoc.total_flight,
   4424 		       stcb->asoc.total_output_queue_size);
   4425 }
   4426 }
   4427 
   4428 void
   4429 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
   4430                 struct sctp_tcb *stcb,
   4431                 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
   4432                 int *abort_now, uint8_t flags,
   4433                 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
   4434 {
   4435 struct sctp_association *asoc;
   4436 struct sctp_tmit_chunk *tp1, *tp2;
   4437 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
   4438 uint16_t wake_him = 0;
   4439 uint32_t send_s = 0;
   4440 long j;
   4441 int accum_moved = 0;
   4442 int will_exit_fast_recovery = 0;
   4443 uint32_t a_rwnd, old_rwnd;
   4444 int win_probe_recovery = 0;
   4445 int win_probe_recovered = 0;
   4446 struct sctp_nets *net = NULL;
   4447 int done_once;
   4448 int rto_ok = 1;
   4449 uint8_t reneged_all = 0;
   4450 uint8_t cmt_dac_flag;
   4451 /*
   4452  * we take any chance we can to service our queues since we cannot
   4453  * get awoken when the socket is read from :<
   4454  */
   4455 /*
   4456  * Now perform the actual SACK handling: 1) Verify that it is not an
   4457  * old sack, if so discard. 2) If there is nothing left in the send
   4458  * queue (cum-ack is equal to last acked) then you have a duplicate
   4459  * too, update any rwnd change and verify no timers are running.
   4460  * then return. 3) Process any new consecutive data i.e. cum-ack
   4461  * moved process these first and note that it moved. 4) Process any
   4462  * sack blocks. 5) Drop any acked from the queue. 6) Check for any
   4463  * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
   4464  * sync up flightsizes and things, stop all timers and also check
   4465  * for shutdown_pending state. If so then go ahead and send off the
   4466  * shutdown. If in shutdown recv, send off the shutdown-ack and
   4467  * start that timer, Ret. 9) Strike any non-acked things and do FR
   4468  * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
   4469  * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
   4470  * if in shutdown_recv state.
   4471  */
   4472 SCTP_TCB_LOCK_ASSERT(stcb);
   4473 /* CMT DAC algo */
   4474 this_sack_lowest_newack = 0;
   4475 SCTP_STAT_INCR(sctps_slowpath_sack);
   4476 last_tsn = cum_ack;
   4477 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
   4478 #ifdef SCTP_ASOCLOG_OF_TSNS
   4479 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
   4480 stcb->asoc.cumack_log_at++;
   4481 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
   4482 	stcb->asoc.cumack_log_at = 0;
   4483 }
   4484 #endif
   4485 a_rwnd = rwnd;
   4486 
   4487 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
   4488 	sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
   4489 	               rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
   4490 }
   4491 
   4492 old_rwnd = stcb->asoc.peers_rwnd;
   4493 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
   4494 	sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
   4495 	               stcb->asoc.overall_error_count,
   4496 	               0,
   4497 	               SCTP_FROM_SCTP_INDATA,
   4498 	               __LINE__);
   4499 }
   4500 stcb->asoc.overall_error_count = 0;
   4501 asoc = &stcb->asoc;
   4502 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
   4503 	sctp_log_sack(asoc->last_acked_seq,
   4504 	              cum_ack,
   4505 	              0,
   4506 	              num_seg,
   4507 	              num_dup,
   4508 	              SCTP_LOG_NEW_SACK);
   4509 }
   4510 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
   4511 	uint16_t i;
   4512 	uint32_t *dupdata, dblock;
   4513 
   4514 	for (i = 0; i < num_dup; i++) {
   4515 		dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
   4516 		                                    sizeof(uint32_t), (uint8_t *)&dblock);
   4517 		if (dupdata == NULL) {
   4518 			break;
   4519 		}
   4520 		sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
   4521 	}
   4522 }
   4523 /* reality check */
   4524 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
   4525 	tp1 = TAILQ_LAST(&asoc->sent_queue,
   4526 			 sctpchunk_listhead);
   4527 	send_s = tp1->rec.data.tsn + 1;
   4528 } else {
   4529 	tp1 = NULL;
   4530 	send_s = asoc->sending_seq;
   4531 }
   4532 if (SCTP_TSN_GE(cum_ack, send_s)) {
   4533 	struct mbuf *op_err;
   4534 	char msg[SCTP_DIAG_INFO_LEN];
   4535 
   4536 	/*
   4537 	 * no way, we have not even sent this TSN out yet.
   4538 	 * Peer is hopelessly messed up with us.
   4539 	 */
   4540 	SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
   4541 		    cum_ack, send_s);
   4542 	if (tp1) {
   4543 		SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
   4544 			    tp1->rec.data.tsn, (void *)tp1);
   4545 	}
   4546 hopeless_peer:
   4547 	*abort_now = 1;
   4548 	/* XXX */
   4549 	SCTP_SNPRINTF(msg, sizeof(msg),
   4550 	              "Cum ack %8.8x greater or equal than TSN %8.8x",
   4551 	              cum_ack, send_s);
   4552 	op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   4553 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29;
   4554 	sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
   4555 	return;
   4556 }
   4557 /**********************/
   4558 /* 1) check the range */
   4559 /**********************/
   4560 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
   4561 	/* acking something behind */
   4562 	return;
   4563 }
   4564 
   4565 /* update the Rwnd of the peer */
   4566 if (TAILQ_EMPTY(&asoc->sent_queue) &&
   4567     TAILQ_EMPTY(&asoc->send_queue) &&
   4568     (asoc->stream_queue_cnt == 0)) {
   4569 	/* nothing left on send/sent and strmq */
   4570 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
   4571 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
   4572 		                  asoc->peers_rwnd, 0, 0, a_rwnd);
   4573 	}
   4574 	asoc->peers_rwnd = a_rwnd;
   4575 	if (asoc->sent_queue_retran_cnt) {
   4576 		asoc->sent_queue_retran_cnt = 0;
   4577 	}
   4578 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
   4579 		/* SWS sender side engages */
   4580 		asoc->peers_rwnd = 0;
   4581 	}
   4582 	/* stop any timers */
   4583 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4584 		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
   4585 		                stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
   4586 		net->partial_bytes_acked = 0;
   4587 		net->flight_size = 0;
   4588 	}
   4589 	asoc->total_flight = 0;
   4590 	asoc->total_flight_count = 0;
   4591 	return;
   4592 }
   4593 /*
   4594  * We init netAckSz and netAckSz2 to 0. These are used to track 2
   4595  * things. The total byte count acked is tracked in netAckSz AND
   4596  * netAck2 is used to track the total bytes acked that are un-
   4597  * ambiguous and were never retransmitted. We track these on a per
   4598  * destination address basis.
   4599  */
   4600 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4601 	if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
   4602 		/* Drag along the window_tsn for cwr's */
   4603 		net->cwr_window_tsn = cum_ack;
   4604 	}
   4605 	net->prev_cwnd = net->cwnd;
   4606 	net->net_ack = 0;
   4607 	net->net_ack2 = 0;
   4608 
   4609 	/*
   4610 	 * CMT: Reset CUC and Fast recovery algo variables before
   4611 	 * SACK processing
   4612 	 */
   4613 	net->new_pseudo_cumack = 0;
   4614 	net->will_exit_fast_recovery = 0;
   4615 	if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
   4616 		(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
   4617 	}
   4618 
   4619 	/*
   4620 	 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
   4621 	 * to be greater than the cumack. Also reset saw_newack to 0
   4622 	 * for all dests.
   4623 	 */
   4624 	net->saw_newack = 0;
   4625 	net->this_sack_highest_newack = last_tsn;
   4626 }
   4627 /* process the new consecutive TSN first */
   4628 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
   4629 	if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
   4630 		if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
   4631 			accum_moved = 1;
   4632 			if (tp1->sent < SCTP_DATAGRAM_ACKED) {
   4633 				/*
   4634 				 * If it is less than ACKED, it is
   4635 				 * now no-longer in flight. Higher
   4636 				 * values may occur during marking
   4637 				 */
   4638 				if ((tp1->whoTo->dest_state &
   4639 				     SCTP_ADDR_UNCONFIRMED) &&
   4640 				    (tp1->snd_count < 2)) {
   4641 					/*
   4642 					 * If there was no retran
   4643 					 * and the address is
   4644 					 * un-confirmed and we sent
   4645 					 * there and are now
   4646 					 * sacked.. its confirmed,
   4647 					 * mark it so.
   4648 					 */
   4649 					tp1->whoTo->dest_state &=
   4650 						~SCTP_ADDR_UNCONFIRMED;
   4651 				}
   4652 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
   4653 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
   4654 						sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
   4655 						               tp1->whoTo->flight_size,
   4656 						               tp1->book_size,
   4657 						               (uint32_t)(uintptr_t)tp1->whoTo,
   4658 						               tp1->rec.data.tsn);
   4659 					}
   4660 					sctp_flight_size_decrease(tp1);
   4661 					sctp_total_flight_decrease(stcb, tp1);
   4662 					if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
   4663 						(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
   4664 													     tp1);
   4665 					}
   4666 				}
   4667 				tp1->whoTo->net_ack += tp1->send_size;
   4668 
   4669 				/* CMT SFR and DAC algos */
   4670 				this_sack_lowest_newack = tp1->rec.data.tsn;
   4671 				tp1->whoTo->saw_newack = 1;
   4672 
   4673 				if (tp1->snd_count < 2) {
   4674 					/*
   4675 					 * True non-retransmitted
   4676 					 * chunk
   4677 					 */
   4678 					tp1->whoTo->net_ack2 +=
   4679 						tp1->send_size;
   4680 
   4681 					/* update RTO too? */
   4682 					if (tp1->do_rtt) {
   4683 						if (rto_ok &&
   4684 						    sctp_calculate_rto(stcb,
   4685 								       &stcb->asoc,
   4686 								       tp1->whoTo,
   4687 								       &tp1->sent_rcv_time,
   4688 								       SCTP_RTT_FROM_DATA)) {
   4689 							rto_ok = 0;
   4690 						}
   4691 						if (tp1->whoTo->rto_needed == 0) {
   4692 							tp1->whoTo->rto_needed = 1;
   4693 						}
   4694 						tp1->do_rtt = 0;
   4695 					}
   4696 				}
   4697 				/*
   4698 				 * CMT: CUCv2 algorithm. From the
   4699 				 * cumack'd TSNs, for each TSN being
   4700 				 * acked for the first time, set the
   4701 				 * following variables for the
   4702 				 * corresp destination.
   4703 				 * new_pseudo_cumack will trigger a
   4704 				 * cwnd update.
   4705 				 * find_(rtx_)pseudo_cumack will
   4706 				 * trigger search for the next
   4707 				 * expected (rtx-)pseudo-cumack.
   4708 				 */
   4709 				tp1->whoTo->new_pseudo_cumack = 1;
   4710 				tp1->whoTo->find_pseudo_cumack = 1;
   4711 				tp1->whoTo->find_rtx_pseudo_cumack = 1;
   4712 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
   4713 					sctp_log_sack(asoc->last_acked_seq,
   4714 					              cum_ack,
   4715 					              tp1->rec.data.tsn,
   4716 					              0,
   4717 					              0,
   4718 					              SCTP_LOG_TSN_ACKED);
   4719 				}
   4720 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
   4721 					sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
   4722 				}
   4723 			}
   4724 			if (tp1->sent == SCTP_DATAGRAM_RESEND) {
   4725 				sctp_ucount_decr(asoc->sent_queue_retran_cnt);
   4726 #ifdef SCTP_AUDITING_ENABLED
   4727 				sctp_audit_log(0xB3,
   4728 				               (asoc->sent_queue_retran_cnt & 0x000000ff));
   4729 #endif
   4730 			}
   4731 			if (tp1->rec.data.chunk_was_revoked) {
   4732 				/* deflate the cwnd */
   4733 				tp1->whoTo->cwnd -= tp1->book_size;
   4734 				tp1->rec.data.chunk_was_revoked = 0;
   4735 			}
   4736 			if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
   4737 				tp1->sent = SCTP_DATAGRAM_ACKED;
   4738 			}
   4739 		}
   4740 	} else {
   4741 		break;
   4742 	}
   4743 }
   4744 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
   4745 /* always set this up to cum-ack */
   4746 asoc->this_sack_highest_gap = last_tsn;
   4747 
   4748 if ((num_seg > 0) || (num_nr_seg > 0)) {
   4749 	/*
   4750 	 * thisSackHighestGap will increase while handling NEW
   4751 	 * segments this_sack_highest_newack will increase while
   4752 	 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
   4753 	 * used for CMT DAC algo. saw_newack will also change.
   4754 	 */
   4755 	if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
   4756 		&biggest_tsn_newly_acked, &this_sack_lowest_newack,
   4757 		num_seg, num_nr_seg, &rto_ok)) {
   4758 		wake_him++;
   4759 	}
   4760 	/*
   4761 	 * validate the biggest_tsn_acked in the gap acks if
   4762 	 * strict adherence is wanted.
   4763 	 */
   4764 	if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
   4765 		/*
   4766 		 * peer is either confused or we are under
   4767 		 * attack. We must abort.
   4768 		 */
   4769 		SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
   4770 			    biggest_tsn_acked, send_s);
   4771 		goto hopeless_peer;
   4772 	}
   4773 }
   4774 /*******************************************/
   4775 /* cancel ALL T3-send timer if accum moved */
   4776 /*******************************************/
   4777 if (asoc->sctp_cmt_on_off > 0) {
   4778 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4779 		if (net->new_pseudo_cumack)
   4780 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
   4781 			                stcb, net,
   4782 			                SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
   4783 	}
   4784 } else {
   4785 	if (accum_moved) {
   4786 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4787 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
   4788 			                stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
   4789 		}
   4790 	}
   4791 }
   4792 /********************************************/
   4793 /* drop the acked chunks from the sentqueue */
   4794 /********************************************/
   4795 asoc->last_acked_seq = cum_ack;
   4796 
   4797 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
   4798 	if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
   4799 		break;
   4800 	}
   4801 	if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
   4802 		if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
   4803 			asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
   4804 #ifdef INVARIANTS
   4805 		} else {
   4806 			panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
   4807 #endif
   4808 		}
   4809 	}
   4810 	if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
   4811 	    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
   4812 	    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
   4813 		asoc->trigger_reset = 1;
   4814 	}
   4815 	TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
   4816 	if (PR_SCTP_ENABLED(tp1->flags)) {
   4817 		if (asoc->pr_sctp_cnt != 0)
   4818 			asoc->pr_sctp_cnt--;
   4819 	}
   4820 	asoc->sent_queue_cnt--;
   4821 	if (tp1->data) {
   4822 		/* sa_ignore NO_NULL_CHK */
   4823 		sctp_free_bufspace(stcb, asoc, tp1, 1);
   4824 		sctp_m_freem(tp1->data);
   4825 		tp1->data = NULL;
   4826 		if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
   4827 			asoc->sent_queue_cnt_removeable--;
   4828 		}
   4829 	}
   4830 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
   4831 		sctp_log_sack(asoc->last_acked_seq,
   4832 		              cum_ack,
   4833 		              tp1->rec.data.tsn,
   4834 		              0,
   4835 		              0,
   4836 		              SCTP_LOG_FREE_SENT);
   4837 	}
   4838 	sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
   4839 	wake_him++;
   4840 }
   4841 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
   4842 #ifdef INVARIANTS
   4843 	panic("Warning flight size is positive and should be 0");
   4844 #else
   4845 	SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
   4846 	            asoc->total_flight);
   4847 #endif
   4848 	asoc->total_flight = 0;
   4849 }
   4850 
   4851 #if defined(__Userspace__)
   4852 if (stcb->sctp_ep->recv_callback) {
   4853 	if (stcb->sctp_socket) {
   4854 		uint32_t inqueue_bytes, sb_free_now;
   4855 		struct sctp_inpcb *inp;
   4856 
   4857 		inp = stcb->sctp_ep;
   4858 		inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
   4859 		sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
   4860 
   4861 		/* check if the amount free in the send socket buffer crossed the threshold */
   4862 		if (inp->send_callback &&
   4863 		   (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
   4864 		    (inp->send_sb_threshold == 0))) {
   4865 			atomic_add_int(&stcb->asoc.refcnt, 1);
   4866 			SCTP_TCB_UNLOCK(stcb);
   4867 			inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info);
   4868 			SCTP_TCB_LOCK(stcb);
   4869 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
   4870 		}
   4871 	}
   4872 } else if ((wake_him) && (stcb->sctp_socket)) {
   4873 #else
   4874 /* sa_ignore NO_NULL_CHK */
   4875 if ((wake_him) && (stcb->sctp_socket)) {
   4876 #endif
   4877 #if defined(__APPLE__) && !defined(__Userspace__)
   4878 	struct socket *so;
   4879 
   4880 #endif
   4881 	SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
   4882 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
   4883 		sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
   4884 	}
   4885 #if defined(__APPLE__) && !defined(__Userspace__)
   4886 	so = SCTP_INP_SO(stcb->sctp_ep);
   4887 	atomic_add_int(&stcb->asoc.refcnt, 1);
   4888 	SCTP_TCB_UNLOCK(stcb);
   4889 	SCTP_SOCKET_LOCK(so, 1);
   4890 	SCTP_TCB_LOCK(stcb);
   4891 	atomic_subtract_int(&stcb->asoc.refcnt, 1);
   4892 	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
   4893 		/* assoc was freed while we were unlocked */
   4894 		SCTP_SOCKET_UNLOCK(so, 1);
   4895 		return;
   4896 	}
   4897 #endif
   4898 	sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
   4899 #if defined(__APPLE__) && !defined(__Userspace__)
   4900 	SCTP_SOCKET_UNLOCK(so, 1);
   4901 #endif
   4902 } else {
   4903 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
   4904 		sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
   4905 	}
   4906 }
   4907 
   4908 if (asoc->fast_retran_loss_recovery && accum_moved) {
   4909 	if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
   4910 		/* Setup so we will exit RFC2582 fast recovery */
   4911 		will_exit_fast_recovery = 1;
   4912 	}
   4913 }
   4914 /*
   4915  * Check for revoked fragments:
   4916  *
   4917  * if Previous sack - Had no frags then we can't have any revoked if
   4918  * Previous sack - Had frag's then - If we now have frags aka
   4919  * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
   4920  * some of them. else - The peer revoked all ACKED fragments, since
   4921  * we had some before and now we have NONE.
   4922  */
   4923 
   4924 if (num_seg) {
   4925 	sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
   4926 	asoc->saw_sack_with_frags = 1;
   4927 } else if (asoc->saw_sack_with_frags) {
   4928 	int cnt_revoked = 0;
   4929 
   4930 	/* Peer revoked all dg's marked or acked */
   4931 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
   4932 		if (tp1->sent == SCTP_DATAGRAM_ACKED) {
   4933 			tp1->sent = SCTP_DATAGRAM_SENT;
   4934 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
   4935 				sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
   4936 				               tp1->whoTo->flight_size,
   4937 				               tp1->book_size,
   4938 				               (uint32_t)(uintptr_t)tp1->whoTo,
   4939 				               tp1->rec.data.tsn);
   4940 			}
   4941 			sctp_flight_size_increase(tp1);
   4942 			sctp_total_flight_increase(stcb, tp1);
   4943 			tp1->rec.data.chunk_was_revoked = 1;
   4944 			/*
   4945 			 * To ensure that this increase in
   4946 			 * flightsize, which is artificial,
   4947 			 * does not throttle the sender, we
   4948 			 * also increase the cwnd
   4949 			 * artificially.
   4950 			 */
   4951 			tp1->whoTo->cwnd += tp1->book_size;
   4952 			cnt_revoked++;
   4953 		}
   4954 	}
   4955 	if (cnt_revoked) {
   4956 		reneged_all = 1;
   4957 	}
   4958 	asoc->saw_sack_with_frags = 0;
   4959 }
   4960 if (num_nr_seg > 0)
   4961 	asoc->saw_sack_with_nr_frags = 1;
   4962 else
   4963 	asoc->saw_sack_with_nr_frags = 0;
   4964 
   4965 /* JRS - Use the congestion control given in the CC module */
   4966 if (ecne_seen == 0) {
   4967 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4968 		if (net->net_ack2 > 0) {
   4969 			/*
   4970 			 * Karn's rule applies to clearing error count, this
   4971 			 * is optional.
   4972 			 */
   4973 			net->error_count = 0;
   4974 			if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
   4975 				/* addr came good */
   4976 				net->dest_state |= SCTP_ADDR_REACHABLE;
   4977 				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
   4978 				                0, (void *)net, SCTP_SO_NOT_LOCKED);
   4979 			}
   4980 
   4981 			if (net == stcb->asoc.primary_destination) {
   4982 				if (stcb->asoc.alternate) {
   4983 					/* release the alternate, primary is good */
   4984 					sctp_free_remote_addr(stcb->asoc.alternate);
   4985 					stcb->asoc.alternate = NULL;
   4986 				}
   4987 			}
   4988 
   4989 			if (net->dest_state & SCTP_ADDR_PF) {
   4990 				net->dest_state &= ~SCTP_ADDR_PF;
   4991 				sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
   4992 				                stcb->sctp_ep, stcb, net,
   4993 				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
   4994 				sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
   4995 				asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
   4996 				/* Done with this net */
   4997 				net->net_ack = 0;
   4998 			}
   4999 			/* restore any doubled timers */
   5000 			net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
   5001 			if (net->RTO < stcb->asoc.minrto) {
   5002 				net->RTO = stcb->asoc.minrto;
   5003 			}
   5004 			if (net->RTO > stcb->asoc.maxrto) {
   5005 				net->RTO = stcb->asoc.maxrto;
   5006 			}
   5007 		}
   5008 	}
   5009 	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
   5010 }
   5011 
   5012 if (TAILQ_EMPTY(&asoc->sent_queue)) {
   5013 	/* nothing left in-flight */
   5014 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   5015 		/* stop all timers */
   5016 		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
   5017 		                stcb, net,
   5018 		                SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
   5019 		net->flight_size = 0;
   5020 		net->partial_bytes_acked = 0;
   5021 	}
   5022 	asoc->total_flight = 0;
   5023 	asoc->total_flight_count = 0;
   5024 }
   5025 
   5026 /**********************************/
   5027 /* Now what about shutdown issues */
   5028 /**********************************/
   5029 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
   5030 	/* nothing left on sendqueue.. consider done */
   5031 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
   5032 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
   5033 		                  asoc->peers_rwnd, 0, 0, a_rwnd);
   5034 	}
   5035 	asoc->peers_rwnd = a_rwnd;
   5036 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
   5037 		/* SWS sender side engages */
   5038 		asoc->peers_rwnd = 0;
   5039 	}
   5040 	/* clean up */
   5041 	if ((asoc->stream_queue_cnt == 1) &&
   5042 	    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
   5043 	     (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
   5044 	    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
   5045 		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
   5046 	}
   5047 	if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
   5048 	     (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
   5049 	    (asoc->stream_queue_cnt == 1) &&
   5050 	    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
   5051 		struct mbuf *op_err;
   5052 
   5053 		*abort_now = 1;
   5054 		/* XXX */
   5055 		op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
   5056 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
   5057 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
   5058 		return;
   5059 	}
   5060 	if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
   5061 	    (asoc->stream_queue_cnt == 0)) {
   5062 		struct sctp_nets *netp;
   5063 
   5064 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
   5065 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
   5066 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
   5067 		}
   5068 		SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
   5069 		sctp_stop_timers_for_shutdown(stcb);
   5070 		if (asoc->alternate) {
   5071 			netp = asoc->alternate;
   5072 		} else {
   5073 			netp = asoc->primary_destination;
   5074 		}
   5075 		sctp_send_shutdown(stcb, netp);
   5076 		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
   5077 				 stcb->sctp_ep, stcb, netp);
   5078 		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
   5079 				 stcb->sctp_ep, stcb, NULL);
   5080 		return;
   5081 	} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
   5082 		   (asoc->stream_queue_cnt == 0)) {
   5083 		struct sctp_nets *netp;
   5084 
   5085 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
   5086 		SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
   5087 		sctp_stop_timers_for_shutdown(stcb);
   5088 		if (asoc->alternate) {
   5089 			netp = asoc->alternate;
   5090 		} else {
   5091 			netp = asoc->primary_destination;
   5092 		}
   5093 		sctp_send_shutdown_ack(stcb, netp);
   5094 		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
   5095 		                 stcb->sctp_ep, stcb, netp);
   5096 		return;
   5097 	}
   5098 }
   5099 /*
   5100  * Now here we are going to recycle net_ack for a different use...
   5101  * HEADS UP.
   5102  */
   5103 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   5104 	net->net_ack = 0;
   5105 }
   5106 
   5107 /*
   5108  * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
   5109  * to be done. Setting this_sack_lowest_newack to the cum_ack will
   5110  * automatically ensure that.
   5111  */
   5112 if ((asoc->sctp_cmt_on_off > 0) &&
   5113     SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
   5114     (cmt_dac_flag == 0)) {
   5115 	this_sack_lowest_newack = cum_ack;
   5116 }
   5117 if ((num_seg > 0) || (num_nr_seg > 0)) {
   5118 	sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
   5119 	                           biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
   5120 }
   5121 /* JRS - Use the congestion control given in the CC module */
   5122 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
   5123 
   5124 /* Now are we exiting loss recovery ? */
   5125 if (will_exit_fast_recovery) {
   5126 	/* Ok, we must exit fast recovery */
   5127 	asoc->fast_retran_loss_recovery = 0;
   5128 }
   5129 if ((asoc->sat_t3_loss_recovery) &&
   5130     SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
   5131 	/* end satellite t3 loss recovery */
   5132 	asoc->sat_t3_loss_recovery = 0;
   5133 }
   5134 /*
   5135  * CMT Fast recovery
   5136  */
   5137 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   5138 	if (net->will_exit_fast_recovery) {
   5139 		/* Ok, we must exit fast recovery */
   5140 		net->fast_retran_loss_recovery = 0;
   5141 	}
   5142 }
   5143 
   5144 /* Adjust and set the new rwnd value */
   5145 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
   5146 	sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
   5147 	                  asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
   5148 }
   5149 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
   5150                                     (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
   5151 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
   5152 	/* SWS sender side engages */
   5153 	asoc->peers_rwnd = 0;
   5154 }
   5155 if (asoc->peers_rwnd > old_rwnd) {
   5156 	win_probe_recovery = 1;
   5157 }
   5158 
   5159 /*
   5160  * Now we must setup so we have a timer up for anyone with
   5161  * outstanding data.
   5162  */
   5163 done_once = 0;
   5164 again:
   5165 j = 0;
   5166 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   5167 	if (win_probe_recovery && (net->window_probe)) {
   5168 		win_probe_recovered = 1;
   5169 		/*-
   5170 		 * Find first chunk that was used with
   5171 		 * window probe and clear the event. Put
   5172 		 * it back into the send queue as if has
   5173 		 * not been sent.
   5174 		 */
   5175 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
   5176 			if (tp1->window_probe) {
   5177 				sctp_window_probe_recovery(stcb, asoc, tp1);
   5178 				break;
   5179 			}
   5180 		}
   5181 	}
   5182 	if (net->flight_size) {
   5183 		j++;
   5184 		if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
   5185 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
   5186 			                 stcb->sctp_ep, stcb, net);
   5187 		}
   5188 		if (net->window_probe) {
   5189 			net->window_probe = 0;
   5190 		}
   5191 	} else {
   5192 		if (net->window_probe) {
   5193 			/* In window probes we must assure a timer is still running there */
   5194 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
   5195 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
   5196 				                 stcb->sctp_ep, stcb, net);
   5197 			}
   5198 		} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
   5199 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
   5200 			                stcb, net,
   5201 			                SCTP_FROM_SCTP_INDATA + SCTP_LOC_36);
   5202 		}
   5203 	}
   5204 }
   5205 if ((j == 0) &&
   5206     (!TAILQ_EMPTY(&asoc->sent_queue)) &&
   5207     (asoc->sent_queue_retran_cnt == 0) &&
   5208     (win_probe_recovered == 0) &&
   5209     (done_once == 0)) {
   5210 	/* huh, this should not happen unless all packets
   5211 	 * are PR-SCTP and marked to skip of course.
   5212 	 */
   5213 	if (sctp_fs_audit(asoc)) {
   5214 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   5215 			net->flight_size = 0;
   5216 		}
   5217 		asoc->total_flight = 0;
   5218 		asoc->total_flight_count = 0;
   5219 		asoc->sent_queue_retran_cnt = 0;
   5220 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
   5221 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
   5222 				sctp_flight_size_increase(tp1);
   5223 				sctp_total_flight_increase(stcb, tp1);
   5224 			} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
   5225 				sctp_ucount_incr(asoc->sent_queue_retran_cnt);
   5226 			}
   5227 		}
   5228 	}
   5229 	done_once = 1;
   5230 	goto again;
   5231 }
   5232 /*********************************************/
   5233 /* Here we perform PR-SCTP procedures        */
   5234 /* (section 4.2)                             */
   5235 /*********************************************/
   5236 /* C1. update advancedPeerAckPoint */
   5237 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
   5238 	asoc->advanced_peer_ack_point = cum_ack;
   5239 }
   5240 /* C2. try to further move advancedPeerAckPoint ahead */
   5241 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
   5242 	struct sctp_tmit_chunk *lchk;
   5243 	uint32_t old_adv_peer_ack_point;
   5244 
   5245 	old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
   5246 	lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
   5247 	/* C3. See if we need to send a Fwd-TSN */
   5248 	if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
   5249 		/*
   5250 		 * ISSUE with ECN, see FWD-TSN processing.
   5251 		 */
   5252 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
   5253 			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
   5254 			               0xee, cum_ack, asoc->advanced_peer_ack_point,
   5255 			               old_adv_peer_ack_point);
   5256 		}
   5257 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
   5258 			send_forward_tsn(stcb, asoc);
   5259 		} else if (lchk) {
   5260 			/* try to FR fwd-tsn's that get lost too */
   5261 			if (lchk->rec.data.fwd_tsn_cnt >= 3) {
   5262 				send_forward_tsn(stcb, asoc);
   5263 			}
   5264 		}
   5265 	}
   5266 	for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
   5267 		if (lchk->whoTo != NULL) {
   5268 			break;
   5269 		}
   5270 	}
   5271 	if (lchk != NULL) {
   5272 		/* Assure a timer is up */
   5273 		sctp_timer_start(SCTP_TIMER_TYPE_SEND,
   5274 		                 stcb->sctp_ep, stcb, lchk->whoTo);
   5275 	}
   5276 }
   5277 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
   5278 	sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
   5279 	               a_rwnd,
   5280 	               stcb->asoc.peers_rwnd,
   5281 	               stcb->asoc.total_flight,
   5282 	               stcb->asoc.total_output_queue_size);
   5283 }
   5284 }
   5285 
   5286 void
   5287 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
   5288 {
   5289 /* Copy cum-ack */
   5290 uint32_t cum_ack, a_rwnd;
   5291 
   5292 cum_ack = ntohl(cp->cumulative_tsn_ack);
   5293 /* Arrange so a_rwnd does NOT change */
   5294 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
   5295 
   5296 /* Now call the express sack handling */
   5297 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
   5298 }
   5299 
/*
 * After a FORWARD-TSN has advanced strmin->last_mid_delivered for this
 * stream, sweep the stream's ordered in-queue: deliver every queued
 * message at or before the new delivery point, then fall back to normal
 * in-order delivery for anything that has become deliverable as a result.
 * Caller must hold the TCB lock and the INP read lock (asserted below).
 */
static void
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
                              struct sctp_stream_in *strmin)
{
struct sctp_queued_to_read *control, *ncontrol;
struct sctp_association *asoc;
uint32_t mid;
int need_reasm_check = 0;

KASSERT(stcb != NULL, ("stcb == NULL"));
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep);

asoc = &stcb->asoc;
/* Remember the delivery point; it may be temporarily moved and restored. */
mid = strmin->last_mid_delivered;
/*
 * First deliver anything prior to and including the stream no that
 * came in.
 */
TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
	if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
		/* this is deliverable now */
		if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
			/* Complete (unfragmented) message: unlink it from its stream queue. */
			if (control->on_strm_q) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
				} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
					TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
				} else {
					panic("strmin: %p ctl: %p unknown %d",
					      strmin, control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
			}
			/* subtract pending on streams */
			if (asoc->size_on_all_streams >= control->length) {
				asoc->size_on_all_streams -= control->length;
			} else {
				/* Accounting underflow: fatal under INVARIANTS, clamp otherwise. */
#ifdef INVARIANTS
				panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
				asoc->size_on_all_streams = 0;
#endif
			}
			sctp_ucount_decr(asoc->cnt_on_all_streams);
			/* deliver it to at least the delivery-q */
			if (stcb->sctp_socket) {
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb, control,
				                  &stcb->sctp_socket->so_rcv, 1,
				                  SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Its a fragmented message */
			if (control->first_frag_seen) {
				/* Make it so this is next to deliver, we restore later */
				strmin->last_mid_delivered = control->mid - 1;
				need_reasm_check = 1;
				break;
			}
		}
	} else {
		/* no more delivery now. */
		break;
	}
}
if (need_reasm_check) {
	int ret;
	/* Try to push the partially reassembled front message out. */
	ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
	if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
		/* Restore the next to deliver unless we are ahead */
		strmin->last_mid_delivered = mid;
	}
	if (ret == 0) {
		/* Left the front Partial one on */
		return;
	}
	need_reasm_check = 0;
}
/*
 * now we must deliver things in queue the normal way  if any are
 * now ready.
 */
mid = strmin->last_mid_delivered + 1;
TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
	/* Only strictly in-order messages are deliverable from here on. */
	if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
		if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
			/* this is deliverable now */
			if (control->on_strm_q) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
				} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
					TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
				} else {
					panic("strmin: %p ctl: %p unknown %d",
					      strmin, control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
			}
			/* subtract pending on streams */
			if (asoc->size_on_all_streams >= control->length) {
				asoc->size_on_all_streams -= control->length;
			} else {
				/* Accounting underflow: fatal under INVARIANTS, clamp otherwise. */
#ifdef INVARIANTS
				panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
				asoc->size_on_all_streams = 0;
#endif
			}
			sctp_ucount_decr(asoc->cnt_on_all_streams);
			/* deliver it to at least the delivery-q */
			strmin->last_mid_delivered = control->mid;
			if (stcb->sctp_socket) {
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb, control,
				                  &stcb->sctp_socket->so_rcv, 1,
				                  SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
			}
			/* Advance to the next expected MID. */
			mid = strmin->last_mid_delivered + 1;
		} else {
			/* Its a fragmented message */
			if (control->first_frag_seen) {
				/* Make it so this is next to deliver */
				strmin->last_mid_delivered = control->mid - 1;
				need_reasm_check = 1;
				break;
			}
		}
	} else {
		break;
	}
}
if (need_reasm_check) {
	/* Front message is partial; see if reassembly can complete it. */
	(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
}
}
   5440 
/*
 * Purge the reassembly queue of `control` in response to a FORWARD-TSN.
 * Fragments covered by the forwarded cum-tsn are freed; for pre-I-DATA
 * unordered messages only fragments at or below `cumtsn` are dropped, so
 * the control may survive with later fragments still queued.  If the
 * queue empties completely the control itself is torn down (unless it is
 * already on the read queue).  Caller must hold the TCB lock and the INP
 * read lock (asserted below).
 */
static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
                             struct sctp_association *asoc, struct sctp_stream_in *strm,
                             struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
{
struct sctp_tmit_chunk *chk, *nchk;

/*
 * For now large messages held on the stream reasm that are
 * complete will be tossed too. We could in theory do more
 * work to spin through and stop after dumping one msg aka
 * seeing the start of a new msg at the head, and call the
 * delivery function... to see if it can be delivered... But
 * for now we just dump everything on the queue.
 */

KASSERT(stcb != NULL, ("stcb == NULL"));
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep);

/*
 * Old-style unordered message whose included fragments all lie
 * beyond the forwarded cum-tsn: nothing here is being skipped.
 */
if (!asoc->idata_supported && !ordered &&
    control->first_frag_seen &&
    SCTP_TSN_GT(control->fsn_included, cumtsn)) {
	return;
}
TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
	/* Purge hanging chunks */
	if (!asoc->idata_supported && !ordered) {
		/* Pre-I-DATA unordered: stop at the first fragment past cumtsn. */
		if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
			break;
		}
	}
	TAILQ_REMOVE(&control->reasm, chk, sctp_next);
	if (asoc->size_on_reasm_queue >= chk->send_size) {
		asoc->size_on_reasm_queue -= chk->send_size;
	} else {
		/* Accounting underflow: fatal under INVARIANTS, clamp otherwise. */
#ifdef INVARIANTS
		panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
#else
		asoc->size_on_reasm_queue = 0;
#endif
	}
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	if (chk->data) {
		sctp_m_freem(chk->data);
		chk->data = NULL;
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
}
if (!TAILQ_EMPTY(&control->reasm)) {
	/*
	 * Fragments past cumtsn remain (only possible for pre-I-DATA
	 * unordered data): reset the control to a pristine state and
	 * restart reassembly from what is left.
	 */
	KASSERT(!asoc->idata_supported,
	    ("Reassembly queue not empty for I-DATA"));
	KASSERT(!ordered,
	    ("Reassembly queue not empty for ordered data"));
	if (control->data) {
		sctp_m_freem(control->data);
		control->data = NULL;
	}
	control->fsn_included = 0xffffffff;
	control->first_frag_seen = 0;
	control->last_frag_seen = 0;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there,
		 * hopefully this will work :-)
		 */
		TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next);
		control->on_read_q = 0;
	}
	chk = TAILQ_FIRST(&control->reasm);
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* A new first fragment survived; seed the control with it. */
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		sctp_add_chk_to_control(control, strm, stcb, asoc,
		                        chk, SCTP_READ_LOCK_HELD);
	}
	sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
	return;
}
/* Queue fully drained: unlink the control from its stream queue. */
if (control->on_strm_q == SCTP_ON_ORDERED) {
	TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
	if (asoc->size_on_all_streams >= control->length) {
		asoc->size_on_all_streams -= control->length;
	} else {
		/* Accounting underflow: fatal under INVARIANTS, clamp otherwise. */
#ifdef INVARIANTS
		panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
		asoc->size_on_all_streams = 0;
#endif
	}
	sctp_ucount_decr(asoc->cnt_on_all_streams);
	control->on_strm_q = 0;
} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
	TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
	control->on_strm_q = 0;
#ifdef INVARIANTS
} else if (control->on_strm_q) {
	panic("strm: %p ctl: %p unknown %d",
	    strm, control, control->on_strm_q);
#endif
}
/* Defensive clear (covers the non-INVARIANTS unknown-queue case). */
control->on_strm_q = 0;
if (control->on_read_q == 0) {
	/* Not visible to the application: free the control entirely. */
	sctp_free_remote_addr(control->whoFrom);
	if (control->data) {
		sctp_m_freem(control->data);
		control->data = NULL;
	}
	sctp_free_a_readq(stcb, control);
}
}
   5551 
   5552 void
   5553 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
   5554                        struct sctp_forward_tsn_chunk *fwd,
   5555                        int *abort_flag, struct mbuf *m , int offset)
   5556 {
   5557 /* The pr-sctp fwd tsn */
   5558 /*
   5559  * here we will perform all the data receiver side steps for
   5560  * processing FwdTSN, as required in by pr-sctp draft:
   5561  *
   5562  * Assume we get FwdTSN(x):
   5563  *
   5564  * 1) update local cumTSN to x
   5565  * 2) try to further advance cumTSN to x + others we have
   5566  * 3) examine and update re-ordering queue on pr-in-streams
   5567  * 4) clean up re-assembly queue
   5568  * 5) Send a sack to report where we are.
   5569  */
   5570 struct sctp_association *asoc;
   5571 uint32_t new_cum_tsn, gap;
   5572 unsigned int i, fwd_sz, m_size;
   5573 struct sctp_stream_in *strm;
   5574 struct sctp_queued_to_read *control, *ncontrol;
   5575 
   5576 asoc = &stcb->asoc;
   5577 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
   5578 	SCTPDBG(SCTP_DEBUG_INDATA1,
   5579 		"Bad size too small/big fwd-tsn\n");
   5580 	return;
   5581 }
   5582 m_size = (stcb->asoc.mapping_array_size << 3);
   5583 /*************************************************************/
   5584 /* 1. Here we update local cumTSN and shift the bitmap array */
   5585 /*************************************************************/
   5586 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
   5587 
   5588 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
   5589 	/* Already got there ... */
   5590 	return;
   5591 }
   5592 /*
   5593  * now we know the new TSN is more advanced, let's find the actual
   5594  * gap
   5595  */
   5596 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
   5597 asoc->cumulative_tsn = new_cum_tsn;
   5598 if (gap >= m_size) {
   5599 	if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
   5600 		struct mbuf *op_err;
   5601 		char msg[SCTP_DIAG_INFO_LEN];
   5602 
   5603 		/*
   5604 		 * out of range (of single byte chunks in the rwnd I
   5605 		 * give out). This must be an attacker.
   5606 		 */
   5607 		*abort_flag = 1;
   5608 		SCTP_SNPRINTF(msg, sizeof(msg),
   5609 		              "New cum ack %8.8x too high, highest TSN %8.8x",
   5610 		              new_cum_tsn, asoc->highest_tsn_inside_map);
   5611 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   5612 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
   5613 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
   5614 		return;
   5615 	}
   5616 	SCTP_STAT_INCR(sctps_fwdtsn_map_over);
   5617 
   5618 	memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
   5619 	asoc->mapping_array_base_tsn = new_cum_tsn + 1;
   5620 	asoc->highest_tsn_inside_map = new_cum_tsn;
   5621 
   5622 	memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
   5623 	asoc->highest_tsn_inside_nr_map = new_cum_tsn;
   5624 
   5625 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
   5626 		sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
   5627 	}
   5628 } else {
   5629 	SCTP_TCB_LOCK_ASSERT(stcb);
   5630 	for (i = 0; i <= gap; i++) {
   5631 		if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
   5632 		    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
   5633 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
   5634 			if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
   5635 				asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
   5636 			}
   5637 		}
   5638 	}
   5639 }
   5640 /*************************************************************/
   5641 /* 2. Clear up re-assembly queue                             */
   5642 /*************************************************************/
   5643 
   5644 /* This is now done as part of clearing up the stream/seq */
   5645 if (asoc->idata_supported == 0) {
   5646 	uint16_t sid;
   5647 
   5648 	/* Flush all the un-ordered data based on cum-tsn */
   5649 	SCTP_INP_READ_LOCK(stcb->sctp_ep);
   5650 	for (sid = 0; sid < asoc->streamincnt; sid++) {
   5651 		strm = &asoc->strmin[sid];
   5652 		if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
   5653 			sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
   5654 		}
   5655 	}
   5656 	SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
   5657 }
   5658 /*******************************************************/
   5659 /* 3. Update the PR-stream re-ordering queues and fix  */
   5660 /*    delivery issues as needed.                       */
   5661 /*******************************************************/
   5662 fwd_sz -= sizeof(*fwd);
   5663 if (m && fwd_sz) {
   5664 	/* New method. */
   5665 	unsigned int num_str;
   5666 	uint32_t mid;
   5667 	uint16_t sid;
   5668 	uint16_t ordered, flags;
   5669 	struct sctp_strseq *stseq, strseqbuf;
   5670 	struct sctp_strseq_mid *stseq_m, strseqbuf_m;
   5671 	offset += sizeof(*fwd);
   5672 
   5673 	SCTP_INP_READ_LOCK(stcb->sctp_ep);
   5674 	if (asoc->idata_supported) {
   5675 		num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
   5676 	} else {
   5677 		num_str = fwd_sz / sizeof(struct sctp_strseq);
   5678 	}
   5679 	for (i = 0; i < num_str; i++) {
   5680 		if (asoc->idata_supported) {
   5681 			stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
   5682 								    sizeof(struct sctp_strseq_mid),
   5683 								    (uint8_t *)&strseqbuf_m);
   5684 			offset += sizeof(struct sctp_strseq_mid);
   5685 			if (stseq_m == NULL) {
   5686 				break;
   5687 			}
   5688 			sid = ntohs(stseq_m->sid);
   5689 			mid = ntohl(stseq_m->mid);
   5690 			flags = ntohs(stseq_m->flags);
   5691 			if (flags & PR_SCTP_UNORDERED_FLAG) {
   5692 				ordered = 0;
   5693 			} else {
   5694 				ordered = 1;
   5695 			}
   5696 		} else {
   5697 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
   5698 								    sizeof(struct sctp_strseq),
   5699 								    (uint8_t *)&strseqbuf);
   5700 			offset += sizeof(struct sctp_strseq);
   5701 			if (stseq == NULL) {
   5702 				break;
   5703 			}
   5704 			sid = ntohs(stseq->sid);
   5705 			mid = (uint32_t)ntohs(stseq->ssn);
   5706 			ordered = 1;
   5707 		}
   5708 		/* Convert */
   5709 
   5710 		/* now process */
   5711 
    5712 		/*
    5713 		 * Ok we now look for the stream/seq on the read queue
    5714 		 * where it's not all delivered. If we find it we transmute the
    5715 		 * read entry into a PDI_ABORTED.
    5716 		 */
   5717 		if (sid >= asoc->streamincnt) {
   5718 			/* screwed up streams, stop!  */
   5719 			break;
   5720 		}
   5721 		if ((asoc->str_of_pdapi == sid) &&
   5722 		    (asoc->ssn_of_pdapi == mid)) {
    5723 			/* If this is the one we were partially delivering
    5724 			 * now, then we no longer are. Note this will change
    5725 			 * with the reassembly re-write.
    5726 			 */
   5727 			asoc->fragmented_delivery_inprogress = 0;
   5728 		}
   5729 		strm = &asoc->strmin[sid];
   5730 		if (ordered) {
   5731 			TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, ncontrol) {
   5732 				if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
   5733 					sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
   5734 				}
   5735 			}
   5736 		} else {
   5737 			if (asoc->idata_supported) {
   5738 				TAILQ_FOREACH_SAFE(control, &strm->uno_inqueue, next_instrm, ncontrol) {
   5739 					if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
   5740 						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
   5741 					}
   5742 				}
   5743 			} else {
   5744 				if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
   5745 					sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
   5746 				}
   5747 			}
   5748 		}
   5749 		TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
   5750 			if ((control->sinfo_stream == sid) &&
   5751 			    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
   5752 				control->pdapi_aborted = 1;
   5753 				control->end_added = 1;
   5754 				if (control->on_strm_q == SCTP_ON_ORDERED) {
   5755 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
   5756 					if (asoc->size_on_all_streams >= control->length) {
   5757 						asoc->size_on_all_streams -= control->length;
   5758 					} else {
   5759 #ifdef INVARIANTS
   5760 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
   5761 #else
   5762 						asoc->size_on_all_streams = 0;
   5763 #endif
   5764 					}
   5765 					sctp_ucount_decr(asoc->cnt_on_all_streams);
   5766 				} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
   5767 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
   5768 #ifdef INVARIANTS
   5769 				} else if (control->on_strm_q) {
   5770 					panic("strm: %p ctl: %p unknown %d",
   5771 					      strm, control, control->on_strm_q);
   5772 #endif
   5773 				}
   5774 				control->on_strm_q = 0;
   5775 				sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
   5776 				                stcb,
   5777 				                SCTP_PARTIAL_DELIVERY_ABORTED,
   5778 				                (void *)control,
   5779 				                SCTP_SO_NOT_LOCKED);
   5780 				break;
   5781 			} else if ((control->sinfo_stream == sid) &&
   5782 				   SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
   5783 				/* We are past our victim SSN */
   5784 				break;
   5785 			}
   5786 		}
   5787 		if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
   5788 			/* Update the sequence number */
   5789 			strm->last_mid_delivered = mid;
   5790 		}
   5791 		/* now kick the stream the new way */
   5792 		/*sa_ignore NO_NULL_CHK*/
   5793 		sctp_kick_prsctp_reorder_queue(stcb, strm);
   5794 	}
   5795 	SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
   5796 }
   5797 /*
   5798  * Now slide thing forward.
   5799  */
   5800 sctp_slide_mapping_arrays(stcb);
   5801 }